author     Dave Airlie <airlied@redhat.com>  2016-06-01 17:58:36 -0400
committer  Dave Airlie <airlied@redhat.com>  2016-06-01 17:58:36 -0400
commit     66fd7a66e8b9e11e49f46ea77910f935c4dee5c3 (patch)
tree       cc9dd78568036c1d4d0313bcd74f017b69a106c4
parent     65439b68bb10afd877af05463bbff5d25200fd06 (diff)
parent     e42aeef1237b7c969a77b7f726c50f6cb832185f (diff)
Merge branch 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel into drm-next
drm-intel-next-2016-05-22:
- cmd-parser support for direct reg->reg loads (Ken Graunke)
- better handle DP++ smart dongles (Ville)
- bxt guc fw loading support (Nick Hoathe)
- remove a bunch of struct typedefs from dpll code (Ander)
- tons of small work all over to avoid casting between drm_device and the i915 dev struct (Tvrtko&Chris)
- untangle request retiring from other operations, also fixes reset stat corner cases (Chris)
- skl atomic watermark support from Matt Roper, yay!
- various wm handling bugfixes from Ville
- big pile of cdclk rework for bxt/skl (Ville)
- CABC (Content Adaptive Brightness Control) for dsi panels (Jani&Deepak M)
- nonblocking atomic commits for plane-only updates (Maarten Lankhorst)
- bunch of PSR fixes&improvements
- untangle our map/pin/sg_iter code a bit (Dave Gordon)

drm-intel-next-2016-05-08:
- refactor stolen quirks to share code between early quirks and i915 (Joonas)
- refactor gem BO/vma functions (Tvrtko&Dave)
- backlight over DPCD support (Yetunde Abedisi)
- more dsi panel sequence support (Jani)
- lots of refactoring around handling iomaps, vma, ring access and related topics, culminating in removing the duplicated request tracking in the execlist code (Chris & Tvrtko); includes a small patch for core iomapping code
- hw state readout for bxt dsi (Ramalingam C)
- cdclk cleanups (Ville)
- dedupe chv pll code a bit (Ander)
- enable semaphores on gen8+ for legacy submission, to be able to have a direct comparison against execlist on the same platform (Chris); not meant to be used for anything else but performance tuning
- lvds border bit hw state checker fix (Jani)
- rpm vs. shrinker/oom-notifier fixes (Praveen Paneri)
- l3 tuning (Imre)
- revert mst dp audio, it's totally non-functional and crash-y (Lyude)
- first official dmc for kbl (Rodrigo)
- and tons of small things all over as usual

* 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel: (194 commits)
  drm/i915: Revert async unpin and nonblocking atomic commit
  drm/i915: Update DRIVER_DATE to 20160522
  drm/i915: Inline sg_next() for the optimised SGL iterator
  drm/i915: Introduce & use new lightweight SGL iterators
  drm/i915: optimise i915_gem_object_map() for small objects
  drm/i915: refactor i915_gem_object_pin_map()
  drm/i915/psr: Implement PSR2 w/a for gen9
  drm/i915/psr: Use ->get_aux_send_ctl functions
  drm/i915/psr: Order DP aux transactions correctly
  drm/i915/psr: Make idle_frames sensible again
  drm/i915/psr: Try to program link training times correctly
  drm/i915/userptr: Convert to drm_i915_private
  drm/i915: Allow nonblocking update of pageflips.
  drm/i915: Check for unpin correctness.
  Reapply "drm/i915: Avoid stalling on pending flips for legacy cursor updates"
  drm/i915: Make unpin async.
  drm/i915: Prepare connectors for nonblocking checks.
  drm/i915: Pass atomic states to fbc update functions.
  drm/i915: Remove reset_counter from intel_crtc.
  drm/i915: Remove queue_flip pointer.
  ...
-rw-r--r--  arch/x86/kernel/early-quirks.c | 404
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 31
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 44
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 61
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 120
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 101
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 307
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 400
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_batch_pool.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 291
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 239
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 39
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 48
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 81
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_reg.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 485
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 896
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 428
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_aux_backlight.c | 172
-rw-r--r--  drivers/gpu/drm/i915/intel_dpio_phy.c | 470
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 186
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 49
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c | 179
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 77
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_loader.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 352
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 617
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 147
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 1075
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 53
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 476
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 37
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 27
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 219
-rw-r--r--  drivers/gpu/drm/i915/intel_vbt_defs.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/pd.c | 4
-rw-r--r--  include/drm/drmP.h | 1
-rw-r--r--  include/drm/drm_dp_helper.h | 1
-rw-r--r--  include/drm/i915_drm.h | 3
-rw-r--r--  include/linux/io-mapping.h | 10
72 files changed, 4572 insertions, 3923 deletions
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index bca14c899137..757390eb562b 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -223,36 +223,19 @@ static void __init intel_remapping_check(int num, int slot, int func)
223 * despite the efforts of the "RAM buffer" approach, which simply rounds 223 * despite the efforts of the "RAM buffer" approach, which simply rounds
224 * memory boundaries up to 64M to try to catch space that may decode 224 * memory boundaries up to 64M to try to catch space that may decode
225 * as RAM and so is not suitable for MMIO. 225 * as RAM and so is not suitable for MMIO.
226 *
227 * And yes, so far on current devices the base addr is always under 4G.
228 */ 226 */
229static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_size)
230{
231 u32 base;
232
233 /*
234 * For the PCI IDs in this quirk, the stolen base is always
235 * in 0x5c, aka the BDSM register (yes that's really what
236 * it's called).
237 */
238 base = read_pci_config(num, slot, func, 0x5c);
239 base &= ~((1<<20) - 1);
240
241 return base;
242}
243 227
244#define KB(x) ((x) * 1024UL) 228#define KB(x) ((x) * 1024UL)
245#define MB(x) (KB (KB (x))) 229#define MB(x) (KB (KB (x)))
246#define GB(x) (MB (KB (x)))
247 230
248static size_t __init i830_tseg_size(void) 231static size_t __init i830_tseg_size(void)
249{ 232{
250 u8 tmp = read_pci_config_byte(0, 0, 0, I830_ESMRAMC); 233 u8 esmramc = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);
251 234
252 if (!(tmp & TSEG_ENABLE)) 235 if (!(esmramc & TSEG_ENABLE))
253 return 0; 236 return 0;
254 237
255 if (tmp & I830_TSEG_SIZE_1M) 238 if (esmramc & I830_TSEG_SIZE_1M)
256 return MB(1); 239 return MB(1);
257 else 240 else
258 return KB(512); 241 return KB(512);
@@ -260,27 +243,26 @@ static size_t __init i830_tseg_size(void)
260 243
261static size_t __init i845_tseg_size(void) 244static size_t __init i845_tseg_size(void)
262{ 245{
263 u8 tmp = read_pci_config_byte(0, 0, 0, I845_ESMRAMC); 246 u8 esmramc = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
247 u8 tseg_size = esmramc & I845_TSEG_SIZE_MASK;
264 248
265 if (!(tmp & TSEG_ENABLE)) 249 if (!(esmramc & TSEG_ENABLE))
266 return 0; 250 return 0;
267 251
268 switch (tmp & I845_TSEG_SIZE_MASK) { 252 switch (tseg_size) {
269 case I845_TSEG_SIZE_512K: 253 case I845_TSEG_SIZE_512K: return KB(512);
270 return KB(512); 254 case I845_TSEG_SIZE_1M: return MB(1);
271 case I845_TSEG_SIZE_1M:
272 return MB(1);
273 default: 255 default:
274 WARN_ON(1); 256 WARN(1, "Unknown ESMRAMC value: %x!\n", esmramc);
275 return 0;
276 } 257 }
258 return 0;
277} 259}
278 260
279static size_t __init i85x_tseg_size(void) 261static size_t __init i85x_tseg_size(void)
280{ 262{
281 u8 tmp = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC); 263 u8 esmramc = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);
282 264
283 if (!(tmp & TSEG_ENABLE)) 265 if (!(esmramc & TSEG_ENABLE))
284 return 0; 266 return 0;
285 267
286 return MB(1); 268 return MB(1);
@@ -300,285 +282,287 @@ static size_t __init i85x_mem_size(void)
300 * On 830/845/85x the stolen memory base isn't available in any 282 * On 830/845/85x the stolen memory base isn't available in any
301 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size. 283 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
302 */ 284 */
303static u32 __init i830_stolen_base(int num, int slot, int func, size_t stolen_size) 285static phys_addr_t __init i830_stolen_base(int num, int slot, int func,
286 size_t stolen_size)
304{ 287{
305 return i830_mem_size() - i830_tseg_size() - stolen_size; 288 return (phys_addr_t)i830_mem_size() - i830_tseg_size() - stolen_size;
306} 289}
307 290
308static u32 __init i845_stolen_base(int num, int slot, int func, size_t stolen_size) 291static phys_addr_t __init i845_stolen_base(int num, int slot, int func,
292 size_t stolen_size)
309{ 293{
310 return i830_mem_size() - i845_tseg_size() - stolen_size; 294 return (phys_addr_t)i830_mem_size() - i845_tseg_size() - stolen_size;
311} 295}
312 296
313static u32 __init i85x_stolen_base(int num, int slot, int func, size_t stolen_size) 297static phys_addr_t __init i85x_stolen_base(int num, int slot, int func,
298 size_t stolen_size)
314{ 299{
315 return i85x_mem_size() - i85x_tseg_size() - stolen_size; 300 return (phys_addr_t)i85x_mem_size() - i85x_tseg_size() - stolen_size;
316} 301}
317 302
318static u32 __init i865_stolen_base(int num, int slot, int func, size_t stolen_size) 303static phys_addr_t __init i865_stolen_base(int num, int slot, int func,
304 size_t stolen_size)
319{ 305{
306 u16 toud;
307
320 /* 308 /*
321 * FIXME is the graphics stolen memory region 309 * FIXME is the graphics stolen memory region
322 * always at TOUD? Ie. is it always the last 310 * always at TOUD? Ie. is it always the last
323 * one to be allocated by the BIOS? 311 * one to be allocated by the BIOS?
324 */ 312 */
325 return read_pci_config_16(0, 0, 0, I865_TOUD) << 16; 313 toud = read_pci_config_16(0, 0, 0, I865_TOUD);
314
315 return (phys_addr_t)toud << 16;
316}
317
318static phys_addr_t __init gen3_stolen_base(int num, int slot, int func,
319 size_t stolen_size)
320{
321 u32 bsm;
322
323 /* Almost universally we can find the Graphics Base of Stolen Memory
324 * at register BSM (0x5c) in the igfx configuration space. On a few
325 * (desktop) machines this is also mirrored in the bridge device at
326 * different locations, or in the MCHBAR.
327 */
328 bsm = read_pci_config(num, slot, func, INTEL_BSM);
329
330 return (phys_addr_t)bsm & INTEL_BSM_MASK;
326} 331}
327 332
328static size_t __init i830_stolen_size(int num, int slot, int func) 333static size_t __init i830_stolen_size(int num, int slot, int func)
329{ 334{
330 size_t stolen_size;
331 u16 gmch_ctrl; 335 u16 gmch_ctrl;
336 u16 gms;
332 337
333 gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL); 338 gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
334 339 gms = gmch_ctrl & I830_GMCH_GMS_MASK;
335 switch (gmch_ctrl & I830_GMCH_GMS_MASK) { 340
336 case I830_GMCH_GMS_STOLEN_512: 341 switch (gms) {
337 stolen_size = KB(512); 342 case I830_GMCH_GMS_STOLEN_512: return KB(512);
338 break; 343 case I830_GMCH_GMS_STOLEN_1024: return MB(1);
339 case I830_GMCH_GMS_STOLEN_1024: 344 case I830_GMCH_GMS_STOLEN_8192: return MB(8);
340 stolen_size = MB(1); 345 /* local memory isn't part of the normal address space */
341 break; 346 case I830_GMCH_GMS_LOCAL: return 0;
342 case I830_GMCH_GMS_STOLEN_8192:
343 stolen_size = MB(8);
344 break;
345 case I830_GMCH_GMS_LOCAL:
346 /* local memory isn't part of the normal address space */
347 stolen_size = 0;
348 break;
349 default: 347 default:
350 return 0; 348 WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
351 } 349 }
352 350
353 return stolen_size; 351 return 0;
354} 352}
355 353
356static size_t __init gen3_stolen_size(int num, int slot, int func) 354static size_t __init gen3_stolen_size(int num, int slot, int func)
357{ 355{
358 size_t stolen_size;
359 u16 gmch_ctrl; 356 u16 gmch_ctrl;
357 u16 gms;
360 358
361 gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL); 359 gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
362 360 gms = gmch_ctrl & I855_GMCH_GMS_MASK;
363 switch (gmch_ctrl & I855_GMCH_GMS_MASK) { 361
364 case I855_GMCH_GMS_STOLEN_1M: 362 switch (gms) {
365 stolen_size = MB(1); 363 case I855_GMCH_GMS_STOLEN_1M: return MB(1);
366 break; 364 case I855_GMCH_GMS_STOLEN_4M: return MB(4);
367 case I855_GMCH_GMS_STOLEN_4M: 365 case I855_GMCH_GMS_STOLEN_8M: return MB(8);
368 stolen_size = MB(4); 366 case I855_GMCH_GMS_STOLEN_16M: return MB(16);
369 break; 367 case I855_GMCH_GMS_STOLEN_32M: return MB(32);
370 case I855_GMCH_GMS_STOLEN_8M: 368 case I915_GMCH_GMS_STOLEN_48M: return MB(48);
371 stolen_size = MB(8); 369 case I915_GMCH_GMS_STOLEN_64M: return MB(64);
372 break; 370 case G33_GMCH_GMS_STOLEN_128M: return MB(128);
373 case I855_GMCH_GMS_STOLEN_16M: 371 case G33_GMCH_GMS_STOLEN_256M: return MB(256);
374 stolen_size = MB(16); 372 case INTEL_GMCH_GMS_STOLEN_96M: return MB(96);
375 break; 373 case INTEL_GMCH_GMS_STOLEN_160M:return MB(160);
376 case I855_GMCH_GMS_STOLEN_32M: 374 case INTEL_GMCH_GMS_STOLEN_224M:return MB(224);
377 stolen_size = MB(32); 375 case INTEL_GMCH_GMS_STOLEN_352M:return MB(352);
378 break;
379 case I915_GMCH_GMS_STOLEN_48M:
380 stolen_size = MB(48);
381 break;
382 case I915_GMCH_GMS_STOLEN_64M:
383 stolen_size = MB(64);
384 break;
385 case G33_GMCH_GMS_STOLEN_128M:
386 stolen_size = MB(128);
387 break;
388 case G33_GMCH_GMS_STOLEN_256M:
389 stolen_size = MB(256);
390 break;
391 case INTEL_GMCH_GMS_STOLEN_96M:
392 stolen_size = MB(96);
393 break;
394 case INTEL_GMCH_GMS_STOLEN_160M:
395 stolen_size = MB(160);
396 break;
397 case INTEL_GMCH_GMS_STOLEN_224M:
398 stolen_size = MB(224);
399 break;
400 case INTEL_GMCH_GMS_STOLEN_352M:
401 stolen_size = MB(352);
402 break;
403 default: 376 default:
404 stolen_size = 0; 377 WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
405 break;
406 } 378 }
407 379
408 return stolen_size; 380 return 0;
409} 381}
410 382
411static size_t __init gen6_stolen_size(int num, int slot, int func) 383static size_t __init gen6_stolen_size(int num, int slot, int func)
412{ 384{
413 u16 gmch_ctrl; 385 u16 gmch_ctrl;
386 u16 gms;
414 387
415 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); 388 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
416 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT; 389 gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;
417 gmch_ctrl &= SNB_GMCH_GMS_MASK;
418 390
419 return gmch_ctrl << 25; /* 32 MB units */ 391 return (size_t)gms * MB(32);
420} 392}
421 393
422static size_t __init gen8_stolen_size(int num, int slot, int func) 394static size_t __init gen8_stolen_size(int num, int slot, int func)
423{ 395{
424 u16 gmch_ctrl; 396 u16 gmch_ctrl;
397 u16 gms;
425 398
426 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); 399 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
427 gmch_ctrl >>= BDW_GMCH_GMS_SHIFT; 400 gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;
428 gmch_ctrl &= BDW_GMCH_GMS_MASK; 401
429 return gmch_ctrl << 25; /* 32 MB units */ 402 return (size_t)gms * MB(32);
430} 403}
431 404
432static size_t __init chv_stolen_size(int num, int slot, int func) 405static size_t __init chv_stolen_size(int num, int slot, int func)
433{ 406{
434 u16 gmch_ctrl; 407 u16 gmch_ctrl;
408 u16 gms;
435 409
436 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); 410 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
437 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT; 411 gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;
438 gmch_ctrl &= SNB_GMCH_GMS_MASK;
439 412
440 /* 413 /*
441 * 0x0 to 0x10: 32MB increments starting at 0MB 414 * 0x0 to 0x10: 32MB increments starting at 0MB
442 * 0x11 to 0x16: 4MB increments starting at 8MB 415 * 0x11 to 0x16: 4MB increments starting at 8MB
443 * 0x17 to 0x1d: 4MB increments start at 36MB 416 * 0x17 to 0x1d: 4MB increments start at 36MB
444 */ 417 */
445 if (gmch_ctrl < 0x11) 418 if (gms < 0x11)
446 return gmch_ctrl << 25; 419 return (size_t)gms * MB(32);
447 else if (gmch_ctrl < 0x17) 420 else if (gms < 0x17)
448 return (gmch_ctrl - 0x11 + 2) << 22; 421 return (size_t)(gms - 0x11 + 2) * MB(4);
449 else 422 else
450 return (gmch_ctrl - 0x17 + 9) << 22; 423 return (size_t)(gms - 0x17 + 9) * MB(4);
451} 424}
452 425
453struct intel_stolen_funcs {
454 size_t (*size)(int num, int slot, int func);
455 u32 (*base)(int num, int slot, int func, size_t size);
456};
457
458static size_t __init gen9_stolen_size(int num, int slot, int func) 426static size_t __init gen9_stolen_size(int num, int slot, int func)
459{ 427{
460 u16 gmch_ctrl; 428 u16 gmch_ctrl;
429 u16 gms;
461 430
462 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); 431 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
463 gmch_ctrl >>= BDW_GMCH_GMS_SHIFT; 432 gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;
464 gmch_ctrl &= BDW_GMCH_GMS_MASK;
465 433
466 if (gmch_ctrl < 0xf0) 434 /* 0x0 to 0xef: 32MB increments starting at 0MB */
467 return gmch_ctrl << 25; /* 32 MB units */ 435 /* 0xf0 to 0xfe: 4MB increments starting at 4MB */
436 if (gms < 0xf0)
437 return (size_t)gms * MB(32);
468 else 438 else
469 /* 4MB increments starting at 0xf0 for 4MB */ 439 return (size_t)(gms - 0xf0 + 1) * MB(4);
470 return (gmch_ctrl - 0xf0 + 1) << 22;
471} 440}
472 441
473typedef size_t (*stolen_size_fn)(int num, int slot, int func); 442struct intel_early_ops {
443 size_t (*stolen_size)(int num, int slot, int func);
444 phys_addr_t (*stolen_base)(int num, int slot, int func, size_t size);
445};
474 446
475static const struct intel_stolen_funcs i830_stolen_funcs __initconst = { 447static const struct intel_early_ops i830_early_ops __initconst = {
476 .base = i830_stolen_base, 448 .stolen_base = i830_stolen_base,
477 .size = i830_stolen_size, 449 .stolen_size = i830_stolen_size,
478}; 450};
479 451
480static const struct intel_stolen_funcs i845_stolen_funcs __initconst = { 452static const struct intel_early_ops i845_early_ops __initconst = {
481 .base = i845_stolen_base, 453 .stolen_base = i845_stolen_base,
482 .size = i830_stolen_size, 454 .stolen_size = i830_stolen_size,
483}; 455};
484 456
485static const struct intel_stolen_funcs i85x_stolen_funcs __initconst = { 457static const struct intel_early_ops i85x_early_ops __initconst = {
486 .base = i85x_stolen_base, 458 .stolen_base = i85x_stolen_base,
487 .size = gen3_stolen_size, 459 .stolen_size = gen3_stolen_size,
488}; 460};
489 461
490static const struct intel_stolen_funcs i865_stolen_funcs __initconst = { 462static const struct intel_early_ops i865_early_ops __initconst = {
491 .base = i865_stolen_base, 463 .stolen_base = i865_stolen_base,
492 .size = gen3_stolen_size, 464 .stolen_size = gen3_stolen_size,
493}; 465};
494 466
495static const struct intel_stolen_funcs gen3_stolen_funcs __initconst = { 467static const struct intel_early_ops gen3_early_ops __initconst = {
496 .base = intel_stolen_base, 468 .stolen_base = gen3_stolen_base,
497 .size = gen3_stolen_size, 469 .stolen_size = gen3_stolen_size,
498}; 470};
499 471
500static const struct intel_stolen_funcs gen6_stolen_funcs __initconst = { 472static const struct intel_early_ops gen6_early_ops __initconst = {
501 .base = intel_stolen_base, 473 .stolen_base = gen3_stolen_base,
502 .size = gen6_stolen_size, 474 .stolen_size = gen6_stolen_size,
503}; 475};
504 476
505static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = { 477static const struct intel_early_ops gen8_early_ops __initconst = {
506 .base = intel_stolen_base, 478 .stolen_base = gen3_stolen_base,
507 .size = gen8_stolen_size, 479 .stolen_size = gen8_stolen_size,
508}; 480};
509 481
510static const struct intel_stolen_funcs gen9_stolen_funcs __initconst = { 482static const struct intel_early_ops gen9_early_ops __initconst = {
511 .base = intel_stolen_base, 483 .stolen_base = gen3_stolen_base,
512 .size = gen9_stolen_size, 484 .stolen_size = gen9_stolen_size,
513}; 485};
514 486
515static const struct intel_stolen_funcs chv_stolen_funcs __initconst = { 487static const struct intel_early_ops chv_early_ops __initconst = {
516 .base = intel_stolen_base, 488 .stolen_base = gen3_stolen_base,
517 .size = chv_stolen_size, 489 .stolen_size = chv_stolen_size,
518}; 490};
519 491
520static const struct pci_device_id intel_stolen_ids[] __initconst = { 492static const struct pci_device_id intel_early_ids[] __initconst = {
521 INTEL_I830_IDS(&i830_stolen_funcs), 493 INTEL_I830_IDS(&i830_early_ops),
522 INTEL_I845G_IDS(&i845_stolen_funcs), 494 INTEL_I845G_IDS(&i845_early_ops),
523 INTEL_I85X_IDS(&i85x_stolen_funcs), 495 INTEL_I85X_IDS(&i85x_early_ops),
524 INTEL_I865G_IDS(&i865_stolen_funcs), 496 INTEL_I865G_IDS(&i865_early_ops),
525 INTEL_I915G_IDS(&gen3_stolen_funcs), 497 INTEL_I915G_IDS(&gen3_early_ops),
526 INTEL_I915GM_IDS(&gen3_stolen_funcs), 498 INTEL_I915GM_IDS(&gen3_early_ops),
527 INTEL_I945G_IDS(&gen3_stolen_funcs), 499 INTEL_I945G_IDS(&gen3_early_ops),
528 INTEL_I945GM_IDS(&gen3_stolen_funcs), 500 INTEL_I945GM_IDS(&gen3_early_ops),
529 INTEL_VLV_M_IDS(&gen6_stolen_funcs), 501 INTEL_VLV_M_IDS(&gen6_early_ops),
530 INTEL_VLV_D_IDS(&gen6_stolen_funcs), 502 INTEL_VLV_D_IDS(&gen6_early_ops),
531 INTEL_PINEVIEW_IDS(&gen3_stolen_funcs), 503 INTEL_PINEVIEW_IDS(&gen3_early_ops),
532 INTEL_I965G_IDS(&gen3_stolen_funcs), 504 INTEL_I965G_IDS(&gen3_early_ops),
533 INTEL_G33_IDS(&gen3_stolen_funcs), 505 INTEL_G33_IDS(&gen3_early_ops),
534 INTEL_I965GM_IDS(&gen3_stolen_funcs), 506 INTEL_I965GM_IDS(&gen3_early_ops),
535 INTEL_GM45_IDS(&gen3_stolen_funcs), 507 INTEL_GM45_IDS(&gen3_early_ops),
536 INTEL_G45_IDS(&gen3_stolen_funcs), 508 INTEL_G45_IDS(&gen3_early_ops),
537 INTEL_IRONLAKE_D_IDS(&gen3_stolen_funcs), 509 INTEL_IRONLAKE_D_IDS(&gen3_early_ops),
538 INTEL_IRONLAKE_M_IDS(&gen3_stolen_funcs), 510 INTEL_IRONLAKE_M_IDS(&gen3_early_ops),
539 INTEL_SNB_D_IDS(&gen6_stolen_funcs), 511 INTEL_SNB_D_IDS(&gen6_early_ops),
540 INTEL_SNB_M_IDS(&gen6_stolen_funcs), 512 INTEL_SNB_M_IDS(&gen6_early_ops),
541 INTEL_IVB_M_IDS(&gen6_stolen_funcs), 513 INTEL_IVB_M_IDS(&gen6_early_ops),
542 INTEL_IVB_D_IDS(&gen6_stolen_funcs), 514 INTEL_IVB_D_IDS(&gen6_early_ops),
543 INTEL_HSW_D_IDS(&gen6_stolen_funcs), 515 INTEL_HSW_D_IDS(&gen6_early_ops),
544 INTEL_HSW_M_IDS(&gen6_stolen_funcs), 516 INTEL_HSW_M_IDS(&gen6_early_ops),
545 INTEL_BDW_M_IDS(&gen8_stolen_funcs), 517 INTEL_BDW_M_IDS(&gen8_early_ops),
546 INTEL_BDW_D_IDS(&gen8_stolen_funcs), 518 INTEL_BDW_D_IDS(&gen8_early_ops),
547 INTEL_CHV_IDS(&chv_stolen_funcs), 519 INTEL_CHV_IDS(&chv_early_ops),
548 INTEL_SKL_IDS(&gen9_stolen_funcs), 520 INTEL_SKL_IDS(&gen9_early_ops),
549 INTEL_BXT_IDS(&gen9_stolen_funcs), 521 INTEL_BXT_IDS(&gen9_early_ops),
550 INTEL_KBL_IDS(&gen9_stolen_funcs), 522 INTEL_KBL_IDS(&gen9_early_ops),
551}; 523};
552 524
553static void __init intel_graphics_stolen(int num, int slot, int func) 525static void __init
526intel_graphics_stolen(int num, int slot, int func,
527 const struct intel_early_ops *early_ops)
554{ 528{
529 phys_addr_t base, end;
555 size_t size; 530 size_t size;
531
532 size = early_ops->stolen_size(num, slot, func);
533 base = early_ops->stolen_base(num, slot, func, size);
534
535 if (!size || !base)
536 return;
537
538 end = base + size - 1;
539 printk(KERN_INFO "Reserving Intel graphics memory at %pa-%pa\n",
540 &base, &end);
541
542 /* Mark this space as reserved */
543 e820_add_region(base, size, E820_RESERVED);
544 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
545}
546
547static void __init intel_graphics_quirks(int num, int slot, int func)
548{
549 const struct intel_early_ops *early_ops;
550 u16 device;
556 int i; 551 int i;
557 u32 start;
558 u16 device, subvendor, subdevice;
559 552
560 device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID); 553 device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
561 subvendor = read_pci_config_16(num, slot, func, 554
562 PCI_SUBSYSTEM_VENDOR_ID); 555 for (i = 0; i < ARRAY_SIZE(intel_early_ids); i++) {
563 subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID); 556 kernel_ulong_t driver_data = intel_early_ids[i].driver_data;
564 557
565 for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) { 558 if (intel_early_ids[i].device != device)
566 if (intel_stolen_ids[i].device == device) { 559 continue;
567 const struct intel_stolen_funcs *stolen_funcs = 560
568 (const struct intel_stolen_funcs *)intel_stolen_ids[i].driver_data; 561 early_ops = (typeof(early_ops))driver_data;
569 size = stolen_funcs->size(num, slot, func); 562
570 start = stolen_funcs->base(num, slot, func, size); 563 intel_graphics_stolen(num, slot, func, early_ops);
571 if (size && start) { 564
572 printk(KERN_INFO "Reserving Intel graphics stolen memory at 0x%x-0x%x\n", 565 return;
573 start, start + (u32)size - 1);
574 /* Mark this space as reserved */
575 e820_add_region(start, size, E820_RESERVED);
576 sanitize_e820_map(e820.map,
577 ARRAY_SIZE(e820.map),
578 &e820.nr_map);
579 }
580 return;
581 }
582 } 566 }
583} 567}
584 568
@@ -627,7 +611,7 @@ static struct chipset early_qrk[] __initdata = {
627 { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, 611 { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
628 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, 612 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
629 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID, 613 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
630 QFLAG_APPLY_ONCE, intel_graphics_stolen }, 614 QFLAG_APPLY_ONCE, intel_graphics_quirks },
631 /* 615 /*
632 * HPET on the current version of the Baytrail platform has accuracy 616 * HPET on the current version of the Baytrail platform has accuracy
633 * problems: it will halt in deep idle state - so we disable it. 617 * problems: it will halt in deep idle state - so we disable it.
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 66e5c1e412d3..5a773e437e2f 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -242,6 +242,37 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
242 store_vblank(dev, pipe, diff, &t_vblank, cur_vblank); 242 store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
243} 243}
244 244
245/**
246 * drm_accurate_vblank_count - retrieve the master vblank counter
247 * @crtc: which counter to retrieve
248 *
249 * This function is similar to @drm_crtc_vblank_count but this
250 * function interpolates to handle a race with vblank irq's.
251 *
252 * This is mostly useful for hardware that can obtain the scanout
253 * position, but doesn't have a frame counter.
254 */
255u32 drm_accurate_vblank_count(struct drm_crtc *crtc)
256{
257 struct drm_device *dev = crtc->dev;
258 unsigned int pipe = drm_crtc_index(crtc);
259 u32 vblank;
260 unsigned long flags;
261
262 WARN(!dev->driver->get_vblank_timestamp,
263 "This function requires support for accurate vblank timestamps.");
264
265 spin_lock_irqsave(&dev->vblank_time_lock, flags);
266
267 drm_update_vblank_count(dev, pipe, 0);
268 vblank = drm_vblank_count(dev, pipe);
269
270 spin_unlock_irqrestore(&dev->vblank_time_lock, flags);
271
272 return vblank;
273}
274EXPORT_SYMBOL(drm_accurate_vblank_count);
275
245/* 276/*
246 * Disable vblank irq's on crtc, make sure that last vblank count 277 * Disable vblank irq's on crtc, make sure that last vblank count
247 * of hardware and corresponding consistent software vblank counter 278 * of hardware and corresponding consistent software vblank counter
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0b88ba0f3c1f..7e2944406b8f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -59,6 +59,7 @@ i915-y += intel_audio.o \
59 intel_bios.o \ 59 intel_bios.o \
60 intel_color.o \ 60 intel_color.o \
61 intel_display.o \ 61 intel_display.o \
62 intel_dpio_phy.o \
62 intel_dpll_mgr.o \ 63 intel_dpll_mgr.o \
63 intel_fbc.o \ 64 intel_fbc.o \
64 intel_fifo_underrun.o \ 65 intel_fifo_underrun.o \
@@ -81,10 +82,12 @@ i915-y += dvo_ch7017.o \
81 dvo_tfp410.o \ 82 dvo_tfp410.o \
82 intel_crt.o \ 83 intel_crt.o \
83 intel_ddi.o \ 84 intel_ddi.o \
85 intel_dp_aux_backlight.o \
84 intel_dp_link_training.o \ 86 intel_dp_link_training.o \
85 intel_dp_mst.o \ 87 intel_dp_mst.o \
86 intel_dp.o \ 88 intel_dp.o \
87 intel_dsi.o \ 89 intel_dsi.o \
90 intel_dsi_dcs_backlight.o \
88 intel_dsi_panel_vbt.o \ 91 intel_dsi_panel_vbt.o \
89 intel_dsi_pll.o \ 92 intel_dsi_pll.o \
90 intel_dvo.o \ 93 intel_dvo.o \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a337f33bec5b..d97f28bfa9db 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -215,7 +215,8 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
215 CMD( MI_RS_CONTEXT, SMI, F, 1, S ), 215 CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
216 CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), 216 CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
217 CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), 217 CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
218 CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ), 218 CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
219 .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
219 CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ), 220 CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ),
220 CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ), 221 CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ),
221 CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ), 222 CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ),
@@ -750,12 +751,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
750 int cmd_table_count; 751 int cmd_table_count;
751 int ret; 752 int ret;
752 753
753 if (!IS_GEN7(engine->dev)) 754 if (!IS_GEN7(engine->i915))
754 return 0; 755 return 0;
755 756
756 switch (engine->id) { 757 switch (engine->id) {
757 case RCS: 758 case RCS:
758 if (IS_HASWELL(engine->dev)) { 759 if (IS_HASWELL(engine->i915)) {
759 cmd_tables = hsw_render_ring_cmds; 760 cmd_tables = hsw_render_ring_cmds;
760 cmd_table_count = 761 cmd_table_count =
761 ARRAY_SIZE(hsw_render_ring_cmds); 762 ARRAY_SIZE(hsw_render_ring_cmds);
@@ -764,7 +765,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
764 cmd_table_count = ARRAY_SIZE(gen7_render_cmds); 765 cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
765 } 766 }
766 767
767 if (IS_HASWELL(engine->dev)) { 768 if (IS_HASWELL(engine->i915)) {
768 engine->reg_tables = hsw_render_reg_tables; 769 engine->reg_tables = hsw_render_reg_tables;
769 engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables); 770 engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
770 } else { 771 } else {
@@ -780,7 +781,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
780 engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; 781 engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
781 break; 782 break;
782 case BCS: 783 case BCS:
783 if (IS_HASWELL(engine->dev)) { 784 if (IS_HASWELL(engine->i915)) {
784 cmd_tables = hsw_blt_ring_cmds; 785 cmd_tables = hsw_blt_ring_cmds;
785 cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); 786 cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
786 } else { 787 } else {
@@ -788,7 +789,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
788 cmd_table_count = ARRAY_SIZE(gen7_blt_cmds); 789 cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
789 } 790 }
790 791
791 if (IS_HASWELL(engine->dev)) { 792 if (IS_HASWELL(engine->i915)) {
792 engine->reg_tables = hsw_blt_reg_tables; 793 engine->reg_tables = hsw_blt_reg_tables;
793 engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables); 794 engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
794 } else { 795 } else {
@@ -1035,7 +1036,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
1035 if (!engine->needs_cmd_parser) 1036 if (!engine->needs_cmd_parser)
1036 return false; 1037 return false;
1037 1038
1038 if (!USES_PPGTT(engine->dev)) 1039 if (!USES_PPGTT(engine->i915))
1039 return false; 1040 return false;
1040 1041
1041 return (i915.enable_cmd_parser == 1); 1042 return (i915.enable_cmd_parser == 1);
@@ -1098,6 +1099,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
1098 return false; 1099 return false;
1099 } 1100 }
1100 1101
1102 if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
1103 DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n");
1104 return false;
1105 }
1106
1101 if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1)) 1107 if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
1102 *oacontrol_set = (cmd[offset + 1] != 0); 1108 *oacontrol_set = (cmd[offset + 1] != 0);
1103 } 1109 }
@@ -1113,6 +1119,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
1113 return false; 1119 return false;
1114 } 1120 }
1115 1121
1122 if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
1123 DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n",
1124 reg_addr);
1125 return false;
1126 }
1127
1116 if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) && 1128 if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
1117 (offset + 2 > length || 1129 (offset + 2 > length ||
1118 (cmd[offset + 1] & reg->mask) != reg->value)) { 1130 (cmd[offset + 1] & reg->mask) != reg->value)) {
@@ -1275,8 +1287,21 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
1275 * 1287 *
1276 * Return: the current version number of the cmd parser 1288 * Return: the current version number of the cmd parser
1277 */ 1289 */
1278int i915_cmd_parser_get_version(void) 1290int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
1279{ 1291{
1292 struct intel_engine_cs *engine;
1293 bool active = false;
1294
1295 /* If the command parser is not enabled, report 0 - unsupported */
1296 for_each_engine(engine, dev_priv) {
1297 if (i915_needs_cmd_parser(engine)) {
1298 active = true;
1299 break;
1300 }
1301 }
1302 if (!active)
1303 return 0;
1304
1280 /* 1305 /*
1281 * Command parser version history 1306 * Command parser version history
1282 * 1307 *
@@ -1288,6 +1313,7 @@ int i915_cmd_parser_get_version(void)
1288 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3. 1313 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
1289 * 5. GPGPU dispatch compute indirect registers. 1314 * 5. GPGPU dispatch compute indirect registers.
1290 * 6. TIMESTAMP register and Haswell CS GPR registers 1315 * 6. TIMESTAMP register and Haswell CS GPR registers
1316 * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
1291 */ 1317 */
1292 return 6; 1318 return 7;
1293} 1319}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 32690332d441..0a4bedb96d65 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -89,17 +89,17 @@ static int i915_capabilities(struct seq_file *m, void *data)
89 return 0; 89 return 0;
90} 90}
91 91
92static const char get_active_flag(struct drm_i915_gem_object *obj) 92static char get_active_flag(struct drm_i915_gem_object *obj)
93{ 93{
94 return obj->active ? '*' : ' '; 94 return obj->active ? '*' : ' ';
95} 95}
96 96
97static const char get_pin_flag(struct drm_i915_gem_object *obj) 97static char get_pin_flag(struct drm_i915_gem_object *obj)
98{ 98{
99 return obj->pin_display ? 'p' : ' '; 99 return obj->pin_display ? 'p' : ' ';
100} 100}
101 101
102static const char get_tiling_flag(struct drm_i915_gem_object *obj) 102static char get_tiling_flag(struct drm_i915_gem_object *obj)
103{ 103{
104 switch (obj->tiling_mode) { 104 switch (obj->tiling_mode) {
105 default: 105 default:
@@ -109,12 +109,12 @@ static const char get_tiling_flag(struct drm_i915_gem_object *obj)
109 } 109 }
110} 110}
111 111
112static inline const char get_global_flag(struct drm_i915_gem_object *obj) 112static char get_global_flag(struct drm_i915_gem_object *obj)
113{ 113{
114 return i915_gem_obj_to_ggtt(obj) ? 'g' : ' '; 114 return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
115} 115}
116 116
117static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj) 117static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
118{ 118{
119 return obj->mapping ? 'M' : ' '; 119 return obj->mapping ? 'M' : ' ';
120} 120}
@@ -607,18 +607,20 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
607 for_each_intel_crtc(dev, crtc) { 607 for_each_intel_crtc(dev, crtc) {
608 const char pipe = pipe_name(crtc->pipe); 608 const char pipe = pipe_name(crtc->pipe);
609 const char plane = plane_name(crtc->plane); 609 const char plane = plane_name(crtc->plane);
610 struct intel_unpin_work *work; 610 struct intel_flip_work *work;
611 611
612 spin_lock_irq(&dev->event_lock); 612 spin_lock_irq(&dev->event_lock);
613 work = crtc->unpin_work; 613 work = crtc->flip_work;
614 if (work == NULL) { 614 if (work == NULL) {
615 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 615 seq_printf(m, "No flip due on pipe %c (plane %c)\n",
616 pipe, plane); 616 pipe, plane);
617 } else { 617 } else {
618 u32 pending;
618 u32 addr; 619 u32 addr;
619 620
620 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 621 pending = atomic_read(&work->pending);
621 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 622 if (pending) {
623 seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
622 pipe, plane); 624 pipe, plane);
623 } else { 625 } else {
624 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 626 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
@@ -638,11 +640,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
638 seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n", 640 seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
639 work->flip_queued_vblank, 641 work->flip_queued_vblank,
640 work->flip_ready_vblank, 642 work->flip_ready_vblank,
641 drm_crtc_vblank_count(&crtc->base)); 643 intel_crtc_get_vblank_counter(crtc));
642 if (work->enable_stall_check)
643 seq_puts(m, "Stall check enabled, ");
644 else
645 seq_puts(m, "Stall check waiting for page flip ioctl, ");
646 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 644 seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
647 645
648 if (INTEL_INFO(dev)->gen >= 4) 646 if (INTEL_INFO(dev)->gen >= 4)
@@ -1383,7 +1381,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
1383 seqno[id] = engine->get_seqno(engine); 1381 seqno[id] = engine->get_seqno(engine);
1384 } 1382 }
1385 1383
1386 i915_get_extra_instdone(dev, instdone); 1384 i915_get_extra_instdone(dev_priv, instdone);
1387 1385
1388 intel_runtime_pm_put(dev_priv); 1386 intel_runtime_pm_put(dev_priv);
1389 1387
@@ -2004,7 +2002,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
2004 ctx->legacy_hw_ctx.rcs_state == NULL) 2002 ctx->legacy_hw_ctx.rcs_state == NULL)
2005 continue; 2003 continue;
2006 2004
2007 seq_puts(m, "HW context "); 2005 seq_printf(m, "HW context %u ", ctx->hw_id);
2008 describe_ctx(m, ctx); 2006 describe_ctx(m, ctx);
2009 if (ctx == dev_priv->kernel_context) 2007 if (ctx == dev_priv->kernel_context)
2010 seq_printf(m, "(kernel context) "); 2008 seq_printf(m, "(kernel context) ");
@@ -2046,15 +2044,13 @@ static void i915_dump_lrc_obj(struct seq_file *m,
2046 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; 2044 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
2047 unsigned long ggtt_offset = 0; 2045 unsigned long ggtt_offset = 0;
2048 2046
2047 seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
2048
2049 if (ctx_obj == NULL) { 2049 if (ctx_obj == NULL) {
2050 seq_printf(m, "Context on %s with no gem object\n", 2050 seq_puts(m, "\tNot allocated\n");
2051 engine->name);
2052 return; 2051 return;
2053 } 2052 }
2054 2053
2055 seq_printf(m, "CONTEXT: %s %u\n", engine->name,
2056 intel_execlists_ctx_id(ctx, engine));
2057
2058 if (!i915_gem_obj_ggtt_bound(ctx_obj)) 2054 if (!i915_gem_obj_ggtt_bound(ctx_obj))
2059 seq_puts(m, "\tNot bound in GGTT\n"); 2055 seq_puts(m, "\tNot bound in GGTT\n");
2060 else 2056 else
@@ -2100,9 +2096,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
2100 return ret; 2096 return ret;
2101 2097
2102 list_for_each_entry(ctx, &dev_priv->context_list, link) 2098 list_for_each_entry(ctx, &dev_priv->context_list, link)
2103 if (ctx != dev_priv->kernel_context) 2099 for_each_engine(engine, dev_priv)
2104 for_each_engine(engine, dev_priv) 2100 i915_dump_lrc_obj(m, ctx, engine);
2105 i915_dump_lrc_obj(m, ctx, engine);
2106 2101
2107 mutex_unlock(&dev->struct_mutex); 2102 mutex_unlock(&dev->struct_mutex);
2108 2103
@@ -2173,8 +2168,8 @@ static int i915_execlists(struct seq_file *m, void *data)
2173 2168
2174 seq_printf(m, "\t%d requests in queue\n", count); 2169 seq_printf(m, "\t%d requests in queue\n", count);
2175 if (head_req) { 2170 if (head_req) {
2176 seq_printf(m, "\tHead request id: %u\n", 2171 seq_printf(m, "\tHead request context: %u\n",
2177 intel_execlists_ctx_id(head_req->ctx, engine)); 2172 head_req->ctx->hw_id);
2178 seq_printf(m, "\tHead request tail: %u\n", 2173 seq_printf(m, "\tHead request tail: %u\n",
2179 head_req->tail); 2174 head_req->tail);
2180 } 2175 }
@@ -2313,12 +2308,12 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2313 struct drm_i915_private *dev_priv = dev->dev_private; 2308 struct drm_i915_private *dev_priv = dev->dev_private;
2314 struct intel_engine_cs *engine; 2309 struct intel_engine_cs *engine;
2315 2310
2316 if (INTEL_INFO(dev)->gen == 6) 2311 if (IS_GEN6(dev_priv))
2317 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 2312 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2318 2313
2319 for_each_engine(engine, dev_priv) { 2314 for_each_engine(engine, dev_priv) {
2320 seq_printf(m, "%s\n", engine->name); 2315 seq_printf(m, "%s\n", engine->name);
2321 if (INTEL_INFO(dev)->gen == 7) 2316 if (IS_GEN7(dev_priv))
2322 seq_printf(m, "GFX_MODE: 0x%08x\n", 2317 seq_printf(m, "GFX_MODE: 0x%08x\n",
2323 I915_READ(RING_MODE_GEN7(engine))); 2318 I915_READ(RING_MODE_GEN7(engine)));
2324 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", 2319 seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
@@ -3168,7 +3163,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
3168 enum intel_engine_id id; 3163 enum intel_engine_id id;
3169 int j, ret; 3164 int j, ret;
3170 3165
3171 if (!i915_semaphore_is_enabled(dev)) { 3166 if (!i915_semaphore_is_enabled(dev_priv)) {
3172 seq_puts(m, "Semaphores are disabled\n"); 3167 seq_puts(m, "Semaphores are disabled\n");
3173 return 0; 3168 return 0;
3174 } 3169 }
@@ -4769,7 +4764,7 @@ i915_wedged_set(void *data, u64 val)
4769 4764
4770 intel_runtime_pm_get(dev_priv); 4765 intel_runtime_pm_get(dev_priv);
4771 4766
4772 i915_handle_error(dev, val, 4767 i915_handle_error(dev_priv, val,
4773 "Manually setting wedged to %llu", val); 4768 "Manually setting wedged to %llu", val);
4774 4769
4775 intel_runtime_pm_put(dev_priv); 4770 intel_runtime_pm_put(dev_priv);
@@ -4919,7 +4914,7 @@ i915_drop_caches_set(void *data, u64 val)
4919 } 4914 }
4920 4915
4921 if (val & (DROP_RETIRE | DROP_ACTIVE)) 4916 if (val & (DROP_RETIRE | DROP_ACTIVE))
4922 i915_gem_retire_requests(dev); 4917 i915_gem_retire_requests(dev_priv);
4923 4918
4924 if (val & DROP_BOUND) 4919 if (val & DROP_BOUND)
4925 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); 4920 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
@@ -4993,7 +4988,7 @@ i915_max_freq_set(void *data, u64 val)
4993 4988
4994 dev_priv->rps.max_freq_softlimit = val; 4989 dev_priv->rps.max_freq_softlimit = val;
4995 4990
4996 intel_set_rps(dev, val); 4991 intel_set_rps(dev_priv, val);
4997 4992
4998 mutex_unlock(&dev_priv->rps.hw_lock); 4993 mutex_unlock(&dev_priv->rps.hw_lock);
4999 4994
@@ -5060,7 +5055,7 @@ i915_min_freq_set(void *data, u64 val)
5060 5055
5061 dev_priv->rps.min_freq_softlimit = val; 5056 dev_priv->rps.min_freq_softlimit = val;
5062 5057
5063 intel_set_rps(dev, val); 5058 intel_set_rps(dev_priv, val);
5064 5059
5065 mutex_unlock(&dev_priv->rps.hw_lock); 5060 mutex_unlock(&dev_priv->rps.hw_lock);
5066 5061
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index b3198fcd0536..fd06bff216ff 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -186,7 +186,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
186 value = 1; 186 value = 1;
187 break; 187 break;
188 case I915_PARAM_HAS_SEMAPHORES: 188 case I915_PARAM_HAS_SEMAPHORES:
189 value = i915_semaphore_is_enabled(dev); 189 value = i915_semaphore_is_enabled(dev_priv);
190 break; 190 break;
191 case I915_PARAM_HAS_PRIME_VMAP_FLUSH: 191 case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
192 value = 1; 192 value = 1;
@@ -204,7 +204,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
204 value = 1; 204 value = 1;
205 break; 205 break;
206 case I915_PARAM_CMD_PARSER_VERSION: 206 case I915_PARAM_CMD_PARSER_VERSION:
207 value = i915_cmd_parser_get_version(); 207 value = i915_cmd_parser_get_version(dev_priv);
208 break; 208 break;
209 case I915_PARAM_HAS_COHERENT_PHYS_GTT: 209 case I915_PARAM_HAS_COHERENT_PHYS_GTT:
210 value = 1; 210 value = 1;
@@ -223,8 +223,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
223 return -ENODEV; 223 return -ENODEV;
224 break; 224 break;
225 case I915_PARAM_HAS_GPU_RESET: 225 case I915_PARAM_HAS_GPU_RESET:
226 value = i915.enable_hangcheck && 226 value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
227 intel_has_gpu_reset(dev);
228 break; 227 break;
229 case I915_PARAM_HAS_RESOURCE_STREAMER: 228 case I915_PARAM_HAS_RESOURCE_STREAMER:
230 value = HAS_RESOURCE_STREAMER(dev); 229 value = HAS_RESOURCE_STREAMER(dev);
@@ -425,6 +424,43 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
425 .can_switch = i915_switcheroo_can_switch, 424 .can_switch = i915_switcheroo_can_switch,
426}; 425};
427 426
427static void i915_gem_fini(struct drm_device *dev)
428{
429 struct drm_i915_private *dev_priv = to_i915(dev);
430
431 /*
432 * Neither the BIOS, ourselves or any other kernel
433 * expects the system to be in execlists mode on startup,
434 * so we need to reset the GPU back to legacy mode. And the only
435 * known way to disable logical contexts is through a GPU reset.
436 *
437 * So in order to leave the system in a known default configuration,
438 * always reset the GPU upon unload. Afterwards we then clean up the
439 * GEM state tracking, flushing off the requests and leaving the
440 * system in a known idle state.
441 *
442 * Note that is of the upmost importance that the GPU is idle and
443 * all stray writes are flushed *before* we dismantle the backing
444 * storage for the pinned objects.
445 *
446 * However, since we are uncertain that reseting the GPU on older
447 * machines is a good idea, we don't - just in case it leaves the
448 * machine in an unusable condition.
449 */
450 if (HAS_HW_CONTEXTS(dev)) {
451 int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
452 WARN_ON(reset && reset != -ENODEV);
453 }
454
455 mutex_lock(&dev->struct_mutex);
456 i915_gem_reset(dev);
457 i915_gem_cleanup_engines(dev);
458 i915_gem_context_fini(dev);
459 mutex_unlock(&dev->struct_mutex);
460
461 WARN_ON(!list_empty(&to_i915(dev)->context_list));
462}
463
428static int i915_load_modeset_init(struct drm_device *dev) 464static int i915_load_modeset_init(struct drm_device *dev)
429{ 465{
430 struct drm_i915_private *dev_priv = dev->dev_private; 466 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -454,6 +490,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
454 if (ret) 490 if (ret)
455 goto cleanup_vga_client; 491 goto cleanup_vga_client;
456 492
493 /* must happen before intel_power_domains_init_hw() on VLV/CHV */
494 intel_update_rawclk(dev_priv);
495
457 intel_power_domains_init_hw(dev_priv, false); 496 intel_power_domains_init_hw(dev_priv, false);
458 497
459 intel_csr_ucode_init(dev_priv); 498 intel_csr_ucode_init(dev_priv);
@@ -503,10 +542,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
503 return 0; 542 return 0;
504 543
505cleanup_gem: 544cleanup_gem:
506 mutex_lock(&dev->struct_mutex); 545 i915_gem_fini(dev);
507 i915_gem_cleanup_engines(dev);
508 i915_gem_context_fini(dev);
509 mutex_unlock(&dev->struct_mutex);
510cleanup_irq: 546cleanup_irq:
511 intel_guc_ucode_fini(dev); 547 intel_guc_ucode_fini(dev);
512 drm_irq_uninstall(dev); 548 drm_irq_uninstall(dev);
@@ -850,7 +886,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
850 DRM_INFO("Display disabled (module parameter)\n"); 886 DRM_INFO("Display disabled (module parameter)\n");
851 info->num_pipes = 0; 887 info->num_pipes = 0;
852 } else if (info->num_pipes > 0 && 888 } else if (info->num_pipes > 0 &&
853 (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) && 889 (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
854 HAS_PCH_SPLIT(dev)) { 890 HAS_PCH_SPLIT(dev)) {
855 u32 fuse_strap = I915_READ(FUSE_STRAP); 891 u32 fuse_strap = I915_READ(FUSE_STRAP);
856 u32 sfuse_strap = I915_READ(SFUSE_STRAP); 892 u32 sfuse_strap = I915_READ(SFUSE_STRAP);
@@ -874,7 +910,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
874 DRM_INFO("PipeC fused off\n"); 910 DRM_INFO("PipeC fused off\n");
875 info->num_pipes -= 1; 911 info->num_pipes -= 1;
876 } 912 }
877 } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) { 913 } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
878 u32 dfsm = I915_READ(SKL_DFSM); 914 u32 dfsm = I915_READ(SKL_DFSM);
879 u8 disabled_mask = 0; 915 u8 disabled_mask = 0;
880 bool invalid; 916 bool invalid;
@@ -915,9 +951,11 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
915 else if (INTEL_INFO(dev)->gen >= 9) 951 else if (INTEL_INFO(dev)->gen >= 9)
916 gen9_sseu_info_init(dev); 952 gen9_sseu_info_init(dev);
917 953
918 /* Snooping is broken on BXT A stepping. */
919 info->has_snoop = !info->has_llc; 954 info->has_snoop = !info->has_llc;
920 info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1); 955
956 /* Snooping is broken on BXT A stepping. */
957 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
958 info->has_snoop = false;
921 959
922 DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total); 960 DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
923 DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total); 961 DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
@@ -930,6 +968,20 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
930 info->has_subslice_pg ? "y" : "n"); 968 info->has_subslice_pg ? "y" : "n");
931 DRM_DEBUG_DRIVER("has EU power gating: %s\n", 969 DRM_DEBUG_DRIVER("has EU power gating: %s\n",
932 info->has_eu_pg ? "y" : "n"); 970 info->has_eu_pg ? "y" : "n");
971
972 i915.enable_execlists =
973 intel_sanitize_enable_execlists(dev_priv,
974 i915.enable_execlists);
975
976 /*
977 * i915.enable_ppgtt is read-only, so do an early pass to validate the
978 * user's requested state against the hardware/driver capabilities. We
979 * do this now so that we can print out any log messages once rather
980 * than every time we check intel_enable_ppgtt().
981 */
982 i915.enable_ppgtt =
983 intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
984 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
933} 985}
934 986
935static void intel_init_dpio(struct drm_i915_private *dev_priv) 987static void intel_init_dpio(struct drm_i915_private *dev_priv)
@@ -1020,6 +1072,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
1020 memcpy(device_info, info, sizeof(dev_priv->info)); 1072 memcpy(device_info, info, sizeof(dev_priv->info));
1021 device_info->device_id = dev->pdev->device; 1073 device_info->device_id = dev->pdev->device;
1022 1074
1075 BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
1076 device_info->gen_mask = BIT(device_info->gen - 1);
1077
1023 spin_lock_init(&dev_priv->irq_lock); 1078 spin_lock_init(&dev_priv->irq_lock);
1024 spin_lock_init(&dev_priv->gpu_error.lock); 1079 spin_lock_init(&dev_priv->gpu_error.lock);
1025 mutex_init(&dev_priv->backlight_lock); 1080 mutex_init(&dev_priv->backlight_lock);
@@ -1137,7 +1192,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
1137 if (ret < 0) 1192 if (ret < 0)
1138 goto put_bridge; 1193 goto put_bridge;
1139 1194
1140 intel_uncore_init(dev); 1195 intel_uncore_init(dev_priv);
1141 1196
1142 return 0; 1197 return 0;
1143 1198
@@ -1155,7 +1210,7 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
1155{ 1210{
1156 struct drm_device *dev = dev_priv->dev; 1211 struct drm_device *dev = dev_priv->dev;
1157 1212
1158 intel_uncore_fini(dev); 1213 intel_uncore_fini(dev_priv);
1159 i915_mmio_cleanup(dev); 1214 i915_mmio_cleanup(dev);
1160 pci_dev_put(dev_priv->bridge_dev); 1215 pci_dev_put(dev_priv->bridge_dev);
1161} 1216}
@@ -1206,8 +1261,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1206 pci_set_master(dev->pdev); 1261 pci_set_master(dev->pdev);
1207 1262
1208 /* overlay on gen2 is broken and can't address above 1G */ 1263 /* overlay on gen2 is broken and can't address above 1G */
1209 if (IS_GEN2(dev)) 1264 if (IS_GEN2(dev)) {
1210 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); 1265 ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1266 if (ret) {
1267 DRM_ERROR("failed to set DMA mask\n");
1268
1269 goto out_ggtt;
1270 }
1271 }
1272
1211 1273
1212 /* 965GM sometimes incorrectly writes to hardware status page (HWS) 1274 /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1213 * using 32bit addressing, overwriting memory if HWS is located 1275 * using 32bit addressing, overwriting memory if HWS is located
@@ -1217,8 +1279,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1217 * behaviour if any general state is accessed within a page above 4GB, 1279 * behaviour if any general state is accessed within a page above 4GB,
1218 * which also needs to be handled carefully. 1280 * which also needs to be handled carefully.
1219 */ 1281 */
1220 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) 1282 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
1221 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); 1283 ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1284
1285 if (ret) {
1286 DRM_ERROR("failed to set DMA mask\n");
1287
1288 goto out_ggtt;
1289 }
1290 }
1222 1291
1223 aperture_size = ggtt->mappable_end; 1292 aperture_size = ggtt->mappable_end;
1224 1293
@@ -1236,7 +1305,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1236 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, 1305 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
1237 PM_QOS_DEFAULT_VALUE); 1306 PM_QOS_DEFAULT_VALUE);
1238 1307
1239 intel_uncore_sanitize(dev); 1308 intel_uncore_sanitize(dev_priv);
1240 1309
1241 intel_opregion_setup(dev); 1310 intel_opregion_setup(dev);
1242 1311
@@ -1300,7 +1369,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
1300 * Notify a valid surface after modesetting, 1369 * Notify a valid surface after modesetting,
1301 * when running inside a VM. 1370 * when running inside a VM.
1302 */ 1371 */
1303 if (intel_vgpu_active(dev)) 1372 if (intel_vgpu_active(dev_priv))
1304 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); 1373 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
1305 1374
1306 i915_setup_sysfs(dev); 1375 i915_setup_sysfs(dev);
@@ -1459,10 +1528,7 @@ int i915_driver_unload(struct drm_device *dev)
1459 flush_workqueue(dev_priv->wq); 1528 flush_workqueue(dev_priv->wq);
1460 1529
1461 intel_guc_ucode_fini(dev); 1530 intel_guc_ucode_fini(dev);
1462 mutex_lock(&dev->struct_mutex); 1531 i915_gem_fini(dev);
1463 i915_gem_cleanup_engines(dev);
1464 i915_gem_context_fini(dev);
1465 mutex_unlock(&dev->struct_mutex);
1466 intel_fbc_cleanup_cfb(dev_priv); 1532 intel_fbc_cleanup_cfb(dev_priv);
1467 1533
1468 intel_power_domains_fini(dev_priv); 1534 intel_power_domains_fini(dev_priv);
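i915_gem_fini() is new in this series; its body is not part of this hunk, but the lines it replaces suggest it is essentially the removed sequence folded into one helper (a presumed sketch, not the actual definition):

/* Presumed shape of i915_gem_fini(), inferred from the calls it replaces
 * above; the real definition lives in i915_gem.c.
 */
void i915_gem_fini(struct drm_device *dev)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_engines(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
}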
@@ -1570,15 +1636,15 @@ const struct drm_ioctl_desc i915_ioctls[] = {
1570 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), 1636 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
1571 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), 1637 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
1572 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), 1638 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
1573 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW), 1639 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
1574 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW), 1640 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
1575 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), 1641 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
1576 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), 1642 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
1577 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 1643 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1578 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), 1644 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
1579 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), 1645 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
1580 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), 1646 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
1581 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW), 1647 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
1582 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), 1648 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
1583 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), 1649 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
1584 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), 1650 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e264a90d1b0d..61bf5a92040d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -298,22 +298,26 @@ static const struct intel_device_info intel_haswell_m_info = {
298static const struct intel_device_info intel_broadwell_d_info = { 298static const struct intel_device_info intel_broadwell_d_info = {
299 BDW_FEATURES, 299 BDW_FEATURES,
300 .gen = 8, 300 .gen = 8,
301 .is_broadwell = 1,
301}; 302};
302 303
303static const struct intel_device_info intel_broadwell_m_info = { 304static const struct intel_device_info intel_broadwell_m_info = {
304 BDW_FEATURES, 305 BDW_FEATURES,
305 .gen = 8, .is_mobile = 1, 306 .gen = 8, .is_mobile = 1,
307 .is_broadwell = 1,
306}; 308};
307 309
308static const struct intel_device_info intel_broadwell_gt3d_info = { 310static const struct intel_device_info intel_broadwell_gt3d_info = {
309 BDW_FEATURES, 311 BDW_FEATURES,
310 .gen = 8, 312 .gen = 8,
313 .is_broadwell = 1,
311 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 314 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
312}; 315};
313 316
314static const struct intel_device_info intel_broadwell_gt3m_info = { 317static const struct intel_device_info intel_broadwell_gt3m_info = {
315 BDW_FEATURES, 318 BDW_FEATURES,
316 .gen = 8, .is_mobile = 1, 319 .gen = 8, .is_mobile = 1,
320 .is_broadwell = 1,
317 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 321 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
318}; 322};
319 323
@@ -528,9 +532,9 @@ void intel_detect_pch(struct drm_device *dev)
528 pci_dev_put(pch); 532 pci_dev_put(pch);
529} 533}
530 534
531bool i915_semaphore_is_enabled(struct drm_device *dev) 535bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
532{ 536{
533 if (INTEL_INFO(dev)->gen < 6) 537 if (INTEL_GEN(dev_priv) < 6)
534 return false; 538 return false;
535 539
536 if (i915.semaphores >= 0) 540 if (i915.semaphores >= 0)
@@ -540,13 +544,9 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
540 if (i915.enable_execlists) 544 if (i915.enable_execlists)
541 return false; 545 return false;
542 546
543 /* Until we get further testing... */
544 if (IS_GEN8(dev))
545 return false;
546
547#ifdef CONFIG_INTEL_IOMMU 547#ifdef CONFIG_INTEL_IOMMU
548 /* Enable semaphores on SNB when IO remapping is off */ 548 /* Enable semaphores on SNB when IO remapping is off */
549 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) 549 if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
550 return false; 550 return false;
551#endif 551#endif
552 552
@@ -608,7 +608,7 @@ static int i915_drm_suspend(struct drm_device *dev)
608 608
609 intel_guc_suspend(dev); 609 intel_guc_suspend(dev);
610 610
611 intel_suspend_gt_powersave(dev); 611 intel_suspend_gt_powersave(dev_priv);
612 612
613 intel_display_suspend(dev); 613 intel_display_suspend(dev);
614 614
@@ -628,7 +628,7 @@ static int i915_drm_suspend(struct drm_device *dev)
628 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; 628 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
629 intel_opregion_notify_adapter(dev, opregion_target_state); 629 intel_opregion_notify_adapter(dev, opregion_target_state);
630 630
631 intel_uncore_forcewake_reset(dev, false); 631 intel_uncore_forcewake_reset(dev_priv, false);
632 intel_opregion_fini(dev); 632 intel_opregion_fini(dev);
633 633
634 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); 634 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@ -775,7 +775,7 @@ static int i915_drm_resume(struct drm_device *dev)
775 775
776 spin_lock_irq(&dev_priv->irq_lock); 776 spin_lock_irq(&dev_priv->irq_lock);
777 if (dev_priv->display.hpd_irq_setup) 777 if (dev_priv->display.hpd_irq_setup)
778 dev_priv->display.hpd_irq_setup(dev); 778 dev_priv->display.hpd_irq_setup(dev_priv);
779 spin_unlock_irq(&dev_priv->irq_lock); 779 spin_unlock_irq(&dev_priv->irq_lock);
780 780
781 intel_dp_mst_resume(dev); 781 intel_dp_mst_resume(dev);
@@ -868,9 +868,9 @@ static int i915_drm_resume_early(struct drm_device *dev)
868 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", 868 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
869 ret); 869 ret);
870 870
871 intel_uncore_early_sanitize(dev, true); 871 intel_uncore_early_sanitize(dev_priv, true);
872 872
873 if (IS_BROXTON(dev)) { 873 if (IS_BROXTON(dev_priv)) {
874 if (!dev_priv->suspended_to_idle) 874 if (!dev_priv->suspended_to_idle)
875 gen9_sanitize_dc_state(dev_priv); 875 gen9_sanitize_dc_state(dev_priv);
876 bxt_disable_dc9(dev_priv); 876 bxt_disable_dc9(dev_priv);
@@ -878,7 +878,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
878 hsw_disable_pc8(dev_priv); 878 hsw_disable_pc8(dev_priv);
879 } 879 }
880 880
881 intel_uncore_sanitize(dev); 881 intel_uncore_sanitize(dev_priv);
882 882
883 if (IS_BROXTON(dev_priv) || 883 if (IS_BROXTON(dev_priv) ||
884 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) 884 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
@@ -921,14 +921,14 @@ int i915_resume_switcheroo(struct drm_device *dev)
921 * - re-init interrupt state 921 * - re-init interrupt state
922 * - re-init display 922 * - re-init display
923 */ 923 */
924int i915_reset(struct drm_device *dev) 924int i915_reset(struct drm_i915_private *dev_priv)
925{ 925{
926 struct drm_i915_private *dev_priv = dev->dev_private; 926 struct drm_device *dev = dev_priv->dev;
927 struct i915_gpu_error *error = &dev_priv->gpu_error; 927 struct i915_gpu_error *error = &dev_priv->gpu_error;
928 unsigned reset_counter; 928 unsigned reset_counter;
929 int ret; 929 int ret;
930 930
931 intel_reset_gt_powersave(dev); 931 intel_reset_gt_powersave(dev_priv);
932 932
933 mutex_lock(&dev->struct_mutex); 933 mutex_lock(&dev->struct_mutex);
934 934
@@ -944,7 +944,7 @@ int i915_reset(struct drm_device *dev)
944 944
945 i915_gem_reset(dev); 945 i915_gem_reset(dev);
946 946
947 ret = intel_gpu_reset(dev, ALL_ENGINES); 947 ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
948 948
949 /* Also reset the gpu hangman. */ 949 /* Also reset the gpu hangman. */
950 if (error->stop_rings != 0) { 950 if (error->stop_rings != 0) {
@@ -999,7 +999,7 @@ int i915_reset(struct drm_device *dev)
999 * of re-init after reset. 999 * of re-init after reset.
1000 */ 1000 */
1001 if (INTEL_INFO(dev)->gen > 5) 1001 if (INTEL_INFO(dev)->gen > 5)
1002 intel_enable_gt_powersave(dev); 1002 intel_enable_gt_powersave(dev_priv);
1003 1003
1004 return 0; 1004 return 0;
1005 1005
@@ -1107,6 +1107,49 @@ static int i915_pm_resume(struct device *dev)
1107 return i915_drm_resume(drm_dev); 1107 return i915_drm_resume(drm_dev);
1108} 1108}
1109 1109
1110/* freeze: before creating the hibernation_image */
1111static int i915_pm_freeze(struct device *dev)
1112{
1113 return i915_pm_suspend(dev);
1114}
1115
1116static int i915_pm_freeze_late(struct device *dev)
1117{
1118 int ret;
1119
1120 ret = i915_pm_suspend_late(dev);
1121 if (ret)
1122 return ret;
1123
1124 ret = i915_gem_freeze_late(dev_to_i915(dev));
1125 if (ret)
1126 return ret;
1127
1128 return 0;
1129}
1130
1131/* thaw: called after creating the hibernation image, but before turning off. */
1132static int i915_pm_thaw_early(struct device *dev)
1133{
1134 return i915_pm_resume_early(dev);
1135}
1136
1137static int i915_pm_thaw(struct device *dev)
1138{
1139 return i915_pm_resume(dev);
1140}
1141
1142/* restore: called after loading the hibernation image. */
1143static int i915_pm_restore_early(struct device *dev)
1144{
1145 return i915_pm_resume_early(dev);
1146}
1147
1148static int i915_pm_restore(struct device *dev)
1149{
1150 return i915_pm_resume(dev);
1151}
1152
1110/* 1153/*
1111 * Save all Gunit registers that may be lost after a D3 and a subsequent 1154 * Save all Gunit registers that may be lost after a D3 and a subsequent
1112 * S0i[R123] transition. The list of registers needing a save/restore is 1155 * S0i[R123] transition. The list of registers needing a save/restore is
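The new freeze/thaw/restore wrappers mostly alias the existing suspend/resume paths; only freeze_late gains i915-specific work via i915_gem_freeze_late(). For orientation, the PM core drives hibernation through these callbacks in the standard order (unchanged by this patch):

/* Standard hibernation sequence as the PM core invokes it:
 *   freeze, freeze_late        - quiesce before the image is created
 *   thaw_early, thaw           - resume after the image is created
 *   poweroff, poweroff_late    - power down once the image is written
 *   restore_early, restore     - resume after the image is loaded back
 */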
@@ -1470,7 +1513,7 @@ static int intel_runtime_suspend(struct device *device)
1470 struct drm_i915_private *dev_priv = dev->dev_private; 1513 struct drm_i915_private *dev_priv = dev->dev_private;
1471 int ret; 1514 int ret;
1472 1515
1473 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev)))) 1516 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
1474 return -ENODEV; 1517 return -ENODEV;
1475 1518
1476 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) 1519 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
@@ -1509,7 +1552,7 @@ static int intel_runtime_suspend(struct device *device)
1509 1552
1510 intel_guc_suspend(dev); 1553 intel_guc_suspend(dev);
1511 1554
1512 intel_suspend_gt_powersave(dev); 1555 intel_suspend_gt_powersave(dev_priv);
1513 intel_runtime_pm_disable_interrupts(dev_priv); 1556 intel_runtime_pm_disable_interrupts(dev_priv);
1514 1557
1515 ret = 0; 1558 ret = 0;
@@ -1531,7 +1574,7 @@ static int intel_runtime_suspend(struct device *device)
1531 return ret; 1574 return ret;
1532 } 1575 }
1533 1576
1534 intel_uncore_forcewake_reset(dev, false); 1577 intel_uncore_forcewake_reset(dev_priv, false);
1535 1578
1536 enable_rpm_wakeref_asserts(dev_priv); 1579 enable_rpm_wakeref_asserts(dev_priv);
1537 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); 1580 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
@@ -1612,7 +1655,7 @@ static int intel_runtime_resume(struct device *device)
1612 * we can do is to hope that things will still work (and disable RPM). 1655 * we can do is to hope that things will still work (and disable RPM).
1613 */ 1656 */
1614 i915_gem_init_swizzling(dev); 1657 i915_gem_init_swizzling(dev);
1615 gen6_update_ring_freq(dev); 1658 gen6_update_ring_freq(dev_priv);
1616 1659
1617 intel_runtime_pm_enable_interrupts(dev_priv); 1660 intel_runtime_pm_enable_interrupts(dev_priv);
1618 1661
@@ -1624,7 +1667,7 @@ static int intel_runtime_resume(struct device *device)
1624 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 1667 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
1625 intel_hpd_init(dev_priv); 1668 intel_hpd_init(dev_priv);
1626 1669
1627 intel_enable_gt_powersave(dev); 1670 intel_enable_gt_powersave(dev_priv);
1628 1671
1629 enable_rpm_wakeref_asserts(dev_priv); 1672 enable_rpm_wakeref_asserts(dev_priv);
1630 1673
@@ -1661,14 +1704,14 @@ static const struct dev_pm_ops i915_pm_ops = {
1661 * @restore, @restore_early : called after rebooting and restoring the 1704 * @restore, @restore_early : called after rebooting and restoring the
1662 * hibernation image [PMSG_RESTORE] 1705 * hibernation image [PMSG_RESTORE]
1663 */ 1706 */
1664 .freeze = i915_pm_suspend, 1707 .freeze = i915_pm_freeze,
1665 .freeze_late = i915_pm_suspend_late, 1708 .freeze_late = i915_pm_freeze_late,
1666 .thaw_early = i915_pm_resume_early, 1709 .thaw_early = i915_pm_thaw_early,
1667 .thaw = i915_pm_resume, 1710 .thaw = i915_pm_thaw,
1668 .poweroff = i915_pm_suspend, 1711 .poweroff = i915_pm_suspend,
1669 .poweroff_late = i915_pm_poweroff_late, 1712 .poweroff_late = i915_pm_poweroff_late,
1670 .restore_early = i915_pm_resume_early, 1713 .restore_early = i915_pm_restore_early,
1671 .restore = i915_pm_resume, 1714 .restore = i915_pm_restore,
1672 1715
1673 /* S0ix (via runtime suspend) event handlers */ 1716 /* S0ix (via runtime suspend) event handlers */
1674 .runtime_suspend = intel_runtime_suspend, 1717 .runtime_suspend = intel_runtime_suspend,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5faacc6e548d..b4ea941d87f3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -66,7 +66,7 @@
66 66
67#define DRIVER_NAME "i915" 67#define DRIVER_NAME "i915"
68#define DRIVER_DESC "Intel Graphics" 68#define DRIVER_DESC "Intel Graphics"
69#define DRIVER_DATE "20160425" 69#define DRIVER_DATE "20160522"
70 70
71#undef WARN_ON 71#undef WARN_ON
 72/* Many gcc seem to not see through this and fall over :( */ 72/* Many gcc seem to not see through this and fall over :( */
@@ -324,6 +324,12 @@ struct i915_hotplug {
324 &dev->mode_config.plane_list, \ 324 &dev->mode_config.plane_list, \
325 base.head) 325 base.head)
326 326
327#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
328 list_for_each_entry(intel_plane, &dev->mode_config.plane_list, \
329 base.head) \
330 for_each_if ((plane_mask) & \
331 (1 << drm_plane_index(&intel_plane->base)))
332
327#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \ 333#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
328 list_for_each_entry(intel_plane, \ 334 list_for_each_entry(intel_plane, \
329 &(dev)->mode_config.plane_list, \ 335 &(dev)->mode_config.plane_list, \
@@ -333,6 +339,10 @@ struct i915_hotplug {
333#define for_each_intel_crtc(dev, intel_crtc) \ 339#define for_each_intel_crtc(dev, intel_crtc) \
334 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) 340 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
335 341
342#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
343 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) \
344 for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
345
336#define for_each_intel_encoder(dev, intel_encoder) \ 346#define for_each_intel_encoder(dev, intel_encoder) \
337 list_for_each_entry(intel_encoder, \ 347 list_for_each_entry(intel_encoder, \
338 &(dev)->mode_config.encoder_list, \ 348 &(dev)->mode_config.encoder_list, \
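A minimal usage sketch for the new mask-filtered iterators; the function and variable names below are placeholders, not code from this series:

/* Sketch: visit only the planes whose drm_plane_index() bit is set in a
 * caller-provided mask, e.g. one accumulated during an atomic check.
 */
static void example_log_masked_planes(struct drm_device *dev,
				      unsigned int plane_mask)
{
	struct intel_plane *intel_plane;

	for_each_intel_plane_mask(dev, intel_plane, plane_mask)
		DRM_DEBUG_KMS("plane %u selected by mask 0x%x\n",
			      drm_plane_index(&intel_plane->base), plane_mask);
}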
@@ -588,6 +598,7 @@ struct drm_i915_display_funcs {
588 struct intel_crtc_state *newstate); 598 struct intel_crtc_state *newstate);
589 void (*initial_watermarks)(struct intel_crtc_state *cstate); 599 void (*initial_watermarks)(struct intel_crtc_state *cstate);
590 void (*optimize_watermarks)(struct intel_crtc_state *cstate); 600 void (*optimize_watermarks)(struct intel_crtc_state *cstate);
601 int (*compute_global_watermarks)(struct drm_atomic_state *state);
591 void (*update_wm)(struct drm_crtc *crtc); 602 void (*update_wm)(struct drm_crtc *crtc);
592 int (*modeset_calc_cdclk)(struct drm_atomic_state *state); 603 int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
593 void (*modeset_commit_cdclk)(struct drm_atomic_state *state); 604 void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@@ -612,7 +623,7 @@ struct drm_i915_display_funcs {
612 struct drm_i915_gem_object *obj, 623 struct drm_i915_gem_object *obj,
613 struct drm_i915_gem_request *req, 624 struct drm_i915_gem_request *req,
614 uint32_t flags); 625 uint32_t flags);
615 void (*hpd_irq_setup)(struct drm_device *dev); 626 void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
616 /* clock updates for mode set */ 627 /* clock updates for mode set */
617 /* cursor updates */ 628 /* cursor updates */
618 /* render clock increase/decrease */ 629 /* render clock increase/decrease */
@@ -735,6 +746,7 @@ struct intel_csr {
735 func(is_valleyview) sep \ 746 func(is_valleyview) sep \
736 func(is_cherryview) sep \ 747 func(is_cherryview) sep \
737 func(is_haswell) sep \ 748 func(is_haswell) sep \
749 func(is_broadwell) sep \
738 func(is_skylake) sep \ 750 func(is_skylake) sep \
739 func(is_broxton) sep \ 751 func(is_broxton) sep \
740 func(is_kabylake) sep \ 752 func(is_kabylake) sep \
@@ -757,9 +769,10 @@ struct intel_csr {
757struct intel_device_info { 769struct intel_device_info {
758 u32 display_mmio_offset; 770 u32 display_mmio_offset;
759 u16 device_id; 771 u16 device_id;
760 u8 num_pipes:3; 772 u8 num_pipes;
761 u8 num_sprites[I915_MAX_PIPES]; 773 u8 num_sprites[I915_MAX_PIPES];
762 u8 gen; 774 u8 gen;
775 u16 gen_mask;
763 u8 ring_mask; /* Rings supported by the HW */ 776 u8 ring_mask; /* Rings supported by the HW */
764 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); 777 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
765 /* Register offsets for the various display pipes and transcoders */ 778 /* Register offsets for the various display pipes and transcoders */
@@ -851,6 +864,9 @@ struct intel_context {
851 struct i915_ctx_hang_stats hang_stats; 864 struct i915_ctx_hang_stats hang_stats;
852 struct i915_hw_ppgtt *ppgtt; 865 struct i915_hw_ppgtt *ppgtt;
853 866
867 /* Unique identifier for this context, used by the hw for tracking */
868 unsigned hw_id;
869
854 /* Legacy ring buffer submission */ 870 /* Legacy ring buffer submission */
855 struct { 871 struct {
856 struct drm_i915_gem_object *rcs_state; 872 struct drm_i915_gem_object *rcs_state;
@@ -865,6 +881,7 @@ struct intel_context {
865 struct i915_vma *lrc_vma; 881 struct i915_vma *lrc_vma;
866 u64 lrc_desc; 882 u64 lrc_desc;
867 uint32_t *lrc_reg_state; 883 uint32_t *lrc_reg_state;
884 bool initialised;
868 } engine[I915_NUM_ENGINES]; 885 } engine[I915_NUM_ENGINES];
869 886
870 struct list_head link; 887 struct list_head link;
@@ -1488,6 +1505,7 @@ struct intel_vbt_data {
1488 bool present; 1505 bool present;
1489 bool active_low_pwm; 1506 bool active_low_pwm;
1490 u8 min_brightness; /* min_brightness/255 of max */ 1507 u8 min_brightness; /* min_brightness/255 of max */
1508 enum intel_backlight_type type;
1491 } backlight; 1509 } backlight;
1492 1510
1493 /* MIPI DSI */ 1511 /* MIPI DSI */
@@ -1580,7 +1598,7 @@ struct skl_ddb_allocation {
1580}; 1598};
1581 1599
1582struct skl_wm_values { 1600struct skl_wm_values {
1583 bool dirty[I915_MAX_PIPES]; 1601 unsigned dirty_pipes;
1584 struct skl_ddb_allocation ddb; 1602 struct skl_ddb_allocation ddb;
1585 uint32_t wm_linetime[I915_MAX_PIPES]; 1603 uint32_t wm_linetime[I915_MAX_PIPES];
1586 uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8]; 1604 uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
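dirty_pipes turns the old per-pipe bool array into a bitmask indexed by drm_crtc_index(); a sketch of the resulting test, with placeholder names, assuming the usual drm_crtc_mask() helper:

static bool example_pipe_is_dirty(const struct skl_wm_values *results,
				  struct intel_crtc *intel_crtc)
{
	/* One bit per pipe instead of bool dirty[I915_MAX_PIPES] */
	return results->dirty_pipes & drm_crtc_mask(&intel_crtc->base);
}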
@@ -1838,6 +1856,13 @@ struct drm_i915_private {
1838 DECLARE_HASHTABLE(mm_structs, 7); 1856 DECLARE_HASHTABLE(mm_structs, 7);
1839 struct mutex mm_lock; 1857 struct mutex mm_lock;
1840 1858
1859 /* The hw wants to have a stable context identifier for the lifetime
1860 * of the context (for OA, PASID, faults, etc). This is limited
1861 * in execlists to 21 bits.
1862 */
1863 struct ida context_hw_ida;
1864#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
1865
1841 /* Kernel Modesetting */ 1866 /* Kernel Modesetting */
1842 1867
1843 struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; 1868 struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
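The ida backing the new per-context hw_id is consumed in i915_gem_context.c, which is not shown in this hunk; a hedged sketch of the expected allocate/release pattern, with placeholder helper names:

/* Sketch only: carving a stable hw_id out of the new ida. The real
 * allocation code is elsewhere in the series; these helpers are
 * illustrative.
 */
static int example_assign_hw_id(struct drm_i915_private *dev_priv,
				struct intel_context *ctx)
{
	int ret;

	/* range is 0 .. MAX_CONTEXT_HW_ID-1, matching the "exclusive" note */
	ret = ida_simple_get(&dev_priv->context_hw_ida,
			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ctx->hw_id = ret;
	return 0;
}

static void example_release_hw_id(struct drm_i915_private *dev_priv,
				  struct intel_context *ctx)
{
	ida_simple_remove(&dev_priv->context_hw_ida, ctx->hw_id);
}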
@@ -1950,9 +1975,6 @@ struct drm_i915_private {
1950 */ 1975 */
1951 uint16_t skl_latency[8]; 1976 uint16_t skl_latency[8];
1952 1977
1953 /* Committed wm config */
1954 struct intel_wm_config config;
1955
1956 /* 1978 /*
1957 * The skl_wm_values structure is a bit too big for stack 1979 * The skl_wm_values structure is a bit too big for stack
1958 * allocation, so we keep the staging struct where we store 1980 * allocation, so we keep the staging struct where we store
@@ -1975,6 +1997,13 @@ struct drm_i915_private {
1975 * cstate->wm.need_postvbl_update. 1997 * cstate->wm.need_postvbl_update.
1976 */ 1998 */
1977 struct mutex wm_mutex; 1999 struct mutex wm_mutex;
2000
2001 /*
2002 * Set during HW readout of watermarks/DDB. Some platforms
2003 * need to know when we're still using BIOS-provided values
2004 * (which we don't fully trust).
2005 */
2006 bool distrust_bios_wm;
1978 } wm; 2007 } wm;
1979 2008
1980 struct i915_runtime_pm pm; 2009 struct i915_runtime_pm pm;
@@ -2227,9 +2256,75 @@ struct drm_i915_gem_object {
2227}; 2256};
2228#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 2257#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
2229 2258
2230void i915_gem_track_fb(struct drm_i915_gem_object *old, 2259/*
2231 struct drm_i915_gem_object *new, 2260 * Optimised SGL iterator for GEM objects
2232 unsigned frontbuffer_bits); 2261 */
2262static __always_inline struct sgt_iter {
2263 struct scatterlist *sgp;
2264 union {
2265 unsigned long pfn;
2266 dma_addr_t dma;
2267 };
2268 unsigned int curr;
2269 unsigned int max;
2270} __sgt_iter(struct scatterlist *sgl, bool dma) {
2271 struct sgt_iter s = { .sgp = sgl };
2272
2273 if (s.sgp) {
2274 s.max = s.curr = s.sgp->offset;
2275 s.max += s.sgp->length;
2276 if (dma)
2277 s.dma = sg_dma_address(s.sgp);
2278 else
2279 s.pfn = page_to_pfn(sg_page(s.sgp));
2280 }
2281
2282 return s;
2283}
2284
2285/**
2286 * __sg_next - return the next scatterlist entry in a list
2287 * @sg: The current sg entry
2288 *
2289 * Description:
2290 * If the entry is the last, return NULL; otherwise, step to the next
2291 * element in the array (@sg@+1). If that's a chain pointer, follow it;
2292 * otherwise just return the pointer to the current element.
2293 **/
2294static inline struct scatterlist *__sg_next(struct scatterlist *sg)
2295{
2296#ifdef CONFIG_DEBUG_SG
2297 BUG_ON(sg->sg_magic != SG_MAGIC);
2298#endif
2299 return sg_is_last(sg) ? NULL :
2300 likely(!sg_is_chain(++sg)) ? sg :
2301 sg_chain_ptr(sg);
2302}
2303
2304/**
2305 * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
2306 * @__dmap: DMA address (output)
2307 * @__iter: 'struct sgt_iter' (iterator state, internal)
2308 * @__sgt: sg_table to iterate over (input)
2309 */
2310#define for_each_sgt_dma(__dmap, __iter, __sgt) \
2311 for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
2312 ((__dmap) = (__iter).dma + (__iter).curr); \
2313 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
2314 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0))
2315
2316/**
2317 * for_each_sgt_page - iterate over the pages of the given sg_table
2318 * @__pp: page pointer (output)
2319 * @__iter: 'struct sgt_iter' (iterator state, internal)
2320 * @__sgt: sg_table to iterate over (input)
2321 */
2322#define for_each_sgt_page(__pp, __iter, __sgt) \
2323 for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
2324 ((__pp) = (__iter).pfn == 0 ? NULL : \
2325 pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
2326 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
2327 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
2233 2328
2234/** 2329/**
2235 * Request queue structure. 2330 * Request queue structure.
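A usage sketch for the new SGL iterators (not from the patch); obj is a placeholder argument, while struct sgt_iter and the macros are exactly the ones defined above:

static void example_clflush_object_pages(struct drm_i915_gem_object *obj)
{
	struct sgt_iter iter;
	struct page *page;

	/* obj->pages is the object's sg_table; the macro walks every CPU
	 * page it maps, following sg chains via __sg_next(). The DMA
	 * variant, for_each_sgt_dma(), yields dma_addr_t values instead.
	 */
	for_each_sgt_page(page, iter, obj->pages)
		drm_clflush_pages(&page, 1);
}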
@@ -2278,6 +2373,9 @@ struct drm_i915_gem_request {
2278 /** Position in the ringbuffer of the end of the whole request */ 2373 /** Position in the ringbuffer of the end of the whole request */
2279 u32 tail; 2374 u32 tail;
2280 2375
2376 /** Preallocated space in the ringbuffer reserved for emitting the request */
2377 u32 reserved_space;
2378
2281 /** 2379 /**
2282 * Context and ring buffer related to this request 2380 * Context and ring buffer related to this request
2283 * Contexts are refcounted, so when this request is associated with a 2381 * Contexts are refcounted, so when this request is associated with a
@@ -2291,6 +2389,17 @@ struct drm_i915_gem_request {
2291 struct intel_context *ctx; 2389 struct intel_context *ctx;
2292 struct intel_ringbuffer *ringbuf; 2390 struct intel_ringbuffer *ringbuf;
2293 2391
2392 /**
2393 * Context related to the previous request.
2394 * As the contexts are accessed by the hardware until the switch is
2395 * completed to a new context, the hardware may still be writing
2396 * to the context object after the breadcrumb is visible. We must
2397 * not unpin/unbind/prune that object whilst still active and so
2398 * we keep the previous context pinned until the following (this)
2399 * request is retired.
2400 */
2401 struct intel_context *previous_context;
2402
2294 /** Batch buffer related to this request if any (used for 2403 /** Batch buffer related to this request if any (used for
2295 error state dump only) */ 2404 error state dump only) */
2296 struct drm_i915_gem_object *batch_obj; 2405 struct drm_i915_gem_object *batch_obj;
@@ -2327,6 +2436,8 @@ struct drm_i915_gem_request {
2327 /** Execlists no. of times this request has been sent to the ELSP */ 2436 /** Execlists no. of times this request has been sent to the ELSP */
2328 int elsp_submitted; 2437 int elsp_submitted;
2329 2438
2439 /** Execlists context hardware id. */
2440 unsigned ctx_hw_id;
2330}; 2441};
2331 2442
2332struct drm_i915_gem_request * __must_check 2443struct drm_i915_gem_request * __must_check
@@ -2359,23 +2470,9 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
2359static inline void 2470static inline void
2360i915_gem_request_unreference(struct drm_i915_gem_request *req) 2471i915_gem_request_unreference(struct drm_i915_gem_request *req)
2361{ 2472{
2362 WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex));
2363 kref_put(&req->ref, i915_gem_request_free); 2473 kref_put(&req->ref, i915_gem_request_free);
2364} 2474}
2365 2475
2366static inline void
2367i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
2368{
2369 struct drm_device *dev;
2370
2371 if (!req)
2372 return;
2373
2374 dev = req->engine->dev;
2375 if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
2376 mutex_unlock(&dev->struct_mutex);
2377}
2378
2379static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, 2476static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
2380 struct drm_i915_gem_request *src) 2477 struct drm_i915_gem_request *src)
2381{ 2478{
@@ -2503,9 +2600,29 @@ struct drm_i915_cmd_table {
2503#define INTEL_INFO(p) (&__I915__(p)->info) 2600#define INTEL_INFO(p) (&__I915__(p)->info)
2504#define INTEL_GEN(p) (INTEL_INFO(p)->gen) 2601#define INTEL_GEN(p) (INTEL_INFO(p)->gen)
2505#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) 2602#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
2506#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
2507 2603
2508#define REVID_FOREVER 0xff 2604#define REVID_FOREVER 0xff
2605#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
2606
2607#define GEN_FOREVER (0)
2608/*
2609 * Returns true if Gen is in inclusive range [Start, End].
2610 *
2611 * Use GEN_FOREVER for an unbound start and/or end.
2612 */
2613#define IS_GEN(p, s, e) ({ \
2614 unsigned int __s = (s), __e = (e); \
2615 BUILD_BUG_ON(!__builtin_constant_p(s)); \
2616 BUILD_BUG_ON(!__builtin_constant_p(e)); \
2617 if ((__s) != GEN_FOREVER) \
2618 __s = (s) - 1; \
2619 if ((__e) == GEN_FOREVER) \
2620 __e = BITS_PER_LONG - 1; \
2621 else \
2622 __e = (e) - 1; \
2623 !!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
2624})
2625
2509/* 2626/*
2510 * Return true if revision is in range [since,until] inclusive. 2627 * Return true if revision is in range [since,until] inclusive.
2511 * 2628 *
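A worked expansion of the new range check (illustrative numbers, not part of the patch), assuming a gen 9 device so gen_mask == BIT(8):

/* IS_GEN(dev_priv, 6, 8) on a gen 9 part:
 *   __s = 6 - 1 = 5, __e = 8 - 1 = 7
 *   GENMASK(7, 5) = 0x00e0
 *   gen_mask = BIT(9 - 1) = 0x0100
 *   0x0100 & 0x00e0 == 0  ->  false
 * With GEN_FOREVER as the end, __e becomes BITS_PER_LONG - 1, so
 * IS_GEN(dev_priv, 9, GEN_FOREVER) is true for gen 9 and anything newer.
 */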
@@ -2538,7 +2655,7 @@ struct drm_i915_cmd_table {
2538#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 2655#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
2539#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview) 2656#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview)
2540#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 2657#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
2541#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev)) 2658#define IS_BROADWELL(dev) (INTEL_INFO(dev)->is_broadwell)
2542#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) 2659#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
2543#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton) 2660#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
2544#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake) 2661#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake)
@@ -2606,14 +2723,14 @@ struct drm_i915_cmd_table {
2606 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 2723 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
2607 * chips, etc.). 2724 * chips, etc.).
2608 */ 2725 */
2609#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) 2726#define IS_GEN2(dev) (INTEL_INFO(dev)->gen_mask & BIT(1))
2610#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) 2727#define IS_GEN3(dev) (INTEL_INFO(dev)->gen_mask & BIT(2))
2611#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) 2728#define IS_GEN4(dev) (INTEL_INFO(dev)->gen_mask & BIT(3))
2612#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) 2729#define IS_GEN5(dev) (INTEL_INFO(dev)->gen_mask & BIT(4))
2613#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 2730#define IS_GEN6(dev) (INTEL_INFO(dev)->gen_mask & BIT(5))
2614#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) 2731#define IS_GEN7(dev) (INTEL_INFO(dev)->gen_mask & BIT(6))
2615#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) 2732#define IS_GEN8(dev) (INTEL_INFO(dev)->gen_mask & BIT(7))
2616#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9) 2733#define IS_GEN9(dev) (INTEL_INFO(dev)->gen_mask & BIT(8))
2617 2734
2618#define RENDER_RING (1<<RCS) 2735#define RENDER_RING (1<<RCS)
2619#define BSD_RING (1<<VCS) 2736#define BSD_RING (1<<VCS)
@@ -2686,7 +2803,7 @@ struct drm_i915_cmd_table {
2686 IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \ 2803 IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
2687 IS_KABYLAKE(dev) || IS_BROXTON(dev)) 2804 IS_KABYLAKE(dev) || IS_BROXTON(dev))
2688#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) 2805#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
2689#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) 2806#define HAS_RC6p(dev) (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
2690 2807
2691#define HAS_CSR(dev) (IS_GEN9(dev)) 2808#define HAS_CSR(dev) (IS_GEN9(dev))
2692 2809
@@ -2740,6 +2857,9 @@ extern int i915_max_ioctl;
2740extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); 2857extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
2741extern int i915_resume_switcheroo(struct drm_device *dev); 2858extern int i915_resume_switcheroo(struct drm_device *dev);
2742 2859
2860int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
2861 int enable_ppgtt);
2862
2743/* i915_dma.c */ 2863/* i915_dma.c */
2744void __printf(3, 4) 2864void __printf(3, 4)
2745__i915_printk(struct drm_i915_private *dev_priv, const char *level, 2865__i915_printk(struct drm_i915_private *dev_priv, const char *level,
@@ -2760,9 +2880,9 @@ extern void i915_driver_postclose(struct drm_device *dev,
2760extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2880extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
2761 unsigned long arg); 2881 unsigned long arg);
2762#endif 2882#endif
2763extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask); 2883extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
2764extern bool intel_has_gpu_reset(struct drm_device *dev); 2884extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
2765extern int i915_reset(struct drm_device *dev); 2885extern int i915_reset(struct drm_i915_private *dev_priv);
2766extern int intel_guc_reset(struct drm_i915_private *dev_priv); 2886extern int intel_guc_reset(struct drm_i915_private *dev_priv);
2767extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); 2887extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
2768extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 2888extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2772,30 +2892,33 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
2772int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 2892int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
2773 2893
2774/* intel_hotplug.c */ 2894/* intel_hotplug.c */
2775void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask); 2895void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
2896 u32 pin_mask, u32 long_mask);
2776void intel_hpd_init(struct drm_i915_private *dev_priv); 2897void intel_hpd_init(struct drm_i915_private *dev_priv);
2777void intel_hpd_init_work(struct drm_i915_private *dev_priv); 2898void intel_hpd_init_work(struct drm_i915_private *dev_priv);
2778void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 2899void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
2779bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); 2900bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
2780 2901
2781/* i915_irq.c */ 2902/* i915_irq.c */
2782void i915_queue_hangcheck(struct drm_device *dev); 2903void i915_queue_hangcheck(struct drm_i915_private *dev_priv);
2783__printf(3, 4) 2904__printf(3, 4)
2784void i915_handle_error(struct drm_device *dev, u32 engine_mask, 2905void i915_handle_error(struct drm_i915_private *dev_priv,
2906 u32 engine_mask,
2785 const char *fmt, ...); 2907 const char *fmt, ...);
2786 2908
2787extern void intel_irq_init(struct drm_i915_private *dev_priv); 2909extern void intel_irq_init(struct drm_i915_private *dev_priv);
2788int intel_irq_install(struct drm_i915_private *dev_priv); 2910int intel_irq_install(struct drm_i915_private *dev_priv);
2789void intel_irq_uninstall(struct drm_i915_private *dev_priv); 2911void intel_irq_uninstall(struct drm_i915_private *dev_priv);
2790 2912
2791extern void intel_uncore_sanitize(struct drm_device *dev); 2913extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
2792extern void intel_uncore_early_sanitize(struct drm_device *dev, 2914extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
2793 bool restore_forcewake); 2915 bool restore_forcewake);
2794extern void intel_uncore_init(struct drm_device *dev); 2916extern void intel_uncore_init(struct drm_i915_private *dev_priv);
2795extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); 2917extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
2796extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); 2918extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
2797extern void intel_uncore_fini(struct drm_device *dev); 2919extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
2798extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); 2920extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
2921 bool restore);
2799const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); 2922const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
2800void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 2923void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
2801 enum forcewake_domains domains); 2924 enum forcewake_domains domains);
@@ -2811,9 +2934,9 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
2811u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); 2934u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
2812 2935
2813void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); 2936void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
2814static inline bool intel_vgpu_active(struct drm_device *dev) 2937static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
2815{ 2938{
2816 return to_i915(dev)->vgpu.active; 2939 return dev_priv->vgpu.active;
2817} 2940}
2818 2941
2819void 2942void
@@ -2909,7 +3032,7 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
2909 struct drm_file *file_priv); 3032 struct drm_file *file_priv);
2910int i915_gem_get_tiling(struct drm_device *dev, void *data, 3033int i915_gem_get_tiling(struct drm_device *dev, void *data,
2911 struct drm_file *file_priv); 3034 struct drm_file *file_priv);
2912int i915_gem_init_userptr(struct drm_device *dev); 3035void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
2913int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, 3036int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
2914 struct drm_file *file); 3037 struct drm_file *file);
2915int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 3038int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
@@ -2919,11 +3042,13 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
2919void i915_gem_load_init(struct drm_device *dev); 3042void i915_gem_load_init(struct drm_device *dev);
2920void i915_gem_load_cleanup(struct drm_device *dev); 3043void i915_gem_load_cleanup(struct drm_device *dev);
2921void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); 3044void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
3045int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
3046
2922void *i915_gem_object_alloc(struct drm_device *dev); 3047void *i915_gem_object_alloc(struct drm_device *dev);
2923void i915_gem_object_free(struct drm_i915_gem_object *obj); 3048void i915_gem_object_free(struct drm_i915_gem_object *obj);
2924void i915_gem_object_init(struct drm_i915_gem_object *obj, 3049void i915_gem_object_init(struct drm_i915_gem_object *obj,
2925 const struct drm_i915_gem_object_ops *ops); 3050 const struct drm_i915_gem_object_ops *ops);
2926struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 3051struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
2927 size_t size); 3052 size_t size);
2928struct drm_i915_gem_object *i915_gem_object_create_from_data( 3053struct drm_i915_gem_object *i915_gem_object_create_from_data(
2929 struct drm_device *dev, const void *data, size_t size); 3054 struct drm_device *dev, const void *data, size_t size);
@@ -3054,6 +3179,11 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
3054 struct drm_mode_create_dumb *args); 3179 struct drm_mode_create_dumb *args);
3055int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 3180int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
3056 uint32_t handle, uint64_t *offset); 3181 uint32_t handle, uint64_t *offset);
3182
3183void i915_gem_track_fb(struct drm_i915_gem_object *old,
3184 struct drm_i915_gem_object *new,
3185 unsigned frontbuffer_bits);
3186
3057/** 3187/**
3058 * Returns true if seq1 is later than seq2. 3188 * Returns true if seq1 is later than seq2.
3059 */ 3189 */
@@ -3081,13 +3211,13 @@ static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
3081 req->seqno); 3211 req->seqno);
3082} 3212}
3083 3213
3084int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); 3214int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
3085int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); 3215int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
3086 3216
3087struct drm_i915_gem_request * 3217struct drm_i915_gem_request *
3088i915_gem_find_active_request(struct intel_engine_cs *engine); 3218i915_gem_find_active_request(struct intel_engine_cs *engine);
3089 3219
3090bool i915_gem_retire_requests(struct drm_device *dev); 3220bool i915_gem_retire_requests(struct drm_i915_private *dev_priv);
3091void i915_gem_retire_requests_ring(struct intel_engine_cs *engine); 3221void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
3092 3222
3093static inline u32 i915_reset_counter(struct i915_gpu_error *error) 3223static inline u32 i915_reset_counter(struct i915_gpu_error *error)
@@ -3147,7 +3277,6 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
3147int __must_check i915_gem_init(struct drm_device *dev); 3277int __must_check i915_gem_init(struct drm_device *dev);
3148int i915_gem_init_engines(struct drm_device *dev); 3278int i915_gem_init_engines(struct drm_device *dev);
3149int __must_check i915_gem_init_hw(struct drm_device *dev); 3279int __must_check i915_gem_init_hw(struct drm_device *dev);
3150int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
3151void i915_gem_init_swizzling(struct drm_device *dev); 3280void i915_gem_init_swizzling(struct drm_device *dev);
3152void i915_gem_cleanup_engines(struct drm_device *dev); 3281void i915_gem_cleanup_engines(struct drm_device *dev);
3153int __must_check i915_gpu_idle(struct drm_device *dev); 3282int __must_check i915_gpu_idle(struct drm_device *dev);
@@ -3215,8 +3344,6 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
3215bool i915_gem_obj_bound(struct drm_i915_gem_object *o, 3344bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
3216 struct i915_address_space *vm); 3345 struct i915_address_space *vm);
3217 3346
3218unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
3219 struct i915_address_space *vm);
3220struct i915_vma * 3347struct i915_vma *
3221i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, 3348i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
3222 struct i915_address_space *vm); 3349 struct i915_address_space *vm);
@@ -3251,14 +3378,8 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
3251 return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal); 3378 return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
3252} 3379}
3253 3380
3254static inline unsigned long 3381unsigned long
3255i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) 3382i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
3256{
3257 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3258 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3259
3260 return i915_gem_obj_size(obj, &ggtt->base);
3261}
3262 3383
3263static inline int __must_check 3384static inline int __must_check
3264i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, 3385i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
@@ -3272,12 +3393,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
3272 alignment, flags | PIN_GLOBAL); 3393 alignment, flags | PIN_GLOBAL);
3273} 3394}
3274 3395
3275static inline int
3276i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
3277{
3278 return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
3279}
3280
3281void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, 3396void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
3282 const struct i915_ggtt_view *view); 3397 const struct i915_ggtt_view *view);
3283static inline void 3398static inline void
@@ -3301,10 +3416,10 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
3301 3416
3302/* i915_gem_context.c */ 3417/* i915_gem_context.c */
3303int __must_check i915_gem_context_init(struct drm_device *dev); 3418int __must_check i915_gem_context_init(struct drm_device *dev);
3419void i915_gem_context_lost(struct drm_i915_private *dev_priv);
3304void i915_gem_context_fini(struct drm_device *dev); 3420void i915_gem_context_fini(struct drm_device *dev);
3305void i915_gem_context_reset(struct drm_device *dev); 3421void i915_gem_context_reset(struct drm_device *dev);
3306int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); 3422int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
3307int i915_gem_context_enable(struct drm_i915_gem_request *req);
3308void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); 3423void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
3309int i915_switch_context(struct drm_i915_gem_request *req); 3424int i915_switch_context(struct drm_i915_gem_request *req);
3310struct intel_context * 3425struct intel_context *
@@ -3335,6 +3450,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
3335 struct drm_file *file_priv); 3450 struct drm_file *file_priv);
3336int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, 3451int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
3337 struct drm_file *file_priv); 3452 struct drm_file *file_priv);
3453int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
3454 struct drm_file *file);
3338 3455
3339/* i915_gem_evict.c */ 3456/* i915_gem_evict.c */
3340int __must_check i915_gem_evict_something(struct drm_device *dev, 3457int __must_check i915_gem_evict_something(struct drm_device *dev,
@@ -3349,9 +3466,9 @@ int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
3349int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 3466int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
3350 3467
3351/* belongs in i915_gem_gtt.h */ 3468/* belongs in i915_gem_gtt.h */
3352static inline void i915_gem_chipset_flush(struct drm_device *dev) 3469static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
3353{ 3470{
3354 if (INTEL_INFO(dev)->gen < 6) 3471 if (INTEL_GEN(dev_priv) < 6)
3355 intel_gtt_chipset_flush(); 3472 intel_gtt_chipset_flush();
3356} 3473}
3357 3474
@@ -3430,18 +3547,19 @@ static inline void i915_error_state_buf_release(
3430{ 3547{
3431 kfree(eb->buf); 3548 kfree(eb->buf);
3432} 3549}
3433void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, 3550void i915_capture_error_state(struct drm_i915_private *dev_priv,
3551 u32 engine_mask,
3434 const char *error_msg); 3552 const char *error_msg);
3435void i915_error_state_get(struct drm_device *dev, 3553void i915_error_state_get(struct drm_device *dev,
3436 struct i915_error_state_file_priv *error_priv); 3554 struct i915_error_state_file_priv *error_priv);
3437void i915_error_state_put(struct i915_error_state_file_priv *error_priv); 3555void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
3438void i915_destroy_error_state(struct drm_device *dev); 3556void i915_destroy_error_state(struct drm_device *dev);
3439 3557
3440void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); 3558void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
3441const char *i915_cache_level_str(struct drm_i915_private *i915, int type); 3559const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
3442 3560
3443/* i915_cmd_parser.c */ 3561/* i915_cmd_parser.c */
3444int i915_cmd_parser_get_version(void); 3562int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
3445int i915_cmd_parser_init_ring(struct intel_engine_cs *engine); 3563int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
3446void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine); 3564void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
3447bool i915_needs_cmd_parser(struct intel_engine_cs *engine); 3565bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
@@ -3492,7 +3610,7 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
3492extern int intel_opregion_setup(struct drm_device *dev); 3610extern int intel_opregion_setup(struct drm_device *dev);
3493extern void intel_opregion_init(struct drm_device *dev); 3611extern void intel_opregion_init(struct drm_device *dev);
3494extern void intel_opregion_fini(struct drm_device *dev); 3612extern void intel_opregion_fini(struct drm_device *dev);
3495extern void intel_opregion_asle_intr(struct drm_device *dev); 3613extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
3496extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, 3614extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
3497 bool enable); 3615 bool enable);
3498extern int intel_opregion_notify_adapter(struct drm_device *dev, 3616extern int intel_opregion_notify_adapter(struct drm_device *dev,
@@ -3502,7 +3620,9 @@ extern int intel_opregion_get_panel_type(struct drm_device *dev);
3502static inline int intel_opregion_setup(struct drm_device *dev) { return 0; } 3620static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
3503static inline void intel_opregion_init(struct drm_device *dev) { return; } 3621static inline void intel_opregion_init(struct drm_device *dev) { return; }
3504static inline void intel_opregion_fini(struct drm_device *dev) { return; } 3622static inline void intel_opregion_fini(struct drm_device *dev) { return; }
3505static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } 3623static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
3624{
3625}
3506static inline int 3626static inline int
3507intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) 3627intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
3508{ 3628{
@@ -3538,26 +3658,25 @@ extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
3538extern void intel_display_resume(struct drm_device *dev); 3658extern void intel_display_resume(struct drm_device *dev);
3539extern void i915_redisable_vga(struct drm_device *dev); 3659extern void i915_redisable_vga(struct drm_device *dev);
3540extern void i915_redisable_vga_power_on(struct drm_device *dev); 3660extern void i915_redisable_vga_power_on(struct drm_device *dev);
3541extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 3661extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
3542extern void intel_init_pch_refclk(struct drm_device *dev); 3662extern void intel_init_pch_refclk(struct drm_device *dev);
3543extern void intel_set_rps(struct drm_device *dev, u8 val); 3663extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
3544extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, 3664extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
3545 bool enable); 3665 bool enable);
3546extern void intel_detect_pch(struct drm_device *dev); 3666extern void intel_detect_pch(struct drm_device *dev);
3547extern int intel_enable_rc6(const struct drm_device *dev);
3548 3667
3549extern bool i915_semaphore_is_enabled(struct drm_device *dev); 3668extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
3550int i915_reg_read_ioctl(struct drm_device *dev, void *data, 3669int i915_reg_read_ioctl(struct drm_device *dev, void *data,
3551 struct drm_file *file); 3670 struct drm_file *file);
3552int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
3553 struct drm_file *file);
3554 3671
3555/* overlay */ 3672/* overlay */
3556extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 3673extern struct intel_overlay_error_state *
3674intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
3557extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, 3675extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
3558 struct intel_overlay_error_state *error); 3676 struct intel_overlay_error_state *error);
3559 3677
3560extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); 3678extern struct intel_display_error_state *
3679intel_display_capture_error_state(struct drm_i915_private *dev_priv);
3561extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, 3680extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
3562 struct drm_device *dev, 3681 struct drm_device *dev,
3563 struct intel_display_error_state *error); 3682 struct intel_display_error_state *error);
@@ -3586,6 +3705,24 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
3586u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg); 3705u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
3587void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3706void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3588 3707
3708/* intel_dpio_phy.c */
3709void chv_set_phy_signal_level(struct intel_encoder *encoder,
3710 u32 deemph_reg_value, u32 margin_reg_value,
3711 bool uniq_trans_scale);
3712void chv_data_lane_soft_reset(struct intel_encoder *encoder,
3713 bool reset);
3714void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
3715void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
3716void chv_phy_release_cl2_override(struct intel_encoder *encoder);
3717void chv_phy_post_pll_disable(struct intel_encoder *encoder);
3718
3719void vlv_set_phy_signal_level(struct intel_encoder *encoder,
3720 u32 demph_reg_value, u32 preemph_reg_value,
3721 u32 uniqtranscale_reg_value, u32 tx3_demph);
3722void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
3723void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
3724void vlv_phy_reset_lanes(struct intel_encoder *encoder);
3725
3589int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); 3726int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
3590int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); 3727int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
3591 3728
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aad26851cee3..12407bc70c71 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
177 vaddr += PAGE_SIZE; 177 vaddr += PAGE_SIZE;
178 } 178 }
179 179
180 i915_gem_chipset_flush(obj->base.dev); 180 i915_gem_chipset_flush(to_i915(obj->base.dev));
181 181
182 st = kmalloc(sizeof(*st), GFP_KERNEL); 182 st = kmalloc(sizeof(*st), GFP_KERNEL);
183 if (st == NULL) 183 if (st == NULL)
@@ -347,7 +347,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
347 } 347 }
348 348
349 drm_clflush_virt_range(vaddr, args->size); 349 drm_clflush_virt_range(vaddr, args->size);
350 i915_gem_chipset_flush(dev); 350 i915_gem_chipset_flush(to_i915(dev));
351 351
352out: 352out:
353 intel_fb_obj_flush(obj, false, ORIGIN_CPU); 353 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
@@ -381,9 +381,9 @@ i915_gem_create(struct drm_file *file,
381 return -EINVAL; 381 return -EINVAL;
382 382
383 /* Allocate the new object */ 383 /* Allocate the new object */
384 obj = i915_gem_alloc_object(dev, size); 384 obj = i915_gem_object_create(dev, size);
385 if (obj == NULL) 385 if (IS_ERR(obj))
386 return -ENOMEM; 386 return PTR_ERR(obj);
387 387
388 ret = drm_gem_handle_create(file, &obj->base, &handle); 388 ret = drm_gem_handle_create(file, &obj->base, &handle);
389 /* drop reference from allocate - handle holds it now */ 389 /* drop reference from allocate - handle holds it now */
@@ -1006,7 +1006,7 @@ out:
1006 } 1006 }
1007 1007
1008 if (needs_clflush_after) 1008 if (needs_clflush_after)
1009 i915_gem_chipset_flush(dev); 1009 i915_gem_chipset_flush(to_i915(dev));
1010 else 1010 else
1011 obj->cache_dirty = true; 1011 obj->cache_dirty = true;
1012 1012
@@ -1230,8 +1230,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
1230 struct intel_rps_client *rps) 1230 struct intel_rps_client *rps)
1231{ 1231{
1232 struct intel_engine_cs *engine = i915_gem_request_get_engine(req); 1232 struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
1233 struct drm_device *dev = engine->dev; 1233 struct drm_i915_private *dev_priv = req->i915;
1234 struct drm_i915_private *dev_priv = dev->dev_private;
1235 const bool irq_test_in_progress = 1234 const bool irq_test_in_progress =
1236 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine); 1235 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
1237 int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; 1236 int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
@@ -1413,6 +1412,13 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
1413 list_del_init(&request->list); 1412 list_del_init(&request->list);
1414 i915_gem_request_remove_from_client(request); 1413 i915_gem_request_remove_from_client(request);
1415 1414
1415 if (request->previous_context) {
1416 if (i915.enable_execlists)
1417 intel_lr_context_unpin(request->previous_context,
1418 request->engine);
1419 }
1420
1421 i915_gem_context_unreference(request->ctx);
1416 i915_gem_request_unreference(request); 1422 i915_gem_request_unreference(request);
1417} 1423}
1418 1424
@@ -1422,7 +1428,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
1422 struct intel_engine_cs *engine = req->engine; 1428 struct intel_engine_cs *engine = req->engine;
1423 struct drm_i915_gem_request *tmp; 1429 struct drm_i915_gem_request *tmp;
1424 1430
1425 lockdep_assert_held(&engine->dev->struct_mutex); 1431 lockdep_assert_held(&engine->i915->dev->struct_mutex);
1426 1432
1427 if (list_empty(&req->list)) 1433 if (list_empty(&req->list))
1428 return; 1434 return;
@@ -1982,7 +1988,7 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1982 return size; 1988 return size;
1983 1989
1984 /* Previous chips need a power-of-two fence region when tiling */ 1990 /* Previous chips need a power-of-two fence region when tiling */
1985 if (INTEL_INFO(dev)->gen == 3) 1991 if (IS_GEN3(dev))
1986 gtt_size = 1024*1024; 1992 gtt_size = 1024*1024;
1987 else 1993 else
1988 gtt_size = 512*1024; 1994 gtt_size = 512*1024;
@@ -2162,7 +2168,8 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2162static void 2168static void
2163i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) 2169i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2164{ 2170{
2165 struct sg_page_iter sg_iter; 2171 struct sgt_iter sgt_iter;
2172 struct page *page;
2166 int ret; 2173 int ret;
2167 2174
2168 BUG_ON(obj->madv == __I915_MADV_PURGED); 2175 BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -2184,9 +2191,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2184 if (obj->madv == I915_MADV_DONTNEED) 2191 if (obj->madv == I915_MADV_DONTNEED)
2185 obj->dirty = 0; 2192 obj->dirty = 0;
2186 2193
2187 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 2194 for_each_sgt_page(page, sgt_iter, obj->pages) {
2188 struct page *page = sg_page_iter_page(&sg_iter);
2189
2190 if (obj->dirty) 2195 if (obj->dirty)
2191 set_page_dirty(page); 2196 set_page_dirty(page);
2192 2197
@@ -2243,7 +2248,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2243 struct address_space *mapping; 2248 struct address_space *mapping;
2244 struct sg_table *st; 2249 struct sg_table *st;
2245 struct scatterlist *sg; 2250 struct scatterlist *sg;
2246 struct sg_page_iter sg_iter; 2251 struct sgt_iter sgt_iter;
2247 struct page *page; 2252 struct page *page;
2248 unsigned long last_pfn = 0; /* suppress gcc warning */ 2253 unsigned long last_pfn = 0; /* suppress gcc warning */
2249 int ret; 2254 int ret;
@@ -2340,8 +2345,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2340 2345
2341err_pages: 2346err_pages:
2342 sg_mark_end(sg); 2347 sg_mark_end(sg);
2343 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) 2348 for_each_sgt_page(page, sgt_iter, st)
2344 put_page(sg_page_iter_page(&sg_iter)); 2349 put_page(page);
2345 sg_free_table(st); 2350 sg_free_table(st);
2346 kfree(st); 2351 kfree(st);
2347 2352
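The hunks above replace the scatterlist core's for_each_sg_page()/sg_page_iter_page() walk with the series' lighter for_each_sgt_page() iterator, which hands back the struct page directly from a struct sgt_iter. A minimal sketch of the shape of that conversion, using a hypothetical helper rather than the real call sites:

/* Sketch only, not part of the commit; mark_pages_dirty() is hypothetical. */
static void mark_pages_dirty(struct drm_i915_gem_object *obj)
{
	struct sgt_iter iter;
	struct page *page;

	/* Previously:
	 *	struct sg_page_iter sg_iter;
	 *	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
	 *		set_page_dirty(sg_page_iter_page(&sg_iter));
	 */
	for_each_sgt_page(page, iter, obj->pages)
		set_page_dirty(page);
}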
@@ -2395,6 +2400,44 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2395 return 0; 2400 return 0;
2396} 2401}
2397 2402
2403/* The 'mapping' part of i915_gem_object_pin_map() below */
2404static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
2405{
2406 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2407 struct sg_table *sgt = obj->pages;
2408 struct sgt_iter sgt_iter;
2409 struct page *page;
2410 struct page *stack_pages[32];
2411 struct page **pages = stack_pages;
2412 unsigned long i = 0;
2413 void *addr;
2414
2415 /* A single page can always be kmapped */
2416 if (n_pages == 1)
2417 return kmap(sg_page(sgt->sgl));
2418
2419 if (n_pages > ARRAY_SIZE(stack_pages)) {
2420 /* Too big for stack -- allocate temporary array instead */
2421 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2422 if (!pages)
2423 return NULL;
2424 }
2425
2426 for_each_sgt_page(page, sgt_iter, sgt)
2427 pages[i++] = page;
2428
2429 /* Check that we have the expected number of pages */
2430 GEM_BUG_ON(i != n_pages);
2431
2432 addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
2433
2434 if (pages != stack_pages)
2435 drm_free_large(pages);
2436
2437 return addr;
2438}
2439
2440/* get, pin, and map the pages of the object into kernel space */
2398void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj) 2441void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
2399{ 2442{
2400 int ret; 2443 int ret;
@@ -2407,29 +2450,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
2407 2450
2408 i915_gem_object_pin_pages(obj); 2451 i915_gem_object_pin_pages(obj);
2409 2452
2410 if (obj->mapping == NULL) { 2453 if (!obj->mapping) {
2411 struct page **pages; 2454 obj->mapping = i915_gem_object_map(obj);
2412 2455 if (!obj->mapping) {
2413 pages = NULL;
2414 if (obj->base.size == PAGE_SIZE)
2415 obj->mapping = kmap(sg_page(obj->pages->sgl));
2416 else
2417 pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
2418 sizeof(*pages),
2419 GFP_TEMPORARY);
2420 if (pages != NULL) {
2421 struct sg_page_iter sg_iter;
2422 int n;
2423
2424 n = 0;
2425 for_each_sg_page(obj->pages->sgl, &sg_iter,
2426 obj->pages->nents, 0)
2427 pages[n++] = sg_page_iter_page(&sg_iter);
2428
2429 obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
2430 drm_free_large(pages);
2431 }
2432 if (obj->mapping == NULL) {
2433 i915_gem_object_unpin_pages(obj); 2456 i915_gem_object_unpin_pages(obj);
2434 return ERR_PTR(-ENOMEM); 2457 return ERR_PTR(-ENOMEM);
2435 } 2458 }
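i915_gem_object_pin_map() now defers the actual mapping to the new i915_gem_object_map() helper above: a single page is simply kmap()ed, up to 32 pages are gathered into an on-stack array, and anything larger falls back to a drm_malloc_gfp() allocation before vmap(). Caller-visible behaviour is unchanged; a minimal usage sketch, assuming the usual pin/unpin pairing and that struct_mutex is already held:

/* Sketch only, not part of the commit; clear_object() is hypothetical. */
static int clear_object(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj);	/* caller holds struct_mutex */
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);		/* -ENOMEM if the map could not be built */

	memset(vaddr, 0, obj->base.size);	/* use the linear mapping */

	i915_gem_object_unpin_map(obj);
	return 0;
}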
@@ -2502,9 +2525,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
2502} 2525}
2503 2526
2504static int 2527static int
2505i915_gem_init_seqno(struct drm_device *dev, u32 seqno) 2528i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
2506{ 2529{
2507 struct drm_i915_private *dev_priv = dev->dev_private;
2508 struct intel_engine_cs *engine; 2530 struct intel_engine_cs *engine;
2509 int ret; 2531 int ret;
2510 2532
@@ -2514,7 +2536,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2514 if (ret) 2536 if (ret)
2515 return ret; 2537 return ret;
2516 } 2538 }
2517 i915_gem_retire_requests(dev); 2539 i915_gem_retire_requests(dev_priv);
2518 2540
2519 /* Finally reset hw state */ 2541 /* Finally reset hw state */
2520 for_each_engine(engine, dev_priv) 2542 for_each_engine(engine, dev_priv)
@@ -2534,7 +2556,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2534 /* HWS page needs to be set less than what we 2556 /* HWS page needs to be set less than what we
2535 * will inject to ring 2557 * will inject to ring
2536 */ 2558 */
2537 ret = i915_gem_init_seqno(dev, seqno - 1); 2559 ret = i915_gem_init_seqno(dev_priv, seqno - 1);
2538 if (ret) 2560 if (ret)
2539 return ret; 2561 return ret;
2540 2562
@@ -2550,13 +2572,11 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2550} 2572}
2551 2573
2552int 2574int
2553i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) 2575i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
2554{ 2576{
2555 struct drm_i915_private *dev_priv = dev->dev_private;
2556
2557 /* reserve 0 for non-seqno */ 2577 /* reserve 0 for non-seqno */
2558 if (dev_priv->next_seqno == 0) { 2578 if (dev_priv->next_seqno == 0) {
2559 int ret = i915_gem_init_seqno(dev, 0); 2579 int ret = i915_gem_init_seqno(dev_priv, 0);
2560 if (ret) 2580 if (ret)
2561 return ret; 2581 return ret;
2562 2582
@@ -2580,6 +2600,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
2580 struct drm_i915_private *dev_priv; 2600 struct drm_i915_private *dev_priv;
2581 struct intel_ringbuffer *ringbuf; 2601 struct intel_ringbuffer *ringbuf;
2582 u32 request_start; 2602 u32 request_start;
2603 u32 reserved_tail;
2583 int ret; 2604 int ret;
2584 2605
2585 if (WARN_ON(request == NULL)) 2606 if (WARN_ON(request == NULL))
@@ -2594,9 +2615,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
2594 * should already have been reserved in the ring buffer. Let the ring 2615 * should already have been reserved in the ring buffer. Let the ring
2595 * know that it is time to use that space up. 2616 * know that it is time to use that space up.
2596 */ 2617 */
2597 intel_ring_reserved_space_use(ringbuf);
2598
2599 request_start = intel_ring_get_tail(ringbuf); 2618 request_start = intel_ring_get_tail(ringbuf);
2619 reserved_tail = request->reserved_space;
2620 request->reserved_space = 0;
2621
2600 /* 2622 /*
2601 * Emit any outstanding flushes - execbuf can fail to emit the flush 2623 * Emit any outstanding flushes - execbuf can fail to emit the flush
2602 * after having emitted the batchbuffer command. Hence we need to fix 2624 * after having emitted the batchbuffer command. Hence we need to fix
@@ -2652,15 +2674,21 @@ void __i915_add_request(struct drm_i915_gem_request *request,
2652 /* Not allowed to fail! */ 2674 /* Not allowed to fail! */
2653 WARN(ret, "emit|add_request failed: %d!\n", ret); 2675 WARN(ret, "emit|add_request failed: %d!\n", ret);
2654 2676
2655 i915_queue_hangcheck(engine->dev); 2677 i915_queue_hangcheck(engine->i915);
2656 2678
2657 queue_delayed_work(dev_priv->wq, 2679 queue_delayed_work(dev_priv->wq,
2658 &dev_priv->mm.retire_work, 2680 &dev_priv->mm.retire_work,
2659 round_jiffies_up_relative(HZ)); 2681 round_jiffies_up_relative(HZ));
2660 intel_mark_busy(dev_priv->dev); 2682 intel_mark_busy(dev_priv);
2661 2683
2662 /* Sanity check that the reserved size was large enough. */ 2684 /* Sanity check that the reserved size was large enough. */
2663 intel_ring_reserved_space_end(ringbuf); 2685 ret = intel_ring_get_tail(ringbuf) - request_start;
2686 if (ret < 0)
2687 ret += ringbuf->size;
2688 WARN_ONCE(ret > reserved_tail,
2689 "Not enough space reserved (%d bytes) "
2690 "for adding the request (%d bytes)\n",
2691 reserved_tail, ret);
2664} 2692}
2665 2693
2666static bool i915_context_is_banned(struct drm_i915_private *dev_priv, 2694static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
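__i915_add_request() no longer relies on the intel_ring_reserved_space_use()/_end() bookkeeping; the reservation is made at request allocation (req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST, later in this diff), consumed here, and then sanity-checked with a wrap-aware subtraction of ring tails. The check boils down to the following free-standing sketch (names are illustrative, not from the commit):

/* Sketch only: bytes consumed between two tail offsets of a circular ring. */
static u32 ring_bytes_used(u32 start, u32 tail, u32 size)
{
	int used = tail - start;

	if (used < 0)		/* the tail wrapped past the end of the ring */
		used += size;

	return used;
}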
@@ -2712,18 +2740,6 @@ void i915_gem_request_free(struct kref *req_ref)
2712{ 2740{
2713 struct drm_i915_gem_request *req = container_of(req_ref, 2741 struct drm_i915_gem_request *req = container_of(req_ref,
2714 typeof(*req), ref); 2742 typeof(*req), ref);
2715 struct intel_context *ctx = req->ctx;
2716
2717 if (req->file_priv)
2718 i915_gem_request_remove_from_client(req);
2719
2720 if (ctx) {
2721 if (i915.enable_execlists && ctx != req->i915->kernel_context)
2722 intel_lr_context_unpin(ctx, req->engine);
2723
2724 i915_gem_context_unreference(ctx);
2725 }
2726
2727 kmem_cache_free(req->i915->requests, req); 2743 kmem_cache_free(req->i915->requests, req);
2728} 2744}
2729 2745
@@ -2732,7 +2748,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
2732 struct intel_context *ctx, 2748 struct intel_context *ctx,
2733 struct drm_i915_gem_request **req_out) 2749 struct drm_i915_gem_request **req_out)
2734{ 2750{
2735 struct drm_i915_private *dev_priv = to_i915(engine->dev); 2751 struct drm_i915_private *dev_priv = engine->i915;
2736 unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error); 2752 unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
2737 struct drm_i915_gem_request *req; 2753 struct drm_i915_gem_request *req;
2738 int ret; 2754 int ret;
@@ -2754,7 +2770,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
2754 if (req == NULL) 2770 if (req == NULL)
2755 return -ENOMEM; 2771 return -ENOMEM;
2756 2772
2757 ret = i915_gem_get_seqno(engine->dev, &req->seqno); 2773 ret = i915_gem_get_seqno(engine->i915, &req->seqno);
2758 if (ret) 2774 if (ret)
2759 goto err; 2775 goto err;
2760 2776
@@ -2765,15 +2781,6 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
2765 req->ctx = ctx; 2781 req->ctx = ctx;
2766 i915_gem_context_reference(req->ctx); 2782 i915_gem_context_reference(req->ctx);
2767 2783
2768 if (i915.enable_execlists)
2769 ret = intel_logical_ring_alloc_request_extras(req);
2770 else
2771 ret = intel_ring_alloc_request_extras(req);
2772 if (ret) {
2773 i915_gem_context_unreference(req->ctx);
2774 goto err;
2775 }
2776
2777 /* 2784 /*
2778 * Reserve space in the ring buffer for all the commands required to 2785 * Reserve space in the ring buffer for all the commands required to
2779 * eventually emit this request. This is to guarantee that the 2786 * eventually emit this request. This is to guarantee that the
@@ -2781,24 +2788,20 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
2781 * to be redone if the request is not actually submitted straight 2788 * to be redone if the request is not actually submitted straight
2782 * away, e.g. because a GPU scheduler has deferred it. 2789 * away, e.g. because a GPU scheduler has deferred it.
2783 */ 2790 */
2791 req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
2792
2784 if (i915.enable_execlists) 2793 if (i915.enable_execlists)
2785 ret = intel_logical_ring_reserve_space(req); 2794 ret = intel_logical_ring_alloc_request_extras(req);
2786 else 2795 else
2787 ret = intel_ring_reserve_space(req); 2796 ret = intel_ring_alloc_request_extras(req);
2788 if (ret) { 2797 if (ret)
2789 /* 2798 goto err_ctx;
2790 * At this point, the request is fully allocated even if not
2791 * fully prepared. Thus it can be cleaned up using the proper
2792 * free code.
2793 */
2794 intel_ring_reserved_space_cancel(req->ringbuf);
2795 i915_gem_request_unreference(req);
2796 return ret;
2797 }
2798 2799
2799 *req_out = req; 2800 *req_out = req;
2800 return 0; 2801 return 0;
2801 2802
2803err_ctx:
2804 i915_gem_context_unreference(ctx);
2802err: 2805err:
2803 kmem_cache_free(dev_priv->requests, req); 2806 kmem_cache_free(dev_priv->requests, req);
2804 return ret; 2807 return ret;
@@ -2824,7 +2827,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
2824 int err; 2827 int err;
2825 2828
2826 if (ctx == NULL) 2829 if (ctx == NULL)
2827 ctx = to_i915(engine->dev)->kernel_context; 2830 ctx = engine->i915->kernel_context;
2828 err = __i915_gem_request_alloc(engine, ctx, &req); 2831 err = __i915_gem_request_alloc(engine, ctx, &req);
2829 return err ? ERR_PTR(err) : req; 2832 return err ? ERR_PTR(err) : req;
2830} 2833}
@@ -2888,13 +2891,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
2888 /* Ensure irq handler finishes or is cancelled. */ 2891 /* Ensure irq handler finishes or is cancelled. */
2889 tasklet_kill(&engine->irq_tasklet); 2892 tasklet_kill(&engine->irq_tasklet);
2890 2893
2891 spin_lock_bh(&engine->execlist_lock); 2894 intel_execlists_cancel_requests(engine);
2892 /* list_splice_tail_init checks for empty lists */
2893 list_splice_tail_init(&engine->execlist_queue,
2894 &engine->execlist_retired_req_list);
2895 spin_unlock_bh(&engine->execlist_lock);
2896
2897 intel_execlists_retire_requests(engine);
2898 } 2895 }
2899 2896
2900 /* 2897 /*
@@ -3005,9 +3002,8 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
3005} 3002}
3006 3003
3007bool 3004bool
3008i915_gem_retire_requests(struct drm_device *dev) 3005i915_gem_retire_requests(struct drm_i915_private *dev_priv)
3009{ 3006{
3010 struct drm_i915_private *dev_priv = dev->dev_private;
3011 struct intel_engine_cs *engine; 3007 struct intel_engine_cs *engine;
3012 bool idle = true; 3008 bool idle = true;
3013 3009
@@ -3018,8 +3014,6 @@ i915_gem_retire_requests(struct drm_device *dev)
3018 spin_lock_bh(&engine->execlist_lock); 3014 spin_lock_bh(&engine->execlist_lock);
3019 idle &= list_empty(&engine->execlist_queue); 3015 idle &= list_empty(&engine->execlist_queue);
3020 spin_unlock_bh(&engine->execlist_lock); 3016 spin_unlock_bh(&engine->execlist_lock);
3021
3022 intel_execlists_retire_requests(engine);
3023 } 3017 }
3024 } 3018 }
3025 3019
@@ -3042,7 +3036,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
3042 /* Come back later if the device is busy... */ 3036 /* Come back later if the device is busy... */
3043 idle = false; 3037 idle = false;
3044 if (mutex_trylock(&dev->struct_mutex)) { 3038 if (mutex_trylock(&dev->struct_mutex)) {
3045 idle = i915_gem_retire_requests(dev); 3039 idle = i915_gem_retire_requests(dev_priv);
3046 mutex_unlock(&dev->struct_mutex); 3040 mutex_unlock(&dev->struct_mutex);
3047 } 3041 }
3048 if (!idle) 3042 if (!idle)
@@ -3066,7 +3060,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
3066 * Also locking seems to be fubar here, engine->request_list is protected 3060 * Also locking seems to be fubar here, engine->request_list is protected
3067 * by dev->struct_mutex. */ 3061 * by dev->struct_mutex. */
3068 3062
3069 intel_mark_idle(dev); 3063 intel_mark_idle(dev_priv);
3070 3064
3071 if (mutex_trylock(&dev->struct_mutex)) { 3065 if (mutex_trylock(&dev->struct_mutex)) {
3072 for_each_engine(engine, dev_priv) 3066 for_each_engine(engine, dev_priv)
@@ -3096,14 +3090,8 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
3096 if (req == NULL) 3090 if (req == NULL)
3097 continue; 3091 continue;
3098 3092
3099 if (list_empty(&req->list)) 3093 if (i915_gem_request_completed(req, true))
3100 goto retire;
3101
3102 if (i915_gem_request_completed(req, true)) {
3103 __i915_gem_request_retire__upto(req);
3104retire:
3105 i915_gem_object_retire__read(obj, i); 3094 i915_gem_object_retire__read(obj, i);
3106 }
3107 } 3095 }
3108 3096
3109 return 0; 3097 return 0;
@@ -3185,7 +3173,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3185 ret = __i915_wait_request(req[i], true, 3173 ret = __i915_wait_request(req[i], true,
3186 args->timeout_ns > 0 ? &args->timeout_ns : NULL, 3174 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
3187 to_rps_client(file)); 3175 to_rps_client(file));
3188 i915_gem_request_unreference__unlocked(req[i]); 3176 i915_gem_request_unreference(req[i]);
3189 } 3177 }
3190 return ret; 3178 return ret;
3191 3179
@@ -3211,7 +3199,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
3211 if (i915_gem_request_completed(from_req, true)) 3199 if (i915_gem_request_completed(from_req, true))
3212 return 0; 3200 return 0;
3213 3201
3214 if (!i915_semaphore_is_enabled(obj->base.dev)) { 3202 if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
3215 struct drm_i915_private *i915 = to_i915(obj->base.dev); 3203 struct drm_i915_private *i915 = to_i915(obj->base.dev);
3216 ret = __i915_wait_request(from_req, 3204 ret = __i915_wait_request(from_req,
3217 i915->mm.interruptible, 3205 i915->mm.interruptible,
@@ -3345,6 +3333,17 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
3345 old_write_domain); 3333 old_write_domain);
3346} 3334}
3347 3335
3336static void __i915_vma_iounmap(struct i915_vma *vma)
3337{
3338 GEM_BUG_ON(vma->pin_count);
3339
3340 if (vma->iomap == NULL)
3341 return;
3342
3343 io_mapping_unmap(vma->iomap);
3344 vma->iomap = NULL;
3345}
3346
3348static int __i915_vma_unbind(struct i915_vma *vma, bool wait) 3347static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
3349{ 3348{
3350 struct drm_i915_gem_object *obj = vma->obj; 3349 struct drm_i915_gem_object *obj = vma->obj;
@@ -3377,6 +3376,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
3377 ret = i915_gem_object_put_fence(obj); 3376 ret = i915_gem_object_put_fence(obj);
3378 if (ret) 3377 if (ret)
3379 return ret; 3378 return ret;
3379
3380 __i915_vma_iounmap(vma);
3380 } 3381 }
3381 3382
3382 trace_i915_vma_unbind(vma); 3383 trace_i915_vma_unbind(vma);
@@ -3731,7 +3732,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3731 return; 3732 return;
3732 3733
3733 if (i915_gem_clflush_object(obj, obj->pin_display)) 3734 if (i915_gem_clflush_object(obj, obj->pin_display))
3734 i915_gem_chipset_flush(obj->base.dev); 3735 i915_gem_chipset_flush(to_i915(obj->base.dev));
3735 3736
3736 old_write_domain = obj->base.write_domain; 3737 old_write_domain = obj->base.write_domain;
3737 obj->base.write_domain = 0; 3738 obj->base.write_domain = 0;
@@ -3929,7 +3930,7 @@ out:
3929 obj->base.write_domain != I915_GEM_DOMAIN_CPU && 3930 obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
3930 cpu_write_needs_clflush(obj)) { 3931 cpu_write_needs_clflush(obj)) {
3931 if (i915_gem_clflush_object(obj, true)) 3932 if (i915_gem_clflush_object(obj, true))
3932 i915_gem_chipset_flush(obj->base.dev); 3933 i915_gem_chipset_flush(to_i915(obj->base.dev));
3933 } 3934 }
3934 3935
3935 return 0; 3936 return 0;
@@ -4198,7 +4199,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4198 if (ret == 0) 4199 if (ret == 0)
4199 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); 4200 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
4200 4201
4201 i915_gem_request_unreference__unlocked(target); 4202 i915_gem_request_unreference(target);
4202 4203
4203 return ret; 4204 return ret;
4204} 4205}
@@ -4499,21 +4500,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4499 .put_pages = i915_gem_object_put_pages_gtt, 4500 .put_pages = i915_gem_object_put_pages_gtt,
4500}; 4501};
4501 4502
4502struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 4503struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
4503 size_t size) 4504 size_t size)
4504{ 4505{
4505 struct drm_i915_gem_object *obj; 4506 struct drm_i915_gem_object *obj;
4506 struct address_space *mapping; 4507 struct address_space *mapping;
4507 gfp_t mask; 4508 gfp_t mask;
4509 int ret;
4508 4510
4509 obj = i915_gem_object_alloc(dev); 4511 obj = i915_gem_object_alloc(dev);
4510 if (obj == NULL) 4512 if (obj == NULL)
4511 return NULL; 4513 return ERR_PTR(-ENOMEM);
4512 4514
4513 if (drm_gem_object_init(dev, &obj->base, size) != 0) { 4515 ret = drm_gem_object_init(dev, &obj->base, size);
4514 i915_gem_object_free(obj); 4516 if (ret)
4515 return NULL; 4517 goto fail;
4516 }
4517 4518
4518 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; 4519 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4519 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) { 4520 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
@@ -4550,6 +4551,11 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4550 trace_i915_gem_object_create(obj); 4551 trace_i915_gem_object_create(obj);
4551 4552
4552 return obj; 4553 return obj;
4554
4555fail:
4556 i915_gem_object_free(obj);
4557
4558 return ERR_PTR(ret);
4553} 4559}
4554 4560
4555static bool discard_backing_storage(struct drm_i915_gem_object *obj) 4561static bool discard_backing_storage(struct drm_i915_gem_object *obj)
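With i915_gem_object_create() returning ERR_PTR()-encoded errors instead of NULL, callers throughout this series (i915_gem_create(), the batch pool, context object allocation) switch from a NULL check to the IS_ERR()/PTR_ERR() idiom. A minimal sketch of the caller-side change, with a hypothetical helper and placeholder 'dev' and 'size':

/* Sketch only, not part of the commit; create_scratch() is hypothetical. */
static int create_scratch(struct drm_device *dev, size_t size,
			  struct drm_i915_gem_object **out)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* previously: if (obj == NULL) return -ENOMEM; */

	*out = obj;
	return 0;
}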
@@ -4655,16 +4661,12 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4655struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, 4661struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
4656 const struct i915_ggtt_view *view) 4662 const struct i915_ggtt_view *view)
4657{ 4663{
4658 struct drm_device *dev = obj->base.dev;
4659 struct drm_i915_private *dev_priv = to_i915(dev);
4660 struct i915_ggtt *ggtt = &dev_priv->ggtt;
4661 struct i915_vma *vma; 4664 struct i915_vma *vma;
4662 4665
4663 BUG_ON(!view); 4666 GEM_BUG_ON(!view);
4664 4667
4665 list_for_each_entry(vma, &obj->vma_list, obj_link) 4668 list_for_each_entry(vma, &obj->vma_list, obj_link)
4666 if (vma->vm == &ggtt->base && 4669 if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
4667 i915_ggtt_view_equal(&vma->ggtt_view, view))
4668 return vma; 4670 return vma;
4669 return NULL; 4671 return NULL;
4670} 4672}
@@ -4706,9 +4708,10 @@ i915_gem_suspend(struct drm_device *dev)
4706 if (ret) 4708 if (ret)
4707 goto err; 4709 goto err;
4708 4710
4709 i915_gem_retire_requests(dev); 4711 i915_gem_retire_requests(dev_priv);
4710 4712
4711 i915_gem_stop_engines(dev); 4713 i915_gem_stop_engines(dev);
4714 i915_gem_context_lost(dev_priv);
4712 mutex_unlock(&dev->struct_mutex); 4715 mutex_unlock(&dev->struct_mutex);
4713 4716
4714 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 4717 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
@@ -4727,37 +4730,6 @@ err:
4727 return ret; 4730 return ret;
4728} 4731}
4729 4732
4730int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
4731{
4732 struct intel_engine_cs *engine = req->engine;
4733 struct drm_device *dev = engine->dev;
4734 struct drm_i915_private *dev_priv = dev->dev_private;
4735 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4736 int i, ret;
4737
4738 if (!HAS_L3_DPF(dev) || !remap_info)
4739 return 0;
4740
4741 ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
4742 if (ret)
4743 return ret;
4744
4745 /*
4746 * Note: We do not worry about the concurrent register cacheline hang
4747 * here because no other code should access these registers other than
4748 * at initialization time.
4749 */
4750 for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
4751 intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
4752 intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
4753 intel_ring_emit(engine, remap_info[i]);
4754 }
4755
4756 intel_ring_advance(engine);
4757
4758 return ret;
4759}
4760
4761void i915_gem_init_swizzling(struct drm_device *dev) 4733void i915_gem_init_swizzling(struct drm_device *dev)
4762{ 4734{
4763 struct drm_i915_private *dev_priv = dev->dev_private; 4735 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4862,7 +4834,7 @@ i915_gem_init_hw(struct drm_device *dev)
4862{ 4834{
4863 struct drm_i915_private *dev_priv = dev->dev_private; 4835 struct drm_i915_private *dev_priv = dev->dev_private;
4864 struct intel_engine_cs *engine; 4836 struct intel_engine_cs *engine;
4865 int ret, j; 4837 int ret;
4866 4838
4867 /* Double layer security blanket, see i915_gem_init() */ 4839 /* Double layer security blanket, see i915_gem_init() */
4868 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4840 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -4928,44 +4900,6 @@ i915_gem_init_hw(struct drm_device *dev)
4928 * on re-initialisation 4900 * on re-initialisation
4929 */ 4901 */
4930 ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100); 4902 ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
4931 if (ret)
4932 goto out;
4933
4934 /* Now it is safe to go back round and do everything else: */
4935 for_each_engine(engine, dev_priv) {
4936 struct drm_i915_gem_request *req;
4937
4938 req = i915_gem_request_alloc(engine, NULL);
4939 if (IS_ERR(req)) {
4940 ret = PTR_ERR(req);
4941 break;
4942 }
4943
4944 if (engine->id == RCS) {
4945 for (j = 0; j < NUM_L3_SLICES(dev); j++) {
4946 ret = i915_gem_l3_remap(req, j);
4947 if (ret)
4948 goto err_request;
4949 }
4950 }
4951
4952 ret = i915_ppgtt_init_ring(req);
4953 if (ret)
4954 goto err_request;
4955
4956 ret = i915_gem_context_enable(req);
4957 if (ret)
4958 goto err_request;
4959
4960err_request:
4961 i915_add_request_no_flush(req);
4962 if (ret) {
4963 DRM_ERROR("Failed to enable %s, error=%d\n",
4964 engine->name, ret);
4965 i915_gem_cleanup_engines(dev);
4966 break;
4967 }
4968 }
4969 4903
4970out: 4904out:
4971 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4905 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -4977,9 +4911,6 @@ int i915_gem_init(struct drm_device *dev)
4977 struct drm_i915_private *dev_priv = dev->dev_private; 4911 struct drm_i915_private *dev_priv = dev->dev_private;
4978 int ret; 4912 int ret;
4979 4913
4980 i915.enable_execlists = intel_sanitize_enable_execlists(dev,
4981 i915.enable_execlists);
4982
4983 mutex_lock(&dev->struct_mutex); 4914 mutex_lock(&dev->struct_mutex);
4984 4915
4985 if (!i915.enable_execlists) { 4916 if (!i915.enable_execlists) {
@@ -5002,10 +4933,7 @@ int i915_gem_init(struct drm_device *dev)
5002 */ 4933 */
5003 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4934 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5004 4935
5005 ret = i915_gem_init_userptr(dev); 4936 i915_gem_init_userptr(dev_priv);
5006 if (ret)
5007 goto out_unlock;
5008
5009 i915_gem_init_ggtt(dev); 4937 i915_gem_init_ggtt(dev);
5010 4938
5011 ret = i915_gem_context_init(dev); 4939 ret = i915_gem_context_init(dev);
@@ -5042,14 +4970,6 @@ i915_gem_cleanup_engines(struct drm_device *dev)
5042 4970
5043 for_each_engine(engine, dev_priv) 4971 for_each_engine(engine, dev_priv)
5044 dev_priv->gt.cleanup_engine(engine); 4972 dev_priv->gt.cleanup_engine(engine);
5045
5046 if (i915.enable_execlists)
5047 /*
5048 * Neither the BIOS, ourselves or any other kernel
5049 * expects the system to be in execlists mode on startup,
5050 * so we need to reset the GPU back to legacy mode.
5051 */
5052 intel_gpu_reset(dev, ALL_ENGINES);
5053} 4973}
5054 4974
5055static void 4975static void
@@ -5073,7 +4993,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
5073 else 4993 else
5074 dev_priv->num_fence_regs = 8; 4994 dev_priv->num_fence_regs = 8;
5075 4995
5076 if (intel_vgpu_active(dev)) 4996 if (intel_vgpu_active(dev_priv))
5077 dev_priv->num_fence_regs = 4997 dev_priv->num_fence_regs =
5078 I915_READ(vgtif_reg(avail_rs.fence_num)); 4998 I915_READ(vgtif_reg(avail_rs.fence_num));
5079 4999
@@ -5148,6 +5068,34 @@ void i915_gem_load_cleanup(struct drm_device *dev)
5148 kmem_cache_destroy(dev_priv->objects); 5068 kmem_cache_destroy(dev_priv->objects);
5149} 5069}
5150 5070
5071int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
5072{
5073 struct drm_i915_gem_object *obj;
5074
5075 /* Called just before we write the hibernation image.
5076 *
5077 * We need to update the domain tracking to reflect that the CPU
5078 * will be accessing all the pages to create and restore from the
5079 * hibernation, and so upon restoration those pages will be in the
5080 * CPU domain.
5081 *
5082 * To make sure the hibernation image contains the latest state,
5083 * we update that state just before writing out the image.
5084 */
5085
5086 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
5087 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
5088 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
5089 }
5090
5091 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
5092 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
5093 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
5094 }
5095
5096 return 0;
5097}
5098
5151void i915_gem_release(struct drm_device *dev, struct drm_file *file) 5099void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5152{ 5100{
5153 struct drm_i915_file_private *file_priv = file->driver_priv; 5101 struct drm_i915_file_private *file_priv = file->driver_priv;
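i915_gem_freeze_late() pulls every bound and unbound object into the CPU domain just before the hibernation image is written, so the saved domain tracking matches what the CPU touches while creating and restoring the image. The two list walks differ only in the list traversed; a minimal sketch of the common step, assuming a hypothetical helper:

/* Sketch only, not part of the commit; force_cpu_domain() is hypothetical. */
static void force_cpu_domain(struct list_head *list)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, list, global_list) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}
}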
@@ -5254,13 +5202,10 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
5254u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, 5202u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
5255 const struct i915_ggtt_view *view) 5203 const struct i915_ggtt_view *view)
5256{ 5204{
5257 struct drm_i915_private *dev_priv = to_i915(o->base.dev);
5258 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5259 struct i915_vma *vma; 5205 struct i915_vma *vma;
5260 5206
5261 list_for_each_entry(vma, &o->vma_list, obj_link) 5207 list_for_each_entry(vma, &o->vma_list, obj_link)
5262 if (vma->vm == &ggtt->base && 5208 if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
5263 i915_ggtt_view_equal(&vma->ggtt_view, view))
5264 return vma->node.start; 5209 return vma->node.start;
5265 5210
5266 WARN(1, "global vma for this object not found. (view=%u)\n", view->type); 5211 WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
@@ -5286,12 +5231,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5286bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, 5231bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
5287 const struct i915_ggtt_view *view) 5232 const struct i915_ggtt_view *view)
5288{ 5233{
5289 struct drm_i915_private *dev_priv = to_i915(o->base.dev);
5290 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5291 struct i915_vma *vma; 5234 struct i915_vma *vma;
5292 5235
5293 list_for_each_entry(vma, &o->vma_list, obj_link) 5236 list_for_each_entry(vma, &o->vma_list, obj_link)
5294 if (vma->vm == &ggtt->base && 5237 if (vma->is_ggtt &&
5295 i915_ggtt_view_equal(&vma->ggtt_view, view) && 5238 i915_ggtt_view_equal(&vma->ggtt_view, view) &&
5296 drm_mm_node_allocated(&vma->node)) 5239 drm_mm_node_allocated(&vma->node))
5297 return true; 5240 return true;
@@ -5310,23 +5253,18 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
5310 return false; 5253 return false;
5311} 5254}
5312 5255
5313unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, 5256unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
5314 struct i915_address_space *vm)
5315{ 5257{
5316 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5317 struct i915_vma *vma; 5258 struct i915_vma *vma;
5318 5259
5319 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); 5260 GEM_BUG_ON(list_empty(&o->vma_list));
5320
5321 BUG_ON(list_empty(&o->vma_list));
5322 5261
5323 list_for_each_entry(vma, &o->vma_list, obj_link) { 5262 list_for_each_entry(vma, &o->vma_list, obj_link) {
5324 if (vma->is_ggtt && 5263 if (vma->is_ggtt &&
5325 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) 5264 vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
5326 continue;
5327 if (vma->vm == vm)
5328 return vma->node.size; 5265 return vma->node.size;
5329 } 5266 }
5267
5330 return 0; 5268 return 0;
5331} 5269}
5332 5270
@@ -5365,8 +5303,8 @@ i915_gem_object_create_from_data(struct drm_device *dev,
5365 size_t bytes; 5303 size_t bytes;
5366 int ret; 5304 int ret;
5367 5305
5368 obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE)); 5306 obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
5369 if (IS_ERR_OR_NULL(obj)) 5307 if (IS_ERR(obj))
5370 return obj; 5308 return obj;
5371 5309
5372 ret = i915_gem_object_set_to_cpu_domain(obj, true); 5310 ret = i915_gem_object_set_to_cpu_domain(obj, true);
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 7bf2f3f2968e..3752d5daa4b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -134,9 +134,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
134 if (obj == NULL) { 134 if (obj == NULL) {
135 int ret; 135 int ret;
136 136
137 obj = i915_gem_alloc_object(pool->dev, size); 137 obj = i915_gem_object_create(pool->dev, size);
138 if (obj == NULL) 138 if (IS_ERR(obj))
139 return ERR_PTR(-ENOMEM); 139 return obj;
140 140
141 ret = i915_gem_object_get_pages(obj); 141 ret = i915_gem_object_get_pages(obj);
142 if (ret) 142 if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index e5acc3916f75..2aedd188473d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -90,6 +90,8 @@
90#include "i915_drv.h" 90#include "i915_drv.h"
91#include "i915_trace.h" 91#include "i915_trace.h"
92 92
93#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
94
93/* This is a HW constraint. The value below is the largest known requirement 95/* This is a HW constraint. The value below is the largest known requirement
94 * I've seen in a spec to date, and that was a workaround for a non-shipping 96 * I've seen in a spec to date, and that was a workaround for a non-shipping
95 * part. It should be safe to decrease this, but it's more future proof as is. 97 * part. It should be safe to decrease this, but it's more future proof as is.
@@ -97,28 +99,27 @@
97#define GEN6_CONTEXT_ALIGN (64<<10) 99#define GEN6_CONTEXT_ALIGN (64<<10)
98#define GEN7_CONTEXT_ALIGN 4096 100#define GEN7_CONTEXT_ALIGN 4096
99 101
100static size_t get_context_alignment(struct drm_device *dev) 102static size_t get_context_alignment(struct drm_i915_private *dev_priv)
101{ 103{
102 if (IS_GEN6(dev)) 104 if (IS_GEN6(dev_priv))
103 return GEN6_CONTEXT_ALIGN; 105 return GEN6_CONTEXT_ALIGN;
104 106
105 return GEN7_CONTEXT_ALIGN; 107 return GEN7_CONTEXT_ALIGN;
106} 108}
107 109
108static int get_context_size(struct drm_device *dev) 110static int get_context_size(struct drm_i915_private *dev_priv)
109{ 111{
110 struct drm_i915_private *dev_priv = dev->dev_private;
111 int ret; 112 int ret;
112 u32 reg; 113 u32 reg;
113 114
114 switch (INTEL_INFO(dev)->gen) { 115 switch (INTEL_GEN(dev_priv)) {
115 case 6: 116 case 6:
116 reg = I915_READ(CXT_SIZE); 117 reg = I915_READ(CXT_SIZE);
117 ret = GEN6_CXT_TOTAL_SIZE(reg) * 64; 118 ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
118 break; 119 break;
119 case 7: 120 case 7:
120 reg = I915_READ(GEN7_CXT_SIZE); 121 reg = I915_READ(GEN7_CXT_SIZE);
121 if (IS_HASWELL(dev)) 122 if (IS_HASWELL(dev_priv))
122 ret = HSW_CXT_TOTAL_SIZE; 123 ret = HSW_CXT_TOTAL_SIZE;
123 else 124 else
124 ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; 125 ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
@@ -169,6 +170,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
169 if (ctx->legacy_hw_ctx.rcs_state) 170 if (ctx->legacy_hw_ctx.rcs_state)
170 drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base); 171 drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
171 list_del(&ctx->link); 172 list_del(&ctx->link);
173
174 ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
172 kfree(ctx); 175 kfree(ctx);
173} 176}
174 177
@@ -178,9 +181,9 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
178 struct drm_i915_gem_object *obj; 181 struct drm_i915_gem_object *obj;
179 int ret; 182 int ret;
180 183
181 obj = i915_gem_alloc_object(dev, size); 184 obj = i915_gem_object_create(dev, size);
182 if (obj == NULL) 185 if (IS_ERR(obj))
183 return ERR_PTR(-ENOMEM); 186 return obj;
184 187
185 /* 188 /*
186 * Try to make the context utilize L3 as well as LLC. 189 * Try to make the context utilize L3 as well as LLC.
@@ -209,6 +212,28 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
209 return obj; 212 return obj;
210} 213}
211 214
215static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
216{
217 int ret;
218
219 ret = ida_simple_get(&dev_priv->context_hw_ida,
220 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
221 if (ret < 0) {
222 /* Contexts are only released when no longer active.
223 * Flush any pending retires to hopefully release some
224 * stale contexts and try again.
225 */
226 i915_gem_retire_requests(dev_priv);
227 ret = ida_simple_get(&dev_priv->context_hw_ida,
228 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
229 if (ret < 0)
230 return ret;
231 }
232
233 *out = ret;
234 return 0;
235}
236
212static struct intel_context * 237static struct intel_context *
213__create_hw_context(struct drm_device *dev, 238__create_hw_context(struct drm_device *dev,
214 struct drm_i915_file_private *file_priv) 239 struct drm_i915_file_private *file_priv)
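assign_hw_id() hands out a per-context hardware ID from the new context_hw_ida, retiring outstanding requests and retrying once if the allocator is exhausted; i915_gem_context_free() returns the ID with ida_simple_remove(). Stripped of the driver specifics, the allocate/release pattern looks like this (the limit and names below are illustrative, not from the commit):

#include <linux/idr.h>

static DEFINE_IDA(example_hw_ida);

/* Sketch only: allocate the lowest free ID below some hardware limit. */
static int example_get_hw_id(unsigned int *out)
{
	int id = ida_simple_get(&example_hw_ida, 0, 1 << 20, GFP_KERNEL);

	if (id < 0)
		return id;	/* the driver retires requests and retries once */

	*out = id;
	return 0;
}

static void example_put_hw_id(unsigned int id)
{
	ida_simple_remove(&example_hw_ida, id);
}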
@@ -221,6 +246,12 @@ __create_hw_context(struct drm_device *dev,
221 if (ctx == NULL) 246 if (ctx == NULL)
222 return ERR_PTR(-ENOMEM); 247 return ERR_PTR(-ENOMEM);
223 248
249 ret = assign_hw_id(dev_priv, &ctx->hw_id);
250 if (ret) {
251 kfree(ctx);
252 return ERR_PTR(ret);
253 }
254
224 kref_init(&ctx->ref); 255 kref_init(&ctx->ref);
225 list_add_tail(&ctx->link, &dev_priv->context_list); 256 list_add_tail(&ctx->link, &dev_priv->context_list);
226 ctx->i915 = dev_priv; 257 ctx->i915 = dev_priv;
@@ -249,7 +280,7 @@ __create_hw_context(struct drm_device *dev,
249 /* NB: Mark all slices as needing a remap so that when the context first 280 /* NB: Mark all slices as needing a remap so that when the context first
250 * loads it will restore whatever remap state already exists. If there 281 * loads it will restore whatever remap state already exists. If there
251 * is no remap info, it will be a NOP. */ 282 * is no remap info, it will be a NOP. */
252 ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1; 283 ctx->remap_slice = ALL_L3_SLICES(dev_priv);
253 284
254 ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD; 285 ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
255 286
@@ -288,7 +319,7 @@ i915_gem_create_context(struct drm_device *dev,
288 * context. 319 * context.
289 */ 320 */
290 ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state, 321 ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
291 get_context_alignment(dev), 0); 322 get_context_alignment(to_i915(dev)), 0);
292 if (ret) { 323 if (ret) {
293 DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); 324 DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
294 goto err_destroy; 325 goto err_destroy;
@@ -336,7 +367,6 @@ static void i915_gem_context_unpin(struct intel_context *ctx,
336void i915_gem_context_reset(struct drm_device *dev) 367void i915_gem_context_reset(struct drm_device *dev)
337{ 368{
338 struct drm_i915_private *dev_priv = dev->dev_private; 369 struct drm_i915_private *dev_priv = dev->dev_private;
339 int i;
340 370
341 if (i915.enable_execlists) { 371 if (i915.enable_execlists) {
342 struct intel_context *ctx; 372 struct intel_context *ctx;
@@ -345,17 +375,7 @@ void i915_gem_context_reset(struct drm_device *dev)
345 intel_lr_context_reset(dev_priv, ctx); 375 intel_lr_context_reset(dev_priv, ctx);
346 } 376 }
347 377
348 for (i = 0; i < I915_NUM_ENGINES; i++) { 378 i915_gem_context_lost(dev_priv);
349 struct intel_engine_cs *engine = &dev_priv->engine[i];
350
351 if (engine->last_context) {
352 i915_gem_context_unpin(engine->last_context, engine);
353 engine->last_context = NULL;
354 }
355 }
356
357 /* Force the GPU state to be reinitialised on enabling */
358 dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
359} 379}
360 380
361int i915_gem_context_init(struct drm_device *dev) 381int i915_gem_context_init(struct drm_device *dev)
@@ -368,19 +388,25 @@ int i915_gem_context_init(struct drm_device *dev)
368 if (WARN_ON(dev_priv->kernel_context)) 388 if (WARN_ON(dev_priv->kernel_context))
369 return 0; 389 return 0;
370 390
371 if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) { 391 if (intel_vgpu_active(dev_priv) &&
392 HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
372 if (!i915.enable_execlists) { 393 if (!i915.enable_execlists) {
373 DRM_INFO("Only EXECLIST mode is supported in vgpu.\n"); 394 DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
374 return -EINVAL; 395 return -EINVAL;
375 } 396 }
376 } 397 }
377 398
399 /* Using the simple ida interface, the max is limited by sizeof(int) */
400 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
401 ida_init(&dev_priv->context_hw_ida);
402
378 if (i915.enable_execlists) { 403 if (i915.enable_execlists) {
379 /* NB: intentionally left blank. We will allocate our own 404 /* NB: intentionally left blank. We will allocate our own
380 * backing objects as we need them, thank you very much */ 405 * backing objects as we need them, thank you very much */
381 dev_priv->hw_context_size = 0; 406 dev_priv->hw_context_size = 0;
382 } else if (HAS_HW_CONTEXTS(dev)) { 407 } else if (HAS_HW_CONTEXTS(dev_priv)) {
383 dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); 408 dev_priv->hw_context_size =
409 round_up(get_context_size(dev_priv), 4096);
384 if (dev_priv->hw_context_size > (1<<20)) { 410 if (dev_priv->hw_context_size > (1<<20)) {
385 DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n", 411 DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
386 dev_priv->hw_context_size); 412 dev_priv->hw_context_size);
@@ -403,61 +429,35 @@ int i915_gem_context_init(struct drm_device *dev)
403 return 0; 429 return 0;
404} 430}
405 431
406void i915_gem_context_fini(struct drm_device *dev) 432void i915_gem_context_lost(struct drm_i915_private *dev_priv)
407{ 433{
408 struct drm_i915_private *dev_priv = dev->dev_private; 434 struct intel_engine_cs *engine;
409 struct intel_context *dctx = dev_priv->kernel_context;
410 int i;
411
412 if (dctx->legacy_hw_ctx.rcs_state) {
413 /* The only known way to stop the gpu from accessing the hw context is
414 * to reset it. Do this as the very last operation to avoid confusing
415 * other code, leading to spurious errors. */
416 intel_gpu_reset(dev, ALL_ENGINES);
417
418 /* When default context is created and switched to, base object refcount
419 * will be 2 (+1 from object creation and +1 from do_switch()).
420 * i915_gem_context_fini() will be called after gpu_idle() has switched
421 * to default context. So we need to unreference the base object once
422 * to offset the do_switch part, so that i915_gem_context_unreference()
423 * can then free the base object correctly. */
424 WARN_ON(!dev_priv->engine[RCS].last_context);
425 435
426 i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); 436 for_each_engine(engine, dev_priv) {
427 } 437 if (engine->last_context == NULL)
428 438 continue;
429 for (i = I915_NUM_ENGINES; --i >= 0;) {
430 struct intel_engine_cs *engine = &dev_priv->engine[i];
431 439
432 if (engine->last_context) { 440 i915_gem_context_unpin(engine->last_context, engine);
433 i915_gem_context_unpin(engine->last_context, engine); 441 engine->last_context = NULL;
434 engine->last_context = NULL;
435 }
436 } 442 }
437 443
438 i915_gem_context_unreference(dctx); 444 /* Force the GPU state to be reinitialised on enabling */
439 dev_priv->kernel_context = NULL; 445 dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
446 dev_priv->kernel_context->remap_slice = ALL_L3_SLICES(dev_priv);
440} 447}
441 448
442int i915_gem_context_enable(struct drm_i915_gem_request *req) 449void i915_gem_context_fini(struct drm_device *dev)
443{ 450{
444 struct intel_engine_cs *engine = req->engine; 451 struct drm_i915_private *dev_priv = dev->dev_private;
445 int ret; 452 struct intel_context *dctx = dev_priv->kernel_context;
446
447 if (i915.enable_execlists) {
448 if (engine->init_context == NULL)
449 return 0;
450 453
451 ret = engine->init_context(req); 454 if (dctx->legacy_hw_ctx.rcs_state)
452 } else 455 i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
453 ret = i915_switch_context(req);
454 456
455 if (ret) { 457 i915_gem_context_unreference(dctx);
456 DRM_ERROR("ring init context: %d\n", ret); 458 dev_priv->kernel_context = NULL;
457 return ret;
458 }
459 459
460 return 0; 460 ida_destroy(&dev_priv->context_hw_ida);
461} 461}
462 462
463static int context_idr_cleanup(int id, void *p, void *data) 463static int context_idr_cleanup(int id, void *p, void *data)
@@ -510,12 +510,13 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
510static inline int 510static inline int
511mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) 511mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
512{ 512{
513 struct drm_i915_private *dev_priv = req->i915;
513 struct intel_engine_cs *engine = req->engine; 514 struct intel_engine_cs *engine = req->engine;
514 u32 flags = hw_flags | MI_MM_SPACE_GTT; 515 u32 flags = hw_flags | MI_MM_SPACE_GTT;
515 const int num_rings = 516 const int num_rings =
516 /* Use an extended w/a on ivb+ if signalling from other rings */ 517 /* Use an extended w/a on ivb+ if signalling from other rings */
517 i915_semaphore_is_enabled(engine->dev) ? 518 i915_semaphore_is_enabled(dev_priv) ?
518 hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 : 519 hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
519 0; 520 0;
520 int len, ret; 521 int len, ret;
521 522
@@ -524,21 +525,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
524 * explicitly, so we rely on the value at ring init, stored in 525 * explicitly, so we rely on the value at ring init, stored in
525 * itlb_before_ctx_switch. 526 * itlb_before_ctx_switch.
526 */ 527 */
527 if (IS_GEN6(engine->dev)) { 528 if (IS_GEN6(dev_priv)) {
528 ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0); 529 ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
529 if (ret) 530 if (ret)
530 return ret; 531 return ret;
531 } 532 }
532 533
533 /* These flags are for resource streamer on HSW+ */ 534 /* These flags are for resource streamer on HSW+ */
534 if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8) 535 if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
535 flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN); 536 flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
536 else if (INTEL_INFO(engine->dev)->gen < 8) 537 else if (INTEL_GEN(dev_priv) < 8)
537 flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN); 538 flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
538 539
539 540
540 len = 4; 541 len = 4;
541 if (INTEL_INFO(engine->dev)->gen >= 7) 542 if (INTEL_GEN(dev_priv) >= 7)
542 len += 2 + (num_rings ? 4*num_rings + 6 : 0); 543 len += 2 + (num_rings ? 4*num_rings + 6 : 0);
543 544
544 ret = intel_ring_begin(req, len); 545 ret = intel_ring_begin(req, len);
@@ -546,14 +547,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
546 return ret; 547 return ret;
547 548
548 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ 549 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
549 if (INTEL_INFO(engine->dev)->gen >= 7) { 550 if (INTEL_GEN(dev_priv) >= 7) {
550 intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE); 551 intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
551 if (num_rings) { 552 if (num_rings) {
552 struct intel_engine_cs *signaller; 553 struct intel_engine_cs *signaller;
553 554
554 intel_ring_emit(engine, 555 intel_ring_emit(engine,
555 MI_LOAD_REGISTER_IMM(num_rings)); 556 MI_LOAD_REGISTER_IMM(num_rings));
556 for_each_engine(signaller, to_i915(engine->dev)) { 557 for_each_engine(signaller, dev_priv) {
557 if (signaller == engine) 558 if (signaller == engine)
558 continue; 559 continue;
559 560
@@ -576,14 +577,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
576 */ 577 */
577 intel_ring_emit(engine, MI_NOOP); 578 intel_ring_emit(engine, MI_NOOP);
578 579
579 if (INTEL_INFO(engine->dev)->gen >= 7) { 580 if (INTEL_GEN(dev_priv) >= 7) {
580 if (num_rings) { 581 if (num_rings) {
581 struct intel_engine_cs *signaller; 582 struct intel_engine_cs *signaller;
582 i915_reg_t last_reg = {}; /* keep gcc quiet */ 583 i915_reg_t last_reg = {}; /* keep gcc quiet */
583 584
584 intel_ring_emit(engine, 585 intel_ring_emit(engine,
585 MI_LOAD_REGISTER_IMM(num_rings)); 586 MI_LOAD_REGISTER_IMM(num_rings));
586 for_each_engine(signaller, to_i915(engine->dev)) { 587 for_each_engine(signaller, dev_priv) {
587 if (signaller == engine) 588 if (signaller == engine)
588 continue; 589 continue;
589 590
@@ -609,7 +610,37 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
609 return ret; 610 return ret;
610} 611}
611 612
612static inline bool skip_rcs_switch(struct intel_engine_cs *engine, 613static int remap_l3(struct drm_i915_gem_request *req, int slice)
614{
615 u32 *remap_info = req->i915->l3_parity.remap_info[slice];
616 struct intel_engine_cs *engine = req->engine;
617 int i, ret;
618
619 if (!remap_info)
620 return 0;
621
622 ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
623 if (ret)
624 return ret;
625
626 /*
627 * Note: We do not worry about the concurrent register cacheline hang
628 * here because no other code should access these registers other than
629 * at initialization time.
630 */
631 intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
632 for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
633 intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
634 intel_ring_emit(engine, remap_info[i]);
635 }
636 intel_ring_emit(engine, MI_NOOP);
637 intel_ring_advance(engine);
638
639 return 0;
640}
641
642static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
643 struct intel_engine_cs *engine,
613 struct intel_context *to) 644 struct intel_context *to)
614{ 645{
615 if (to->remap_slice) 646 if (to->remap_slice)
@@ -618,36 +649,44 @@ static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
618 if (!to->legacy_hw_ctx.initialized) 649 if (!to->legacy_hw_ctx.initialized)
619 return false; 650 return false;
620 651
621 if (to->ppgtt && 652 if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
622 !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
623 return false; 653 return false;
624 654
625 return to == engine->last_context; 655 return to == engine->last_context;
626} 656}
627 657
628static bool 658static bool
629needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to) 659needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
660 struct intel_engine_cs *engine,
661 struct intel_context *to)
630{ 662{
631 if (!to->ppgtt) 663 if (!ppgtt)
632 return false; 664 return false;
633 665
666 /* Always load the ppgtt on first use */
667 if (!engine->last_context)
668 return true;
669
670 /* Same context without new entries, skip */
634 if (engine->last_context == to && 671 if (engine->last_context == to &&
635 !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) 672 !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
636 return false; 673 return false;
637 674
638 if (engine->id != RCS) 675 if (engine->id != RCS)
639 return true; 676 return true;
640 677
641 if (INTEL_INFO(engine->dev)->gen < 8) 678 if (INTEL_GEN(engine->i915) < 8)
642 return true; 679 return true;
643 680
644 return false; 681 return false;
645} 682}
646 683
647static bool 684static bool
648needs_pd_load_post(struct intel_context *to, u32 hw_flags) 685needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
686 struct intel_context *to,
687 u32 hw_flags)
649{ 688{
650 if (!to->ppgtt) 689 if (!ppgtt)
651 return false; 690 return false;
652 691
653 if (!IS_GEN8(to->i915)) 692 if (!IS_GEN8(to->i915))
@@ -663,16 +702,17 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
663{ 702{
664 struct intel_context *to = req->ctx; 703 struct intel_context *to = req->ctx;
665 struct intel_engine_cs *engine = req->engine; 704 struct intel_engine_cs *engine = req->engine;
705 struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
666 struct intel_context *from; 706 struct intel_context *from;
667 u32 hw_flags; 707 u32 hw_flags;
668 int ret, i; 708 int ret, i;
669 709
670 if (skip_rcs_switch(engine, to)) 710 if (skip_rcs_switch(ppgtt, engine, to))
671 return 0; 711 return 0;
672 712
673 /* Trying to pin first makes error handling easier. */ 713 /* Trying to pin first makes error handling easier. */
674 ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state, 714 ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
675 get_context_alignment(engine->dev), 715 get_context_alignment(engine->i915),
676 0); 716 0);
677 if (ret) 717 if (ret)
678 return ret; 718 return ret;
@@ -698,13 +738,13 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
698 if (ret) 738 if (ret)
699 goto unpin_out; 739 goto unpin_out;
700 740
701 if (needs_pd_load_pre(engine, to)) { 741 if (needs_pd_load_pre(ppgtt, engine, to)) {
702 /* Older GENs and non render rings still want the load first, 742 /* Older GENs and non render rings still want the load first,
703 * "PP_DCLV followed by PP_DIR_BASE register through Load 743 * "PP_DCLV followed by PP_DIR_BASE register through Load
704 * Register Immediate commands in Ring Buffer before submitting 744 * Register Immediate commands in Ring Buffer before submitting
705 * a context."*/ 745 * a context."*/
706 trace_switch_mm(engine, to); 746 trace_switch_mm(engine, to);
707 ret = to->ppgtt->switch_mm(to->ppgtt, req); 747 ret = ppgtt->switch_mm(ppgtt, req);
708 if (ret) 748 if (ret)
709 goto unpin_out; 749 goto unpin_out;
710 } 750 }
@@ -715,16 +755,11 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
715 * space. This means we must enforce that a page table load 755 * space. This means we must enforce that a page table load
716 * occur when this occurs. */ 756 * occur when this occurs. */
717 hw_flags = MI_RESTORE_INHIBIT; 757 hw_flags = MI_RESTORE_INHIBIT;
718 else if (to->ppgtt && 758 else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
719 intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
720 hw_flags = MI_FORCE_RESTORE; 759 hw_flags = MI_FORCE_RESTORE;
721 else 760 else
722 hw_flags = 0; 761 hw_flags = 0;
723 762
724 /* We should never emit switch_mm more than once */
725 WARN_ON(needs_pd_load_pre(engine, to) &&
726 needs_pd_load_post(to, hw_flags));
727
728 if (to != from || (hw_flags & MI_FORCE_RESTORE)) { 763 if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
729 ret = mi_set_context(req, hw_flags); 764 ret = mi_set_context(req, hw_flags);
730 if (ret) 765 if (ret)
@@ -759,9 +794,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
759 /* GEN8 does *not* require an explicit reload if the PDPs have been 794 /* GEN8 does *not* require an explicit reload if the PDPs have been
760 * setup, and we do not wish to move them. 795 * setup, and we do not wish to move them.
761 */ 796 */
762 if (needs_pd_load_post(to, hw_flags)) { 797 if (needs_pd_load_post(ppgtt, to, hw_flags)) {
763 trace_switch_mm(engine, to); 798 trace_switch_mm(engine, to);
764 ret = to->ppgtt->switch_mm(to->ppgtt, req); 799 ret = ppgtt->switch_mm(ppgtt, req);
765 /* The hardware context switch is emitted, but we haven't 800 /* The hardware context switch is emitted, but we haven't
766 * actually changed the state - so it's probably safe to bail 801 * actually changed the state - so it's probably safe to bail
767 * here. Still, let the user know something dangerous has 802 * here. Still, let the user know something dangerous has
@@ -771,14 +806,14 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
771 return ret; 806 return ret;
772 } 807 }
773 808
774 if (to->ppgtt) 809 if (ppgtt)
775 to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); 810 ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
776 811
777 for (i = 0; i < MAX_L3_SLICES; i++) { 812 for (i = 0; i < MAX_L3_SLICES; i++) {
778 if (!(to->remap_slice & (1<<i))) 813 if (!(to->remap_slice & (1<<i)))
779 continue; 814 continue;
780 815
781 ret = i915_gem_l3_remap(req, i); 816 ret = remap_l3(req, i);
782 if (ret) 817 if (ret)
783 return ret; 818 return ret;
784 819
@@ -825,17 +860,18 @@ int i915_switch_context(struct drm_i915_gem_request *req)
825 if (engine->id != RCS || 860 if (engine->id != RCS ||
826 req->ctx->legacy_hw_ctx.rcs_state == NULL) { 861 req->ctx->legacy_hw_ctx.rcs_state == NULL) {
827 struct intel_context *to = req->ctx; 862 struct intel_context *to = req->ctx;
863 struct i915_hw_ppgtt *ppgtt =
864 to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
828 865
829 if (needs_pd_load_pre(engine, to)) { 866 if (needs_pd_load_pre(ppgtt, engine, to)) {
830 int ret; 867 int ret;
831 868
832 trace_switch_mm(engine, to); 869 trace_switch_mm(engine, to);
833 ret = to->ppgtt->switch_mm(to->ppgtt, req); 870 ret = ppgtt->switch_mm(ppgtt, req);
834 if (ret) 871 if (ret)
835 return ret; 872 return ret;
836 873
837 /* Doing a PD load always reloads the page dirs */ 874 ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
838 to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
839 } 875 }
840 876
841 if (to != engine->last_context) { 877 if (to != engine->last_context) {
@@ -1004,3 +1040,42 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
1004 1040
1005 return ret; 1041 return ret;
1006} 1042}
1043
1044int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
1045 void *data, struct drm_file *file)
1046{
1047 struct drm_i915_private *dev_priv = dev->dev_private;
1048 struct drm_i915_reset_stats *args = data;
1049 struct i915_ctx_hang_stats *hs;
1050 struct intel_context *ctx;
1051 int ret;
1052
1053 if (args->flags || args->pad)
1054 return -EINVAL;
1055
1056 if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
1057 return -EPERM;
1058
1059 ret = i915_mutex_lock_interruptible(dev);
1060 if (ret)
1061 return ret;
1062
1063 ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
1064 if (IS_ERR(ctx)) {
1065 mutex_unlock(&dev->struct_mutex);
1066 return PTR_ERR(ctx);
1067 }
1068 hs = &ctx->hang_stats;
1069
1070 if (capable(CAP_SYS_ADMIN))
1071 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
1072 else
1073 args->reset_count = 0;
1074
1075 args->batch_active = hs->batch_active;
1076 args->batch_pending = hs->batch_pending;
1077
1078 mutex_unlock(&dev->struct_mutex);
1079
1080 return 0;
1081}
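
An illustrative aside, not part of the patch: the reset-stats ioctl handled above is queried from userspace roughly as in the hedged sketch below, assuming the uapi definitions from <drm/i915_drm.h> (struct drm_i915_reset_stats and DRM_IOCTL_I915_GET_RESET_STATS) and an already-open DRM file descriptor. Matching the checks in the handler, flags and pad must be zero, querying the default context handle needs CAP_SYS_ADMIN, and reset_count is only reported to privileged callers.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int query_reset_stats(int drm_fd, unsigned int ctx_id)
{
	struct drm_i915_reset_stats stats;

	memset(&stats, 0, sizeof(stats));	/* flags and pad must be zero */
	stats.ctx_id = ctx_id;			/* default context needs CAP_SYS_ADMIN */

	if (ioctl(drm_fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
		return -1;

	/* reset_count stays zero for unprivileged callers */
	printf("resets=%u active=%u pending=%u\n",
	       stats.reset_count, stats.batch_active, stats.batch_pending);
	return 0;
}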
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index ea1f8d1bd228..b144c3f5c650 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -154,7 +154,7 @@ none:
154 if (ret) 154 if (ret)
155 return ret; 155 return ret;
156 156
157 i915_gem_retire_requests(dev); 157 i915_gem_retire_requests(to_i915(dev));
158 goto search_again; 158 goto search_again;
159 } 159 }
160 160
@@ -265,7 +265,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
265 if (ret) 265 if (ret)
266 return ret; 266 return ret;
267 267
268 i915_gem_retire_requests(vm->dev); 268 i915_gem_retire_requests(to_i915(vm->dev));
269 269
270 WARN_ON(!list_empty(&vm->active_list)); 270 WARN_ON(!list_empty(&vm->active_list));
271 } 271 }
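
An illustrative aside, not part of the patch: the two hunks above are instances of the series-wide move from struct drm_device to struct drm_i915_private parameters. A minimal sketch of the idiom, using only the existing to_i915() upcast helper and the new i915_gem_retire_requests() signature shown above:

static void retire_after_evict(struct drm_device *dev)
{
	/* i915_gem_retire_requests() now takes the i915 private pointer */
	struct drm_i915_private *dev_priv = to_i915(dev);

	i915_gem_retire_requests(dev_priv);
}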
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 33df74d98269..f315e78f38ed 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -722,7 +722,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
722 struct i915_address_space *vm; 722 struct i915_address_space *vm;
723 struct list_head ordered_vmas; 723 struct list_head ordered_vmas;
724 struct list_head pinned_vmas; 724 struct list_head pinned_vmas;
725 bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4; 725 bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
726 int retry; 726 int retry;
727 727
728 i915_gem_retire_requests_ring(engine); 728 i915_gem_retire_requests_ring(engine);
@@ -963,7 +963,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
963 } 963 }
964 964
965 if (flush_chipset) 965 if (flush_chipset)
966 i915_gem_chipset_flush(req->engine->dev); 966 i915_gem_chipset_flush(req->engine->i915);
967 967
968 if (flush_domains & I915_GEM_DOMAIN_GTT) 968 if (flush_domains & I915_GEM_DOMAIN_GTT)
969 wmb(); 969 wmb();
@@ -1083,14 +1083,6 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
1083 return ERR_PTR(-EIO); 1083 return ERR_PTR(-EIO);
1084 } 1084 }
1085 1085
1086 if (i915.enable_execlists && !ctx->engine[engine->id].state) {
1087 int ret = intel_lr_context_deferred_alloc(ctx, engine);
1088 if (ret) {
1089 DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
1090 return ERR_PTR(ret);
1091 }
1092 }
1093
1094 return ctx; 1086 return ctx;
1095} 1087}
1096 1088
@@ -1125,7 +1117,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
1125 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { 1117 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
1126 i915_gem_request_assign(&obj->last_fenced_req, req); 1118 i915_gem_request_assign(&obj->last_fenced_req, req);
1127 if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { 1119 if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
1128 struct drm_i915_private *dev_priv = to_i915(engine->dev); 1120 struct drm_i915_private *dev_priv = engine->i915;
1129 list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list, 1121 list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
1130 &dev_priv->mm.fence_list); 1122 &dev_priv->mm.fence_list);
1131 } 1123 }
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index a2b938ec01a7..2b6bdc267fb5 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -745,15 +745,15 @@ i915_gem_swizzle_page(struct page *page)
745void 745void
746i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) 746i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
747{ 747{
748 struct sg_page_iter sg_iter; 748 struct sgt_iter sgt_iter;
749 struct page *page;
749 int i; 750 int i;
750 751
751 if (obj->bit_17 == NULL) 752 if (obj->bit_17 == NULL)
752 return; 753 return;
753 754
754 i = 0; 755 i = 0;
755 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 756 for_each_sgt_page(page, sgt_iter, obj->pages) {
756 struct page *page = sg_page_iter_page(&sg_iter);
757 char new_bit_17 = page_to_phys(page) >> 17; 757 char new_bit_17 = page_to_phys(page) >> 17;
758 if ((new_bit_17 & 0x1) != 758 if ((new_bit_17 & 0x1) !=
759 (test_bit(i, obj->bit_17) != 0)) { 759 (test_bit(i, obj->bit_17) != 0)) {
@@ -775,7 +775,8 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
775void 775void
776i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) 776i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
777{ 777{
778 struct sg_page_iter sg_iter; 778 struct sgt_iter sgt_iter;
779 struct page *page;
779 int page_count = obj->base.size >> PAGE_SHIFT; 780 int page_count = obj->base.size >> PAGE_SHIFT;
780 int i; 781 int i;
781 782
@@ -790,8 +791,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
790 } 791 }
791 792
792 i = 0; 793 i = 0;
793 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 794
794 if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17)) 795 for_each_sgt_page(page, sgt_iter, obj->pages) {
796 if (page_to_phys(page) & (1 << 17))
795 __set_bit(i, obj->bit_17); 797 __set_bit(i, obj->bit_17);
796 else 798 else
797 __clear_bit(i, obj->bit_17); 799 __clear_bit(i, obj->bit_17);
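
An illustrative aside, not part of the patch: both loops above drop for_each_sg_page() plus sg_page_iter_page() in favour of the lighter sgt_iter walker introduced earlier in this series. A minimal sketch of the struct-page flavour, mirroring the bit-17 bookkeeping and assuming a populated obj->pages table:

static unsigned int count_bit17_pages(struct drm_i915_gem_object *obj)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	unsigned int count = 0;

	/* visits every backing page without the heavier sg_page_iter state */
	for_each_sgt_page(page, sgt_iter, obj->pages)
		if (page_to_phys(page) & (1 << 17))
			count++;

	return count;
}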
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 92acdff9dad3..46684779d4d6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -93,6 +93,13 @@
93 * 93 *
94 */ 94 */
95 95
96static inline struct i915_ggtt *
97i915_vm_to_ggtt(struct i915_address_space *vm)
98{
99 GEM_BUG_ON(!i915_is_ggtt(vm));
100 return container_of(vm, struct i915_ggtt, base);
101}
102
96static int 103static int
97i915_get_ggtt_vma_pages(struct i915_vma *vma); 104i915_get_ggtt_vma_pages(struct i915_vma *vma);
98 105
@@ -103,25 +110,29 @@ const struct i915_ggtt_view i915_ggtt_view_rotated = {
103 .type = I915_GGTT_VIEW_ROTATED, 110 .type = I915_GGTT_VIEW_ROTATED,
104}; 111};
105 112
106static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) 113int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
114 int enable_ppgtt)
107{ 115{
108 bool has_aliasing_ppgtt; 116 bool has_aliasing_ppgtt;
109 bool has_full_ppgtt; 117 bool has_full_ppgtt;
110 bool has_full_48bit_ppgtt; 118 bool has_full_48bit_ppgtt;
111 119
112 has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6; 120 has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
113 has_full_ppgtt = INTEL_INFO(dev)->gen >= 7; 121 has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
114 has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9; 122 has_full_48bit_ppgtt =
123 IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
115 124
116 if (intel_vgpu_active(dev)) 125 if (intel_vgpu_active(dev_priv))
117 has_full_ppgtt = false; /* emulation is too hard */ 126 has_full_ppgtt = false; /* emulation is too hard */
118 127
128 if (!has_aliasing_ppgtt)
129 return 0;
130
119 /* 131 /*
120 * We don't allow disabling PPGTT for gen9+ as it's a requirement for 132 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
121 * execlists, the sole mechanism available to submit work. 133 * execlists, the sole mechanism available to submit work.
122 */ 134 */
123 if (INTEL_INFO(dev)->gen < 9 && 135 if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
124 (enable_ppgtt == 0 || !has_aliasing_ppgtt))
125 return 0; 136 return 0;
126 137
127 if (enable_ppgtt == 1) 138 if (enable_ppgtt == 1)
@@ -135,19 +146,19 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
135 146
136#ifdef CONFIG_INTEL_IOMMU 147#ifdef CONFIG_INTEL_IOMMU
137 /* Disable ppgtt on SNB if VT-d is on. */ 148 /* Disable ppgtt on SNB if VT-d is on. */
138 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { 149 if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
139 DRM_INFO("Disabling PPGTT because VT-d is on\n"); 150 DRM_INFO("Disabling PPGTT because VT-d is on\n");
140 return 0; 151 return 0;
141 } 152 }
142#endif 153#endif
143 154
144 /* Early VLV doesn't have this */ 155 /* Early VLV doesn't have this */
145 if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) { 156 if (IS_VALLEYVIEW(dev_priv) && dev_priv->dev->pdev->revision < 0xb) {
146 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); 157 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
147 return 0; 158 return 0;
148 } 159 }
149 160
150 if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists) 161 if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
151 return has_full_48bit_ppgtt ? 3 : 2; 162 return has_full_48bit_ppgtt ? 3 : 2;
152 else 163 else
153 return has_aliasing_ppgtt ? 1 : 0; 164 return has_aliasing_ppgtt ? 1 : 0;
@@ -866,6 +877,7 @@ static void gen8_free_page_tables(struct drm_device *dev,
866static int gen8_init_scratch(struct i915_address_space *vm) 877static int gen8_init_scratch(struct i915_address_space *vm)
867{ 878{
868 struct drm_device *dev = vm->dev; 879 struct drm_device *dev = vm->dev;
880 int ret;
869 881
870 vm->scratch_page = alloc_scratch_page(dev); 882 vm->scratch_page = alloc_scratch_page(dev);
871 if (IS_ERR(vm->scratch_page)) 883 if (IS_ERR(vm->scratch_page))
@@ -873,24 +885,21 @@ static int gen8_init_scratch(struct i915_address_space *vm)
873 885
874 vm->scratch_pt = alloc_pt(dev); 886 vm->scratch_pt = alloc_pt(dev);
875 if (IS_ERR(vm->scratch_pt)) { 887 if (IS_ERR(vm->scratch_pt)) {
876 free_scratch_page(dev, vm->scratch_page); 888 ret = PTR_ERR(vm->scratch_pt);
877 return PTR_ERR(vm->scratch_pt); 889 goto free_scratch_page;
878 } 890 }
879 891
880 vm->scratch_pd = alloc_pd(dev); 892 vm->scratch_pd = alloc_pd(dev);
881 if (IS_ERR(vm->scratch_pd)) { 893 if (IS_ERR(vm->scratch_pd)) {
882 free_pt(dev, vm->scratch_pt); 894 ret = PTR_ERR(vm->scratch_pd);
883 free_scratch_page(dev, vm->scratch_page); 895 goto free_pt;
884 return PTR_ERR(vm->scratch_pd);
885 } 896 }
886 897
887 if (USES_FULL_48BIT_PPGTT(dev)) { 898 if (USES_FULL_48BIT_PPGTT(dev)) {
888 vm->scratch_pdp = alloc_pdp(dev); 899 vm->scratch_pdp = alloc_pdp(dev);
889 if (IS_ERR(vm->scratch_pdp)) { 900 if (IS_ERR(vm->scratch_pdp)) {
890 free_pd(dev, vm->scratch_pd); 901 ret = PTR_ERR(vm->scratch_pdp);
891 free_pt(dev, vm->scratch_pt); 902 goto free_pd;
892 free_scratch_page(dev, vm->scratch_page);
893 return PTR_ERR(vm->scratch_pdp);
894 } 903 }
895 } 904 }
896 905
@@ -900,6 +909,15 @@ static int gen8_init_scratch(struct i915_address_space *vm)
900 gen8_initialize_pdp(vm, vm->scratch_pdp); 909 gen8_initialize_pdp(vm, vm->scratch_pdp);
901 910
902 return 0; 911 return 0;
912
913free_pd:
914 free_pd(dev, vm->scratch_pd);
915free_pt:
916 free_pt(dev, vm->scratch_pt);
917free_scratch_page:
918 free_scratch_page(dev, vm->scratch_page);
919
920 return ret;
903} 921}
904 922
905static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) 923static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
@@ -978,7 +996,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
978{ 996{
979 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 997 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
980 998
981 if (intel_vgpu_active(vm->dev)) 999 if (intel_vgpu_active(to_i915(vm->dev)))
982 gen8_ppgtt_notify_vgt(ppgtt, false); 1000 gen8_ppgtt_notify_vgt(ppgtt, false);
983 1001
984 if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) 1002 if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
@@ -1529,14 +1547,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1529 0, 0, 1547 0, 0,
1530 GEN8_PML4E_SHIFT); 1548 GEN8_PML4E_SHIFT);
1531 1549
1532 if (intel_vgpu_active(ppgtt->base.dev)) { 1550 if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
1533 ret = gen8_preallocate_top_level_pdps(ppgtt); 1551 ret = gen8_preallocate_top_level_pdps(ppgtt);
1534 if (ret) 1552 if (ret)
1535 goto free_scratch; 1553 goto free_scratch;
1536 } 1554 }
1537 } 1555 }
1538 1556
1539 if (intel_vgpu_active(ppgtt->base.dev)) 1557 if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
1540 gen8_ppgtt_notify_vgt(ppgtt, true); 1558 gen8_ppgtt_notify_vgt(ppgtt, true);
1541 1559
1542 return 0; 1560 return 0;
@@ -1821,20 +1839,19 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1821 enum i915_cache_level cache_level, u32 flags) 1839 enum i915_cache_level cache_level, u32 flags)
1822{ 1840{
1823 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1841 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1824 gen6_pte_t *pt_vaddr;
1825 unsigned first_entry = start >> PAGE_SHIFT; 1842 unsigned first_entry = start >> PAGE_SHIFT;
1826 unsigned act_pt = first_entry / GEN6_PTES; 1843 unsigned act_pt = first_entry / GEN6_PTES;
1827 unsigned act_pte = first_entry % GEN6_PTES; 1844 unsigned act_pte = first_entry % GEN6_PTES;
1828 struct sg_page_iter sg_iter; 1845 gen6_pte_t *pt_vaddr = NULL;
1846 struct sgt_iter sgt_iter;
1847 dma_addr_t addr;
1829 1848
1830 pt_vaddr = NULL; 1849 for_each_sgt_dma(addr, sgt_iter, pages) {
1831 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
1832 if (pt_vaddr == NULL) 1850 if (pt_vaddr == NULL)
1833 pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]); 1851 pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
1834 1852
1835 pt_vaddr[act_pte] = 1853 pt_vaddr[act_pte] =
1836 vm->pte_encode(sg_page_iter_dma_address(&sg_iter), 1854 vm->pte_encode(addr, cache_level, true, flags);
1837 cache_level, true, flags);
1838 1855
1839 if (++act_pte == GEN6_PTES) { 1856 if (++act_pte == GEN6_PTES) {
1840 kunmap_px(ppgtt, pt_vaddr); 1857 kunmap_px(ppgtt, pt_vaddr);
@@ -1843,6 +1860,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1843 act_pte = 0; 1860 act_pte = 0;
1844 } 1861 }
1845 } 1862 }
1863
1846 if (pt_vaddr) 1864 if (pt_vaddr)
1847 kunmap_px(ppgtt, pt_vaddr); 1865 kunmap_px(ppgtt, pt_vaddr);
1848} 1866}
@@ -2064,7 +2082,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
2064 } else 2082 } else
2065 BUG(); 2083 BUG();
2066 2084
2067 if (intel_vgpu_active(dev)) 2085 if (intel_vgpu_active(dev_priv))
2068 ppgtt->switch_mm = vgpu_mm_switch; 2086 ppgtt->switch_mm = vgpu_mm_switch;
2069 2087
2070 ret = gen6_ppgtt_alloc(ppgtt); 2088 ret = gen6_ppgtt_alloc(ppgtt);
@@ -2140,7 +2158,7 @@ static void gtt_write_workarounds(struct drm_device *dev)
2140 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); 2158 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2141} 2159}
2142 2160
2143int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) 2161static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
2144{ 2162{
2145 struct drm_i915_private *dev_priv = dev->dev_private; 2163 struct drm_i915_private *dev_priv = dev->dev_private;
2146 int ret = 0; 2164 int ret = 0;
@@ -2179,20 +2197,6 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
2179 return 0; 2197 return 0;
2180} 2198}
2181 2199
2182int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
2183{
2184 struct drm_i915_private *dev_priv = req->i915;
2185 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2186
2187 if (i915.enable_execlists)
2188 return 0;
2189
2190 if (!ppgtt)
2191 return 0;
2192
2193 return ppgtt->switch_mm(ppgtt, req);
2194}
2195
2196struct i915_hw_ppgtt * 2200struct i915_hw_ppgtt *
2197i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv) 2201i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
2198{ 2202{
@@ -2275,12 +2279,11 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
2275 dev_priv->mm.interruptible = interruptible; 2279 dev_priv->mm.interruptible = interruptible;
2276} 2280}
2277 2281
2278void i915_check_and_clear_faults(struct drm_device *dev) 2282void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
2279{ 2283{
2280 struct drm_i915_private *dev_priv = dev->dev_private;
2281 struct intel_engine_cs *engine; 2284 struct intel_engine_cs *engine;
2282 2285
2283 if (INTEL_INFO(dev)->gen < 6) 2286 if (INTEL_INFO(dev_priv)->gen < 6)
2284 return; 2287 return;
2285 2288
2286 for_each_engine(engine, dev_priv) { 2289 for_each_engine(engine, dev_priv) {
@@ -2324,7 +2327,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
2324 if (INTEL_INFO(dev)->gen < 6) 2327 if (INTEL_INFO(dev)->gen < 6)
2325 return; 2328 return;
2326 2329
2327 i915_check_and_clear_faults(dev); 2330 i915_check_and_clear_faults(dev_priv);
2328 2331
2329 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, 2332 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
2330 true); 2333 true);
@@ -2358,23 +2361,21 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2358 enum i915_cache_level level, u32 unused) 2361 enum i915_cache_level level, u32 unused)
2359{ 2362{
2360 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2363 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2361 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2364 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2362 unsigned first_entry = start >> PAGE_SHIFT; 2365 struct sgt_iter sgt_iter;
2363 gen8_pte_t __iomem *gtt_entries = 2366 gen8_pte_t __iomem *gtt_entries;
2364 (gen8_pte_t __iomem *)ggtt->gsm + first_entry; 2367 gen8_pte_t gtt_entry;
2365 int i = 0; 2368 dma_addr_t addr;
2366 struct sg_page_iter sg_iter;
2367 dma_addr_t addr = 0; /* shut up gcc */
2368 int rpm_atomic_seq; 2369 int rpm_atomic_seq;
2370 int i = 0;
2369 2371
2370 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); 2372 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2371 2373
2372 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 2374 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
2373 addr = sg_dma_address(sg_iter.sg) + 2375
2374 (sg_iter.sg_pgoffset << PAGE_SHIFT); 2376 for_each_sgt_dma(addr, sgt_iter, st) {
2375 gen8_set_pte(&gtt_entries[i], 2377 gtt_entry = gen8_pte_encode(addr, level, true);
2376 gen8_pte_encode(addr, level, true)); 2378 gen8_set_pte(&gtt_entries[i++], gtt_entry);
2377 i++;
2378 } 2379 }
2379 2380
2380 /* 2381 /*
@@ -2385,8 +2386,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2385 * hardware should work, we must keep this posting read for paranoia. 2386 * hardware should work, we must keep this posting read for paranoia.
2386 */ 2387 */
2387 if (i != 0) 2388 if (i != 0)
2388 WARN_ON(readq(&gtt_entries[i-1]) 2389 WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);
2389 != gen8_pte_encode(addr, level, true));
2390 2390
2391 /* This next bit makes the above posting read even more important. We 2391 /* This next bit makes the above posting read even more important. We
2392 * want to flush the TLBs only after we're certain all the PTE updates 2392 * want to flush the TLBs only after we're certain all the PTE updates
@@ -2436,21 +2436,21 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2436 enum i915_cache_level level, u32 flags) 2436 enum i915_cache_level level, u32 flags)
2437{ 2437{
2438 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2438 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2439 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2439 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2440 unsigned first_entry = start >> PAGE_SHIFT; 2440 struct sgt_iter sgt_iter;
2441 gen6_pte_t __iomem *gtt_entries = 2441 gen6_pte_t __iomem *gtt_entries;
2442 (gen6_pte_t __iomem *)ggtt->gsm + first_entry; 2442 gen6_pte_t gtt_entry;
2443 int i = 0; 2443 dma_addr_t addr;
2444 struct sg_page_iter sg_iter;
2445 dma_addr_t addr = 0;
2446 int rpm_atomic_seq; 2444 int rpm_atomic_seq;
2445 int i = 0;
2447 2446
2448 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); 2447 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2449 2448
2450 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 2449 gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
2451 addr = sg_page_iter_dma_address(&sg_iter); 2450
2452 iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]); 2451 for_each_sgt_dma(addr, sgt_iter, st) {
2453 i++; 2452 gtt_entry = vm->pte_encode(addr, level, true, flags);
2453 iowrite32(gtt_entry, &gtt_entries[i++]);
2454 } 2454 }
2455 2455
2456 /* XXX: This serves as a posting read to make sure that the PTE has 2456 /* XXX: This serves as a posting read to make sure that the PTE has
@@ -2459,10 +2459,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2459 * of NUMA access patterns. Therefore, even with the way we assume 2459 * of NUMA access patterns. Therefore, even with the way we assume
2460 * hardware should work, we must keep this posting read for paranoia. 2460 * hardware should work, we must keep this posting read for paranoia.
2461 */ 2461 */
2462 if (i != 0) { 2462 if (i != 0)
2463 unsigned long gtt = readl(&gtt_entries[i-1]); 2463 WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry);
2464 WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
2465 }
2466 2464
2467 /* This next bit makes the above posting read even more important. We 2465 /* This next bit makes the above posting read even more important. We
2468 * want to flush the TLBs only after we're certain all the PTE updates 2466 * want to flush the TLBs only after we're certain all the PTE updates
@@ -2474,13 +2472,20 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2474 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); 2472 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2475} 2473}
2476 2474
2475static void nop_clear_range(struct i915_address_space *vm,
2476 uint64_t start,
2477 uint64_t length,
2478 bool use_scratch)
2479{
2480}
2481
2477static void gen8_ggtt_clear_range(struct i915_address_space *vm, 2482static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2478 uint64_t start, 2483 uint64_t start,
2479 uint64_t length, 2484 uint64_t length,
2480 bool use_scratch) 2485 bool use_scratch)
2481{ 2486{
2482 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2487 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2483 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2488 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2484 unsigned first_entry = start >> PAGE_SHIFT; 2489 unsigned first_entry = start >> PAGE_SHIFT;
2485 unsigned num_entries = length >> PAGE_SHIFT; 2490 unsigned num_entries = length >> PAGE_SHIFT;
2486 gen8_pte_t scratch_pte, __iomem *gtt_base = 2491 gen8_pte_t scratch_pte, __iomem *gtt_base =
@@ -2512,7 +2517,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2512 bool use_scratch) 2517 bool use_scratch)
2513{ 2518{
2514 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2519 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2515 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2520 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2516 unsigned first_entry = start >> PAGE_SHIFT; 2521 unsigned first_entry = start >> PAGE_SHIFT;
2517 unsigned num_entries = length >> PAGE_SHIFT; 2522 unsigned num_entries = length >> PAGE_SHIFT;
2518 gen6_pte_t scratch_pte, __iomem *gtt_base = 2523 gen6_pte_t scratch_pte, __iomem *gtt_base =
@@ -2727,7 +2732,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
2727 i915_address_space_init(&ggtt->base, dev_priv); 2732 i915_address_space_init(&ggtt->base, dev_priv);
2728 ggtt->base.total += PAGE_SIZE; 2733 ggtt->base.total += PAGE_SIZE;
2729 2734
2730 if (intel_vgpu_active(dev)) { 2735 if (intel_vgpu_active(dev_priv)) {
2731 ret = intel_vgt_balloon(dev); 2736 ret = intel_vgt_balloon(dev);
2732 if (ret) 2737 if (ret)
2733 return ret; 2738 return ret;
@@ -2831,7 +2836,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
2831 i915_gem_cleanup_stolen(dev); 2836 i915_gem_cleanup_stolen(dev);
2832 2837
2833 if (drm_mm_initialized(&ggtt->base.mm)) { 2838 if (drm_mm_initialized(&ggtt->base.mm)) {
2834 if (intel_vgpu_active(dev)) 2839 if (intel_vgpu_active(dev_priv))
2835 intel_vgt_deballoon(); 2840 intel_vgt_deballoon();
2836 2841
2837 drm_mm_takedown(&ggtt->base.mm); 2842 drm_mm_takedown(&ggtt->base.mm);
@@ -3069,14 +3074,17 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3069 3074
3070 ret = ggtt_probe_common(dev, ggtt->size); 3075 ret = ggtt_probe_common(dev, ggtt->size);
3071 3076
3072 ggtt->base.clear_range = gen8_ggtt_clear_range;
3073 if (IS_CHERRYVIEW(dev_priv))
3074 ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
3075 else
3076 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
3077 ggtt->base.bind_vma = ggtt_bind_vma; 3077 ggtt->base.bind_vma = ggtt_bind_vma;
3078 ggtt->base.unbind_vma = ggtt_unbind_vma; 3078 ggtt->base.unbind_vma = ggtt_unbind_vma;
3079 3079
3080 ggtt->base.clear_range = nop_clear_range;
3081 if (!USES_FULL_PPGTT(dev_priv))
3082 ggtt->base.clear_range = gen8_ggtt_clear_range;
3083
3084 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
3085 if (IS_CHERRYVIEW(dev_priv))
3086 ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
3087
3080 return ret; 3088 return ret;
3081} 3089}
3082 3090
@@ -3219,14 +3227,6 @@ int i915_ggtt_init_hw(struct drm_device *dev)
3219 if (intel_iommu_gfx_mapped) 3227 if (intel_iommu_gfx_mapped)
3220 DRM_INFO("VT-d active for gfx access\n"); 3228 DRM_INFO("VT-d active for gfx access\n");
3221#endif 3229#endif
3222 /*
3223 * i915.enable_ppgtt is read-only, so do an early pass to validate the
3224 * user's requested state against the hardware/driver capabilities. We
3225 * do this now so that we can print out any log messages once rather
3226 * than every time we check intel_enable_ppgtt().
3227 */
3228 i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
3229 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
3230 3230
3231 return 0; 3231 return 0;
3232 3232
@@ -3250,9 +3250,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
3250 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3250 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3251 struct drm_i915_gem_object *obj; 3251 struct drm_i915_gem_object *obj;
3252 struct i915_vma *vma; 3252 struct i915_vma *vma;
3253 bool flush;
3254 3253
3255 i915_check_and_clear_faults(dev); 3254 i915_check_and_clear_faults(dev_priv);
3256 3255
3257 /* First fill our portion of the GTT with scratch pages */ 3256 /* First fill our portion of the GTT with scratch pages */
3258 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, 3257 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
@@ -3260,19 +3259,16 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
3260 3259
3261 /* Cache flush objects bound into GGTT and rebind them. */ 3260 /* Cache flush objects bound into GGTT and rebind them. */
3262 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 3261 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
3263 flush = false;
3264 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3262 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3265 if (vma->vm != &ggtt->base) 3263 if (vma->vm != &ggtt->base)
3266 continue; 3264 continue;
3267 3265
3268 WARN_ON(i915_vma_bind(vma, obj->cache_level, 3266 WARN_ON(i915_vma_bind(vma, obj->cache_level,
3269 PIN_UPDATE)); 3267 PIN_UPDATE));
3270
3271 flush = true;
3272 } 3268 }
3273 3269
3274 if (flush) 3270 if (obj->pin_display)
3275 i915_gem_clflush_object(obj, obj->pin_display); 3271 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
3276 } 3272 }
3277 3273
3278 if (INTEL_INFO(dev)->gen >= 8) { 3274 if (INTEL_INFO(dev)->gen >= 8) {
@@ -3398,9 +3394,11 @@ static struct sg_table *
3398intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, 3394intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
3399 struct drm_i915_gem_object *obj) 3395 struct drm_i915_gem_object *obj)
3400{ 3396{
3397 const size_t n_pages = obj->base.size / PAGE_SIZE;
3401 unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height; 3398 unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
3402 unsigned int size_pages_uv; 3399 unsigned int size_pages_uv;
3403 struct sg_page_iter sg_iter; 3400 struct sgt_iter sgt_iter;
3401 dma_addr_t dma_addr;
3404 unsigned long i; 3402 unsigned long i;
3405 dma_addr_t *page_addr_list; 3403 dma_addr_t *page_addr_list;
3406 struct sg_table *st; 3404 struct sg_table *st;
@@ -3409,7 +3407,7 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
3409 int ret = -ENOMEM; 3407 int ret = -ENOMEM;
3410 3408
3411 /* Allocate a temporary list of source pages for random access. */ 3409 /* Allocate a temporary list of source pages for random access. */
3412 page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE, 3410 page_addr_list = drm_malloc_gfp(n_pages,
3413 sizeof(dma_addr_t), 3411 sizeof(dma_addr_t),
3414 GFP_TEMPORARY); 3412 GFP_TEMPORARY);
3415 if (!page_addr_list) 3413 if (!page_addr_list)
@@ -3432,11 +3430,10 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
3432 3430
3433 /* Populate source page list from the object. */ 3431 /* Populate source page list from the object. */
3434 i = 0; 3432 i = 0;
3435 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 3433 for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
3436 page_addr_list[i] = sg_page_iter_dma_address(&sg_iter); 3434 page_addr_list[i++] = dma_addr;
3437 i++;
3438 }
3439 3435
3436 GEM_BUG_ON(i != n_pages);
3440 st->nents = 0; 3437 st->nents = 0;
3441 sg = st->sgl; 3438 sg = st->sgl;
3442 3439
@@ -3634,3 +3631,29 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
3634 return obj->base.size; 3631 return obj->base.size;
3635 } 3632 }
3636} 3633}
3634
3635void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
3636{
3637 void __iomem *ptr;
3638
3639 lockdep_assert_held(&vma->vm->dev->struct_mutex);
3640 if (WARN_ON(!vma->obj->map_and_fenceable))
3641 return ERR_PTR(-ENODEV);
3642
3643 GEM_BUG_ON(!vma->is_ggtt);
3644 GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);
3645
3646 ptr = vma->iomap;
3647 if (ptr == NULL) {
3648 ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
3649 vma->node.start,
3650 vma->node.size);
3651 if (ptr == NULL)
3652 return ERR_PTR(-ENOMEM);
3653
3654 vma->iomap = ptr;
3655 }
3656
3657 vma->pin_count++;
3658 return ptr;
3659}
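
An illustrative aside, not part of the patch: the GGTT and ppgtt insert_entries paths above use the DMA-address flavour of the same iterator. A hedged sketch, assuming for_each_sgt_dma() from this series and an already dma-mapped sg_table:

static void dump_dma_addresses(struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	dma_addr_t addr;
	unsigned int i = 0;

	/* yields the bus address of each PAGE_SIZE chunk in order */
	for_each_sgt_dma(addr, sgt_iter, pages)
		DRM_DEBUG_DRIVER("entry %u -> %pad\n", i++, &addr);
}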
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 0008543d55f6..62be77cac5cd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -34,6 +34,8 @@
34#ifndef __I915_GEM_GTT_H__ 34#ifndef __I915_GEM_GTT_H__
35#define __I915_GEM_GTT_H__ 35#define __I915_GEM_GTT_H__
36 36
37#include <linux/io-mapping.h>
38
37struct drm_i915_file_private; 39struct drm_i915_file_private;
38 40
39typedef uint32_t gen6_pte_t; 41typedef uint32_t gen6_pte_t;
@@ -175,6 +177,7 @@ struct i915_vma {
175 struct drm_mm_node node; 177 struct drm_mm_node node;
176 struct drm_i915_gem_object *obj; 178 struct drm_i915_gem_object *obj;
177 struct i915_address_space *vm; 179 struct i915_address_space *vm;
180 void __iomem *iomap;
178 181
179 /** Flags and address space this VMA is bound to */ 182 /** Flags and address space this VMA is bound to */
180#define GLOBAL_BIND (1<<0) 183#define GLOBAL_BIND (1<<0)
@@ -518,9 +521,7 @@ int i915_ggtt_enable_hw(struct drm_device *dev);
518void i915_gem_init_ggtt(struct drm_device *dev); 521void i915_gem_init_ggtt(struct drm_device *dev);
519void i915_ggtt_cleanup_hw(struct drm_device *dev); 522void i915_ggtt_cleanup_hw(struct drm_device *dev);
520 523
521int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
522int i915_ppgtt_init_hw(struct drm_device *dev); 524int i915_ppgtt_init_hw(struct drm_device *dev);
523int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
524void i915_ppgtt_release(struct kref *kref); 525void i915_ppgtt_release(struct kref *kref);
525struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev, 526struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
526 struct drm_i915_file_private *fpriv); 527 struct drm_i915_file_private *fpriv);
@@ -535,7 +536,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
535 kref_put(&ppgtt->ref, i915_ppgtt_release); 536 kref_put(&ppgtt->ref, i915_ppgtt_release);
536} 537}
537 538
538void i915_check_and_clear_faults(struct drm_device *dev); 539void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
539void i915_gem_suspend_gtt_mappings(struct drm_device *dev); 540void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
540void i915_gem_restore_gtt_mappings(struct drm_device *dev); 541void i915_gem_restore_gtt_mappings(struct drm_device *dev);
541 542
@@ -560,4 +561,36 @@ size_t
560i915_ggtt_view_size(struct drm_i915_gem_object *obj, 561i915_ggtt_view_size(struct drm_i915_gem_object *obj,
561 const struct i915_ggtt_view *view); 562 const struct i915_ggtt_view *view);
562 563
564/**
565 * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
566 * @vma: VMA to iomap
567 *
568 * The passed in VMA has to be pinned in the global GTT mappable region.
569 * An extra pinning of the VMA is acquired for the return iomapping,
570 * the caller must call i915_vma_unpin_iomap to relinquish the pinning
571 * after the iomapping is no longer required.
572 *
573 * Callers must hold the struct_mutex.
574 *
575 * Returns a valid iomapped pointer or ERR_PTR.
576 */
577void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
578
579/**
580 * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_iomap
581 * @vma: VMA to unpin
582 *
583 * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
584 *
585 * Callers must hold the struct_mutex. This function is only valid to be
586 * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
587 */
588static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
589{
590 lockdep_assert_held(&vma->vm->dev->struct_mutex);
591 GEM_BUG_ON(vma->pin_count == 0);
592 GEM_BUG_ON(vma->iomap == NULL);
593 vma->pin_count--;
594}
595
563#endif 596#endif
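
An illustrative aside, not part of the patch: a hedged usage sketch for the iomap helpers documented above, which maps a GGTT-bound, mappable VMA through the aperture, writes one dword, and drops the extra pin. It assumes the caller already holds struct_mutex, as both helpers require.

static int poke_first_dword(struct i915_vma *vma, u32 value)
{
	void __iomem *ptr;

	ptr = i915_vma_pin_iomap(vma);	/* takes an extra pin on success */
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	writel(value, ptr);		/* WC mapping via the aperture */

	i915_vma_unpin_iomap(vma);	/* releases the pin taken above */
	return 0;
}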
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 71611bf21fca..7c93327b70fe 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -29,7 +29,7 @@
29#include "intel_renderstate.h" 29#include "intel_renderstate.h"
30 30
31static const struct intel_renderstate_rodata * 31static const struct intel_renderstate_rodata *
32render_state_get_rodata(struct drm_device *dev, const int gen) 32render_state_get_rodata(const int gen)
33{ 33{
34 switch (gen) { 34 switch (gen) {
35 case 6: 35 case 6:
@@ -45,21 +45,22 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
45 return NULL; 45 return NULL;
46} 46}
47 47
48static int render_state_init(struct render_state *so, struct drm_device *dev) 48static int render_state_init(struct render_state *so,
49 struct drm_i915_private *dev_priv)
49{ 50{
50 int ret; 51 int ret;
51 52
52 so->gen = INTEL_INFO(dev)->gen; 53 so->gen = INTEL_GEN(dev_priv);
53 so->rodata = render_state_get_rodata(dev, so->gen); 54 so->rodata = render_state_get_rodata(so->gen);
54 if (so->rodata == NULL) 55 if (so->rodata == NULL)
55 return 0; 56 return 0;
56 57
57 if (so->rodata->batch_items * 4 > 4096) 58 if (so->rodata->batch_items * 4 > 4096)
58 return -EINVAL; 59 return -EINVAL;
59 60
60 so->obj = i915_gem_alloc_object(dev, 4096); 61 so->obj = i915_gem_object_create(dev_priv->dev, 4096);
61 if (so->obj == NULL) 62 if (IS_ERR(so->obj))
62 return -ENOMEM; 63 return PTR_ERR(so->obj);
63 64
64 ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0); 65 ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
65 if (ret) 66 if (ret)
@@ -177,7 +178,7 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
177 if (WARN_ON(engine->id != RCS)) 178 if (WARN_ON(engine->id != RCS))
178 return -ENOENT; 179 return -ENOENT;
179 180
180 ret = render_state_init(so, engine->dev); 181 ret = render_state_init(so, engine->i915);
181 if (ret) 182 if (ret)
182 return ret; 183 return ret;
183 184
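
An illustrative aside, not part of the patch: render_state_init() above also switches from the NULL-returning i915_gem_alloc_object() to i915_gem_object_create(), which reports failure as an ERR_PTR. A minimal sketch of the resulting error handling, with the helper name chosen purely for illustration:

static int alloc_render_scratch(struct drm_device *dev,
				struct drm_i915_gem_object **out)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create(dev, 4096);
	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* e.g. -ENOMEM; no NULL check needed */

	*out = obj;
	return 0;
}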
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 425e721aac58..538c30499848 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -131,7 +131,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
131 unsigned long count = 0; 131 unsigned long count = 0;
132 132
133 trace_i915_gem_shrink(dev_priv, target, flags); 133 trace_i915_gem_shrink(dev_priv, target, flags);
134 i915_gem_retire_requests(dev_priv->dev); 134 i915_gem_retire_requests(dev_priv);
135
136 /*
137 * Unbinding of objects will require HW access; Let us not wake the
138 * device just to recover a little memory. If absolutely necessary,
139 * we will force the wake during oom-notifier.
140 */
141 if ((flags & I915_SHRINK_BOUND) &&
142 !intel_runtime_pm_get_if_in_use(dev_priv))
143 flags &= ~I915_SHRINK_BOUND;
135 144
136 /* 145 /*
137 * As we may completely rewrite the (un)bound list whilst unbinding 146 * As we may completely rewrite the (un)bound list whilst unbinding
@@ -197,7 +206,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
197 list_splice(&still_in_list, phase->list); 206 list_splice(&still_in_list, phase->list);
198 } 207 }
199 208
200 i915_gem_retire_requests(dev_priv->dev); 209 if (flags & I915_SHRINK_BOUND)
210 intel_runtime_pm_put(dev_priv);
211
212 i915_gem_retire_requests(dev_priv);
201 213
202 return count; 214 return count;
203} 215}
@@ -345,7 +357,9 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
345 if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) 357 if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
346 return NOTIFY_DONE; 358 return NOTIFY_DONE;
347 359
360 intel_runtime_pm_get(dev_priv);
348 freed_pages = i915_gem_shrink_all(dev_priv); 361 freed_pages = i915_gem_shrink_all(dev_priv);
362 intel_runtime_pm_put(dev_priv);
349 363
350 /* Because we may be allocating inside our own driver, we cannot 364 /* Because we may be allocating inside our own driver, we cannot
351 * assert that there are no objects with pinned pages that are not 365 * assert that there are no objects with pinned pages that are not
@@ -386,17 +400,35 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
386 struct drm_i915_private *dev_priv = 400 struct drm_i915_private *dev_priv =
387 container_of(nb, struct drm_i915_private, mm.vmap_notifier); 401 container_of(nb, struct drm_i915_private, mm.vmap_notifier);
388 struct shrinker_lock_uninterruptible slu; 402 struct shrinker_lock_uninterruptible slu;
389 unsigned long freed_pages; 403 struct i915_vma *vma, *next;
404 unsigned long freed_pages = 0;
405 int ret;
390 406
391 if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) 407 if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
392 return NOTIFY_DONE; 408 return NOTIFY_DONE;
393 409
394 freed_pages = i915_gem_shrink(dev_priv, -1UL, 410 /* Force everything onto the inactive lists */
395 I915_SHRINK_BOUND | 411 ret = i915_gpu_idle(dev_priv->dev);
396 I915_SHRINK_UNBOUND | 412 if (ret)
397 I915_SHRINK_ACTIVE | 413 goto out;
398 I915_SHRINK_VMAPS); 414
415 intel_runtime_pm_get(dev_priv);
416 freed_pages += i915_gem_shrink(dev_priv, -1UL,
417 I915_SHRINK_BOUND |
418 I915_SHRINK_UNBOUND |
419 I915_SHRINK_ACTIVE |
420 I915_SHRINK_VMAPS);
421 intel_runtime_pm_put(dev_priv);
422
423 /* We also want to clear any cached iomaps as they wrap vmap */
424 list_for_each_entry_safe(vma, next,
425 &dev_priv->ggtt.base.inactive_list, vm_link) {
426 unsigned long count = vma->node.size >> PAGE_SHIFT;
427 if (vma->iomap && i915_vma_unbind(vma) == 0)
428 freed_pages += count;
429 }
399 430
431out:
400 i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu); 432 i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
401 433
402 *(unsigned long *)ptr += freed_pages; 434 *(unsigned long *)ptr += freed_pages;
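
An illustrative aside, not part of the patch: the shrinker hunks above wrap hardware-touching reclaim in runtime-PM references, taking an opportunistic wakeref for ordinary shrinking and a forced one for the OOM and vmap notifiers. A hedged sketch of the opportunistic pattern, using only the helpers visible in the diff:

static unsigned long shrink_bound_if_awake(struct drm_i915_private *dev_priv,
					   unsigned long target)
{
	unsigned long freed;

	/* don't wake the GPU just to reclaim a little memory */
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return i915_gem_shrink(dev_priv, target, I915_SHRINK_UNBOUND);

	freed = i915_gem_shrink(dev_priv, target,
				I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
	intel_runtime_pm_put(dev_priv);

	return freed;
}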
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index b7ce963fb8f8..f9253f2b7ba0 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -56,7 +56,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
56 56
57 /* See the comment at the drm_mm_init() call for more about this check. 57 /* See the comment at the drm_mm_init() call for more about this check.
58 * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */ 58 * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
59 if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096) 59 if (IS_GEN8(dev_priv) && start < 4096)
60 start = 4096; 60 start = 4096;
61 61
62 mutex_lock(&dev_priv->mm.stolen_lock); 62 mutex_lock(&dev_priv->mm.stolen_lock);
@@ -109,9 +109,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
109 if (INTEL_INFO(dev)->gen >= 3) { 109 if (INTEL_INFO(dev)->gen >= 3) {
110 u32 bsm; 110 u32 bsm;
111 111
112 pci_read_config_dword(dev->pdev, BSM, &bsm); 112 pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);
113 113
114 base = bsm & BSM_MASK; 114 base = bsm & INTEL_BSM_MASK;
115 } else if (IS_I865G(dev)) { 115 } else if (IS_I865G(dev)) {
116 u16 toud = 0; 116 u16 toud = 0;
117 117
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index b9bdb34032cd..a6eb5c47a49c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -125,7 +125,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
125 if (INTEL_INFO(obj->base.dev)->gen >= 4) 125 if (INTEL_INFO(obj->base.dev)->gen >= 4)
126 return true; 126 return true;
127 127
128 if (INTEL_INFO(obj->base.dev)->gen == 3) { 128 if (IS_GEN3(obj->base.dev)) {
129 if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) 129 if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
130 return false; 130 return false;
131 } else { 131 } else {
@@ -229,7 +229,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
229 */ 229 */
230 if (obj->map_and_fenceable && 230 if (obj->map_and_fenceable &&
231 !i915_gem_object_fence_ok(obj, args->tiling_mode)) 231 !i915_gem_object_fence_ok(obj, args->tiling_mode))
232 ret = i915_gem_object_ggtt_unbind(obj); 232 ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
233 233
234 if (ret == 0) { 234 if (ret == 0) {
235 if (obj->pages && 235 if (obj->pages &&
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 32d9726e38b1..2314c88323e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -706,7 +706,8 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
706static void 706static void
707i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) 707i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
708{ 708{
709 struct sg_page_iter sg_iter; 709 struct sgt_iter sgt_iter;
710 struct page *page;
710 711
711 BUG_ON(obj->userptr.work != NULL); 712 BUG_ON(obj->userptr.work != NULL);
712 __i915_gem_userptr_set_active(obj, false); 713 __i915_gem_userptr_set_active(obj, false);
@@ -716,9 +717,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
716 717
717 i915_gem_gtt_finish_object(obj); 718 i915_gem_gtt_finish_object(obj);
718 719
719 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 720 for_each_sgt_page(page, sgt_iter, obj->pages) {
720 struct page *page = sg_page_iter_page(&sg_iter);
721
722 if (obj->dirty) 721 if (obj->dirty)
723 set_page_dirty(page); 722 set_page_dirty(page);
724 723
@@ -855,11 +854,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
855 return 0; 854 return 0;
856} 855}
857 856
858int 857void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
859i915_gem_init_userptr(struct drm_device *dev)
860{ 858{
861 struct drm_i915_private *dev_priv = to_i915(dev);
862 mutex_init(&dev_priv->mm_lock); 859 mutex_init(&dev_priv->mm_lock);
863 hash_init(dev_priv->mm_structs); 860 hash_init(dev_priv->mm_structs);
864 return 0;
865} 861}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 89725c9efc25..34ff2459ceea 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -411,7 +411,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
411 err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); 411 err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
412 } 412 }
413 413
414 if (INTEL_INFO(dev)->gen == 7) 414 if (IS_GEN7(dev))
415 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); 415 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
416 416
417 for (i = 0; i < ARRAY_SIZE(error->ring); i++) 417 for (i = 0; i < ARRAY_SIZE(error->ring); i++)
@@ -824,19 +824,18 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
824 return error_code; 824 return error_code;
825} 825}
826 826
827static void i915_gem_record_fences(struct drm_device *dev, 827static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
828 struct drm_i915_error_state *error) 828 struct drm_i915_error_state *error)
829{ 829{
830 struct drm_i915_private *dev_priv = dev->dev_private;
831 int i; 830 int i;
832 831
833 if (IS_GEN3(dev) || IS_GEN2(dev)) { 832 if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
834 for (i = 0; i < dev_priv->num_fence_regs; i++) 833 for (i = 0; i < dev_priv->num_fence_regs; i++)
835 error->fence[i] = I915_READ(FENCE_REG(i)); 834 error->fence[i] = I915_READ(FENCE_REG(i));
836 } else if (IS_GEN5(dev) || IS_GEN4(dev)) { 835 } else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
837 for (i = 0; i < dev_priv->num_fence_regs; i++) 836 for (i = 0; i < dev_priv->num_fence_regs; i++)
838 error->fence[i] = I915_READ64(FENCE_REG_965_LO(i)); 837 error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
839 } else if (INTEL_INFO(dev)->gen >= 6) { 838 } else if (INTEL_GEN(dev_priv) >= 6) {
840 for (i = 0; i < dev_priv->num_fence_regs; i++) 839 for (i = 0; i < dev_priv->num_fence_regs; i++)
841 error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i)); 840 error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
842 } 841 }
@@ -851,7 +850,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
851 struct intel_engine_cs *to; 850 struct intel_engine_cs *to;
852 enum intel_engine_id id; 851 enum intel_engine_id id;
853 852
854 if (!i915_semaphore_is_enabled(dev_priv->dev)) 853 if (!i915_semaphore_is_enabled(dev_priv))
855 return; 854 return;
856 855
857 if (!error->semaphore_obj) 856 if (!error->semaphore_obj)
@@ -893,31 +892,29 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
893 } 892 }
894} 893}
895 894
896static void i915_record_ring_state(struct drm_device *dev, 895static void i915_record_ring_state(struct drm_i915_private *dev_priv,
897 struct drm_i915_error_state *error, 896 struct drm_i915_error_state *error,
898 struct intel_engine_cs *engine, 897 struct intel_engine_cs *engine,
899 struct drm_i915_error_ring *ering) 898 struct drm_i915_error_ring *ering)
900{ 899{
901 struct drm_i915_private *dev_priv = dev->dev_private; 900 if (INTEL_GEN(dev_priv) >= 6) {
902
903 if (INTEL_INFO(dev)->gen >= 6) {
904 ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base)); 901 ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
905 ering->fault_reg = I915_READ(RING_FAULT_REG(engine)); 902 ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
906 if (INTEL_INFO(dev)->gen >= 8) 903 if (INTEL_GEN(dev_priv) >= 8)
907 gen8_record_semaphore_state(dev_priv, error, engine, 904 gen8_record_semaphore_state(dev_priv, error, engine,
908 ering); 905 ering);
909 else 906 else
910 gen6_record_semaphore_state(dev_priv, engine, ering); 907 gen6_record_semaphore_state(dev_priv, engine, ering);
911 } 908 }
912 909
913 if (INTEL_INFO(dev)->gen >= 4) { 910 if (INTEL_GEN(dev_priv) >= 4) {
914 ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base)); 911 ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
915 ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base)); 912 ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
916 ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); 913 ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
917 ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base)); 914 ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
918 ering->instps = I915_READ(RING_INSTPS(engine->mmio_base)); 915 ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
919 ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base)); 916 ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
920 if (INTEL_INFO(dev)->gen >= 8) { 917 if (INTEL_GEN(dev_priv) >= 8) {
921 ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32; 918 ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
922 ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32; 919 ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
923 } 920 }
@@ -939,10 +936,10 @@ static void i915_record_ring_state(struct drm_device *dev,
939 ering->tail = I915_READ_TAIL(engine); 936 ering->tail = I915_READ_TAIL(engine);
940 ering->ctl = I915_READ_CTL(engine); 937 ering->ctl = I915_READ_CTL(engine);
941 938
942 if (I915_NEED_GFX_HWS(dev)) { 939 if (I915_NEED_GFX_HWS(dev_priv)) {
943 i915_reg_t mmio; 940 i915_reg_t mmio;
944 941
945 if (IS_GEN7(dev)) { 942 if (IS_GEN7(dev_priv)) {
946 switch (engine->id) { 943 switch (engine->id) {
947 default: 944 default:
948 case RCS: 945 case RCS:
@@ -958,7 +955,7 @@ static void i915_record_ring_state(struct drm_device *dev,
958 mmio = VEBOX_HWS_PGA_GEN7; 955 mmio = VEBOX_HWS_PGA_GEN7;
959 break; 956 break;
960 } 957 }
961 } else if (IS_GEN6(engine->dev)) { 958 } else if (IS_GEN6(engine->i915)) {
962 mmio = RING_HWS_PGA_GEN6(engine->mmio_base); 959 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
963 } else { 960 } else {
964 /* XXX: gen8 returns to sanity */ 961 /* XXX: gen8 returns to sanity */
@@ -971,18 +968,18 @@ static void i915_record_ring_state(struct drm_device *dev,
 	ering->hangcheck_score = engine->hangcheck.score;
 	ering->hangcheck_action = engine->hangcheck.action;
 
-	if (USES_PPGTT(dev)) {
+	if (USES_PPGTT(dev_priv)) {
 		int i;
 
 		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
 
-		if (IS_GEN6(dev))
+		if (IS_GEN6(dev_priv))
 			ering->vm_info.pp_dir_base =
 				I915_READ(RING_PP_DIR_BASE_READ(engine));
-		else if (IS_GEN7(dev))
+		else if (IS_GEN7(dev_priv))
 			ering->vm_info.pp_dir_base =
 				I915_READ(RING_PP_DIR_BASE(engine));
-		else if (INTEL_INFO(dev)->gen >= 8)
+		else if (INTEL_GEN(dev_priv) >= 8)
 			for (i = 0; i < 4; i++) {
 				ering->vm_info.pdp[i] =
 					I915_READ(GEN8_RING_PDP_UDW(engine, i));
@@ -998,7 +995,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
 					   struct drm_i915_error_state *error,
 					   struct drm_i915_error_ring *ering)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	struct drm_i915_gem_object *obj;
 
 	/* Currently render ring is the only HW context user */
@@ -1016,10 +1013,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
 	}
 }
 
-static void i915_gem_record_rings(struct drm_device *dev,
+static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 				  struct drm_i915_error_state *error)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_i915_gem_request *request;
 	int i, count;
@@ -1030,12 +1026,12 @@ static void i915_gem_record_rings(struct drm_device *dev,
 
 		error->ring[i].pid = -1;
 
-		if (engine->dev == NULL)
+		if (!intel_engine_initialized(engine))
 			continue;
 
 		error->ring[i].valid = true;
 
-		i915_record_ring_state(dev, error, engine, &error->ring[i]);
+		i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
 
 		request = i915_gem_find_active_request(engine);
 		if (request) {
@@ -1301,15 +1297,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
 
-	i915_get_extra_instdone(dev, error->extra_instdone);
+	i915_get_extra_instdone(dev_priv, error->extra_instdone);
 }
 
-static void i915_error_capture_msg(struct drm_device *dev,
+static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
 				   struct drm_i915_error_state *error,
 				   u32 engine_mask,
 				   const char *error_msg)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 ecode;
 	int ring_id = -1, len;
 
@@ -1317,7 +1312,7 @@ static void i915_error_capture_msg(struct drm_device *dev,
 
 	len = scnprintf(error->error_msg, sizeof(error->error_msg),
 			"GPU HANG: ecode %d:%d:0x%08x",
-			INTEL_INFO(dev)->gen, ring_id, ecode);
+			INTEL_GEN(dev_priv), ring_id, ecode);
 
 	if (ring_id != -1 && error->ring[ring_id].pid != -1)
 		len += scnprintf(error->error_msg + len,
@@ -1352,11 +1347,11 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
  * out a structure which becomes available in debugfs for user level tools
  * to pick up.
  */
-void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+			      u32 engine_mask,
 			      const char *error_msg)
 {
 	static bool warned;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_error_state *error;
 	unsigned long flags;
 
@@ -1372,15 +1367,15 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
 	i915_capture_gen_state(dev_priv, error);
 	i915_capture_reg_state(dev_priv, error);
 	i915_gem_capture_buffers(dev_priv, error);
-	i915_gem_record_fences(dev, error);
-	i915_gem_record_rings(dev, error);
+	i915_gem_record_fences(dev_priv, error);
+	i915_gem_record_rings(dev_priv, error);
 
 	do_gettimeofday(&error->time);
 
-	error->overlay = intel_overlay_capture_error_state(dev);
-	error->display = intel_display_capture_error_state(dev);
+	error->overlay = intel_overlay_capture_error_state(dev_priv);
+	error->display = intel_display_capture_error_state(dev_priv);
 
-	i915_error_capture_msg(dev, error, engine_mask, error_msg);
+	i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
 	DRM_INFO("%s\n", error->error_msg);
 
 	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
@@ -1400,7 +1395,7 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
 		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
 		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
 		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
-		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
+		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index);
 		warned = true;
 	}
 }
@@ -1450,17 +1445,17 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
 }
 
 /* NB: please notice the memset */
-void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
+void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
+			     uint32_t *instdone)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
 
-	if (IS_GEN2(dev) || IS_GEN3(dev))
+	if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
 		instdone[0] = I915_READ(GEN2_INSTDONE);
-	else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
+	else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
 		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
 		instdone[1] = I915_READ(GEN4_INSTDONE1);
-	} else if (INTEL_INFO(dev)->gen >= 7) {
+	} else if (INTEL_GEN(dev_priv) >= 7) {
 		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
 		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
 		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index 80786d9f9ad3..cf5a65be4fe0 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -67,11 +67,11 @@
 #define GUC_WOPCM_OFFSET_VALUE		0x80000		/* 512KB */
 #define GUC_MAX_IDLE_COUNT		_MMIO(0xC3E4)
 
+/* Defines WOPCM space available to GuC firmware */
 #define GUC_WOPCM_SIZE			_MMIO(0xc050)
-#define GUC_WOPCM_SIZE_VALUE		(0x80 << 12)	/* 512KB */
-
 /* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
-#define GUC_WOPCM_TOP			(GUC_WOPCM_SIZE_VALUE)
+#define GUC_WOPCM_TOP			(0x80 << 12)	/* 512KB */
+#define BXT_GUC_WOPCM_RC6_RESERVED	(0x10 << 12)	/* 64KB */
 
 #define GEN8_GT_PM_CONFIG		_MMIO(0x138140)
 #define GEN9LP_GT_PM_CONFIG		_MMIO(0x138140)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index d40c13fb6643..169242a8adff 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -158,8 +158,7 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
 
 	data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
 	/* WaRsDisableCoarsePowerGating:skl,bxt */
-	if (!intel_enable_rc6(dev) ||
-	    NEEDS_WaRsDisableCoarsePowerGating(dev))
+	if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev))
 		data[1] = 0;
 	else
 		/* bit 0 and 1 are for Render and Media domain separately */
@@ -587,8 +586,8 @@ static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
-	obj = i915_gem_alloc_object(dev, size);
-	if (!obj)
+	obj = i915_gem_object_create(dev, size);
+	if (IS_ERR(obj))
 		return NULL;
 
 	if (i915_gem_object_get_pages(obj)) {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2f6fd33c07ba..3242a37fb304 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -336,9 +336,8 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 	__gen6_disable_pm_irq(dev_priv, mask);
 }
 
-void gen6_reset_rps_interrupts(struct drm_device *dev)
+void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	i915_reg_t reg = gen6_pm_iir(dev_priv);
 
 	spin_lock_irq(&dev_priv->irq_lock);
@@ -349,10 +348,8 @@ void gen6_reset_rps_interrupts(struct drm_device *dev)
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-void gen6_enable_rps_interrupts(struct drm_device *dev)
+void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	spin_lock_irq(&dev_priv->irq_lock);
 
 	WARN_ON(dev_priv->rps.pm_iir);
@@ -382,10 +379,8 @@ u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
 	return mask;
 }
 
-void gen6_disable_rps_interrupts(struct drm_device *dev)
+void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	spin_lock_irq(&dev_priv->irq_lock);
 	dev_priv->rps.interrupts_enabled = false;
 	spin_unlock_irq(&dev_priv->irq_lock);
@@ -402,7 +397,7 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
 
 	spin_unlock_irq(&dev_priv->irq_lock);
 
-	synchronize_irq(dev->irq);
+	synchronize_irq(dev_priv->dev->irq);
 }
 
 /**
@@ -607,17 +602,15 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
  * @dev: drm device
  */
-static void i915_enable_asle_pipestat(struct drm_device *dev)
+static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
+	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
 		return;
 
 	spin_lock_irq(&dev_priv->irq_lock);
 
 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
-	if (INTEL_INFO(dev)->gen >= 4)
+	if (INTEL_GEN(dev_priv) >= 4)
 		i915_enable_pipestat(dev_priv, PIPE_A,
 				     PIPE_LEGACY_BLC_EVENT_STATUS);
 
@@ -750,7 +743,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 		vtotal /= 2;
 
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
 	else
 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
@@ -767,7 +760,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 	 * problem. We may need to extend this to include other platforms,
 	 * but so far testing only shows the problem on HSW.
 	 */
-	if (HAS_DDI(dev) && !position) {
+	if (HAS_DDI(dev_priv) && !position) {
 		int i, temp;
 
 		for (i = 0; i < 100; i++) {
@@ -835,7 +828,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 	if (stime)
 		*stime = ktime_get();
 
-	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
 		/* No obvious pixelcount register. Only query vertical
 		 * scanout position from Display scan line register.
 		 */
@@ -897,7 +890,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 	else
 		position += vtotal - vbl_end;
 
-	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
 		*vpos = position;
 		*hpos = 0;
 	} else {
@@ -955,9 +948,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
 						     &crtc->hwmode);
 }
 
-static void ironlake_rps_change_irq_handler(struct drm_device *dev)
+static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 busy_up, busy_down, max_avg, min_avg;
 	u8 new_delay;
 
@@ -986,7 +978,7 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 		new_delay = dev_priv->ips.min_delay;
 	}
 
-	if (ironlake_set_drps(dev, new_delay))
+	if (ironlake_set_drps(dev_priv, new_delay))
 		dev_priv->ips.cur_delay = new_delay;
 
 	spin_unlock(&mchdev_lock);
@@ -1175,7 +1167,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	new_delay += adj;
 	new_delay = clamp_t(int, new_delay, min, max);
 
-	intel_set_rps(dev_priv->dev, new_delay);
+	intel_set_rps(dev_priv, new_delay);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 out:
@@ -1506,27 +1498,23 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
 
 }
 
-static void gmbus_irq_handler(struct drm_device *dev)
+static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
-static void dp_aux_irq_handler(struct drm_device *dev)
+static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
 #if defined(CONFIG_DEBUG_FS)
-static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+					 enum pipe pipe,
 					 uint32_t crc0, uint32_t crc1,
 					 uint32_t crc2, uint32_t crc3,
 					 uint32_t crc4)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
 	struct intel_pipe_crc_entry *entry;
 	int head, tail;
@@ -1550,7 +1538,8 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
 
 	entry = &pipe_crc->entries[head];
 
-	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
+	entry->frame = dev_priv->dev->driver->get_vblank_counter(dev_priv->dev,
+								 pipe);
 	entry->crc[0] = crc0;
 	entry->crc[1] = crc1;
 	entry->crc[2] = crc2;
@@ -1566,27 +1555,26 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
 }
 #else
 static inline void
-display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+			     enum pipe pipe,
 			     uint32_t crc0, uint32_t crc1,
 			     uint32_t crc2, uint32_t crc3,
 			     uint32_t crc4) {}
 #endif
 
 
-static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+				     enum pipe pipe)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	display_pipe_crc_irq_handler(dev, pipe,
+	display_pipe_crc_irq_handler(dev_priv, pipe,
 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
 				     0, 0, 0, 0);
 }
 
-static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+				     enum pipe pipe)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	display_pipe_crc_irq_handler(dev, pipe,
+	display_pipe_crc_irq_handler(dev_priv, pipe,
 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
@@ -1594,22 +1582,22 @@ static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
 }
 
-static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+				      enum pipe pipe)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t res1, res2;
 
-	if (INTEL_INFO(dev)->gen >= 3)
+	if (INTEL_GEN(dev_priv) >= 3)
 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
 	else
 		res1 = 0;
 
-	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
 	else
 		res2 = 0;
 
-	display_pipe_crc_irq_handler(dev, pipe,
+	display_pipe_crc_irq_handler(dev_priv, pipe,
 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
@@ -1643,18 +1631,21 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 	}
 }
 
-static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
+static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
+				     enum pipe pipe)
 {
-	if (!drm_handle_vblank(dev, pipe))
-		return false;
+	bool ret;
 
-	return true;
+	ret = drm_handle_vblank(dev_priv->dev, pipe);
+	if (ret)
+		intel_finish_page_flip_mmio(dev_priv, pipe);
+
+	return ret;
 }
 
-static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
-					u32 pipe_stats[I915_MAX_PIPES])
+static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
+					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
 
 	spin_lock(&dev_priv->irq_lock);
@@ -1710,31 +1701,28 @@ static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
 	spin_unlock(&dev_priv->irq_lock);
 }
 
-static void valleyview_pipestat_irq_handler(struct drm_device *dev,
+static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
 					    u32 pipe_stats[I915_MAX_PIPES])
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe;
 
 	for_each_pipe(dev_priv, pipe) {
 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
-		    intel_pipe_handle_vblank(dev, pipe))
-			intel_check_page_flip(dev, pipe);
+		    intel_pipe_handle_vblank(dev_priv, pipe))
+			intel_check_page_flip(dev_priv, pipe);
 
-		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
-			intel_prepare_page_flip(dev, pipe);
-			intel_finish_page_flip(dev, pipe);
-		}
+		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
+			intel_finish_page_flip_cs(dev_priv, pipe);
 
 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
-			i9xx_pipe_crc_irq_handler(dev, pipe);
+			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
 
 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 	}
 
 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
-		gmbus_irq_handler(dev);
+		gmbus_irq_handler(dev_priv);
 }
 
 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
@@ -1747,12 +1735,13 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
 	return hotplug_status;
 }
 
-static void i9xx_hpd_irq_handler(struct drm_device *dev,
+static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
 				 u32 hotplug_status)
 {
 	u32 pin_mask = 0, long_mask = 0;
 
-	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+	    IS_CHERRYVIEW(dev_priv)) {
 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
 
 		if (hotplug_trigger) {
@@ -1760,11 +1749,11 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
 					   hotplug_trigger, hpd_status_g4x,
 					   i9xx_port_hotplug_long_detect);
 
-			intel_hpd_irq_handler(dev, pin_mask, long_mask);
+			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 		}
 
 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
-			dp_aux_irq_handler(dev);
+			dp_aux_irq_handler(dev_priv);
 	} else {
 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
 
@@ -1772,7 +1761,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
 					   hotplug_trigger, hpd_status_i915,
 					   i9xx_port_hotplug_long_detect);
-			intel_hpd_irq_handler(dev, pin_mask, long_mask);
+			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 		}
 	}
 }
@@ -1831,7 +1820,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 
 		/* Call regardless, as some status bits might not be
 		 * signalled in iir */
-		valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
+		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
 
 		/*
 		 * VLV_IIR is single buffered, and reflects the level
@@ -1850,9 +1839,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 			gen6_rps_irq_handler(dev_priv, pm_iir);
 
 		if (hotplug_status)
-			i9xx_hpd_irq_handler(dev, hotplug_status);
+			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
 
-		valleyview_pipestat_irq_handler(dev, pipe_stats);
+		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
 	} while (0);
 
 	enable_rpm_wakeref_asserts(dev_priv);
@@ -1911,7 +1900,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 
 		/* Call regardless, as some status bits might not be
 		 * signalled in iir */
-		valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
+		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
 
 		/*
 		 * VLV_IIR is single buffered, and reflects the level
@@ -1927,9 +1916,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 			gen8_gt_irq_handler(dev_priv, gt_iir);
 
 		if (hotplug_status)
-			i9xx_hpd_irq_handler(dev, hotplug_status);
+			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
 
-		valleyview_pipestat_irq_handler(dev, pipe_stats);
+		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
 	} while (0);
 
 	enable_rpm_wakeref_asserts(dev_priv);
@@ -1937,10 +1926,10 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1937 return ret; 1926 return ret;
1938} 1927}
1939 1928
1940static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, 1929static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1930 u32 hotplug_trigger,
1941 const u32 hpd[HPD_NUM_PINS]) 1931 const u32 hpd[HPD_NUM_PINS])
1942{ 1932{
1943 struct drm_i915_private *dev_priv = to_i915(dev);
1944 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 1933 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1945 1934
1946 /* 1935 /*
@@ -1966,16 +1955,15 @@ static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1966 dig_hotplug_reg, hpd, 1955 dig_hotplug_reg, hpd,
1967 pch_port_hotplug_long_detect); 1956 pch_port_hotplug_long_detect);
1968 1957
1969 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1958 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1970} 1959}
1971 1960
1972static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1961static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1973{ 1962{
1974 struct drm_i915_private *dev_priv = dev->dev_private;
1975 int pipe; 1963 int pipe;
1976 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1964 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1977 1965
1978 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 1966 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
1979 1967
1980 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1968 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1981 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1969 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1985,10 +1973,10 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1985 } 1973 }
1986 1974
1987 if (pch_iir & SDE_AUX_MASK) 1975 if (pch_iir & SDE_AUX_MASK)
1988 dp_aux_irq_handler(dev); 1976 dp_aux_irq_handler(dev_priv);
1989 1977
1990 if (pch_iir & SDE_GMBUS) 1978 if (pch_iir & SDE_GMBUS)
1991 gmbus_irq_handler(dev); 1979 gmbus_irq_handler(dev_priv);
1992 1980
1993 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1981 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1994 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1982 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
@@ -2018,9 +2006,8 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
2018 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2006 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2019} 2007}
2020 2008
2021static void ivb_err_int_handler(struct drm_device *dev) 2009static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
2022{ 2010{
2023 struct drm_i915_private *dev_priv = dev->dev_private;
2024 u32 err_int = I915_READ(GEN7_ERR_INT); 2011 u32 err_int = I915_READ(GEN7_ERR_INT);
2025 enum pipe pipe; 2012 enum pipe pipe;
2026 2013
@@ -2032,19 +2019,18 @@ static void ivb_err_int_handler(struct drm_device *dev)
2032 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2019 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2033 2020
2034 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2021 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2035 if (IS_IVYBRIDGE(dev)) 2022 if (IS_IVYBRIDGE(dev_priv))
2036 ivb_pipe_crc_irq_handler(dev, pipe); 2023 ivb_pipe_crc_irq_handler(dev_priv, pipe);
2037 else 2024 else
2038 hsw_pipe_crc_irq_handler(dev, pipe); 2025 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2039 } 2026 }
2040 } 2027 }
2041 2028
2042 I915_WRITE(GEN7_ERR_INT, err_int); 2029 I915_WRITE(GEN7_ERR_INT, err_int);
2043} 2030}
2044 2031
2045static void cpt_serr_int_handler(struct drm_device *dev) 2032static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2046{ 2033{
2047 struct drm_i915_private *dev_priv = dev->dev_private;
2048 u32 serr_int = I915_READ(SERR_INT); 2034 u32 serr_int = I915_READ(SERR_INT);
2049 2035
2050 if (serr_int & SERR_INT_POISON) 2036 if (serr_int & SERR_INT_POISON)
@@ -2062,13 +2048,12 @@ static void cpt_serr_int_handler(struct drm_device *dev)
2062 I915_WRITE(SERR_INT, serr_int); 2048 I915_WRITE(SERR_INT, serr_int);
2063} 2049}
2064 2050
2065static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 2051static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2066{ 2052{
2067 struct drm_i915_private *dev_priv = dev->dev_private;
2068 int pipe; 2053 int pipe;
2069 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2054 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2070 2055
2071 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 2056 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
2072 2057
2073 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2058 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2074 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2059 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2078,10 +2063,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2078 } 2063 }
2079 2064
2080 if (pch_iir & SDE_AUX_MASK_CPT) 2065 if (pch_iir & SDE_AUX_MASK_CPT)
2081 dp_aux_irq_handler(dev); 2066 dp_aux_irq_handler(dev_priv);
2082 2067
2083 if (pch_iir & SDE_GMBUS_CPT) 2068 if (pch_iir & SDE_GMBUS_CPT)
2084 gmbus_irq_handler(dev); 2069 gmbus_irq_handler(dev_priv);
2085 2070
2086 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2071 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2087 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2072 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
@@ -2096,12 +2081,11 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2096 I915_READ(FDI_RX_IIR(pipe))); 2081 I915_READ(FDI_RX_IIR(pipe)));
2097 2082
2098 if (pch_iir & SDE_ERROR_CPT) 2083 if (pch_iir & SDE_ERROR_CPT)
2099 cpt_serr_int_handler(dev); 2084 cpt_serr_int_handler(dev_priv);
2100} 2085}
2101 2086
2102static void spt_irq_handler(struct drm_device *dev, u32 pch_iir) 2087static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2103{ 2088{
2104 struct drm_i915_private *dev_priv = dev->dev_private;
2105 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2089 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2106 ~SDE_PORTE_HOTPLUG_SPT; 2090 ~SDE_PORTE_HOTPLUG_SPT;
2107 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2091 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
@@ -2130,16 +2114,16 @@ static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
2130 } 2114 }
2131 2115
2132 if (pin_mask) 2116 if (pin_mask)
2133 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2117 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2134 2118
2135 if (pch_iir & SDE_GMBUS_CPT) 2119 if (pch_iir & SDE_GMBUS_CPT)
2136 gmbus_irq_handler(dev); 2120 gmbus_irq_handler(dev_priv);
2137} 2121}
2138 2122
2139static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, 2123static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2124 u32 hotplug_trigger,
2140 const u32 hpd[HPD_NUM_PINS]) 2125 const u32 hpd[HPD_NUM_PINS])
2141{ 2126{
2142 struct drm_i915_private *dev_priv = to_i915(dev);
2143 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2127 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2144 2128
2145 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2129 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
@@ -2149,97 +2133,93 @@ static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2149 dig_hotplug_reg, hpd, 2133 dig_hotplug_reg, hpd,
2150 ilk_port_hotplug_long_detect); 2134 ilk_port_hotplug_long_detect);
2151 2135
2152 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2136 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2153} 2137}
2154 2138
2155static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 2139static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2140 u32 de_iir)
2156{ 2141{
2157 struct drm_i915_private *dev_priv = dev->dev_private;
2158 enum pipe pipe; 2142 enum pipe pipe;
2159 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2143 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2160 2144
2161 if (hotplug_trigger) 2145 if (hotplug_trigger)
2162 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk); 2146 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2163 2147
2164 if (de_iir & DE_AUX_CHANNEL_A) 2148 if (de_iir & DE_AUX_CHANNEL_A)
2165 dp_aux_irq_handler(dev); 2149 dp_aux_irq_handler(dev_priv);
2166 2150
2167 if (de_iir & DE_GSE) 2151 if (de_iir & DE_GSE)
2168 intel_opregion_asle_intr(dev); 2152 intel_opregion_asle_intr(dev_priv);
2169 2153
2170 if (de_iir & DE_POISON) 2154 if (de_iir & DE_POISON)
2171 DRM_ERROR("Poison interrupt\n"); 2155 DRM_ERROR("Poison interrupt\n");
2172 2156
2173 for_each_pipe(dev_priv, pipe) { 2157 for_each_pipe(dev_priv, pipe) {
2174 if (de_iir & DE_PIPE_VBLANK(pipe) && 2158 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2175 intel_pipe_handle_vblank(dev, pipe)) 2159 intel_pipe_handle_vblank(dev_priv, pipe))
2176 intel_check_page_flip(dev, pipe); 2160 intel_check_page_flip(dev_priv, pipe);
2177 2161
2178 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2162 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2179 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2163 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2180 2164
2181 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2165 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2182 i9xx_pipe_crc_irq_handler(dev, pipe); 2166 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2183 2167
2184 /* plane/pipes map 1:1 on ilk+ */ 2168 /* plane/pipes map 1:1 on ilk+ */
2185 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 2169 if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2186 intel_prepare_page_flip(dev, pipe); 2170 intel_finish_page_flip_cs(dev_priv, pipe);
2187 intel_finish_page_flip_plane(dev, pipe);
2188 }
2189 } 2171 }
2190 2172
2191 /* check event from PCH */ 2173 /* check event from PCH */
2192 if (de_iir & DE_PCH_EVENT) { 2174 if (de_iir & DE_PCH_EVENT) {
2193 u32 pch_iir = I915_READ(SDEIIR); 2175 u32 pch_iir = I915_READ(SDEIIR);
2194 2176
2195 if (HAS_PCH_CPT(dev)) 2177 if (HAS_PCH_CPT(dev_priv))
2196 cpt_irq_handler(dev, pch_iir); 2178 cpt_irq_handler(dev_priv, pch_iir);
2197 else 2179 else
2198 ibx_irq_handler(dev, pch_iir); 2180 ibx_irq_handler(dev_priv, pch_iir);
2199 2181
2200 /* should clear PCH hotplug event before clear CPU irq */ 2182 /* should clear PCH hotplug event before clear CPU irq */
2201 I915_WRITE(SDEIIR, pch_iir); 2183 I915_WRITE(SDEIIR, pch_iir);
2202 } 2184 }
2203 2185
2204 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 2186 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
2205 ironlake_rps_change_irq_handler(dev); 2187 ironlake_rps_change_irq_handler(dev_priv);
2206} 2188}
2207 2189
2208static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 2190static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2191 u32 de_iir)
2209{ 2192{
2210 struct drm_i915_private *dev_priv = dev->dev_private;
2211 enum pipe pipe; 2193 enum pipe pipe;
2212 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2194 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2213 2195
2214 if (hotplug_trigger) 2196 if (hotplug_trigger)
2215 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb); 2197 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2216 2198
2217 if (de_iir & DE_ERR_INT_IVB) 2199 if (de_iir & DE_ERR_INT_IVB)
2218 ivb_err_int_handler(dev); 2200 ivb_err_int_handler(dev_priv);
2219 2201
2220 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2202 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2221 dp_aux_irq_handler(dev); 2203 dp_aux_irq_handler(dev_priv);
2222 2204
2223 if (de_iir & DE_GSE_IVB) 2205 if (de_iir & DE_GSE_IVB)
2224 intel_opregion_asle_intr(dev); 2206 intel_opregion_asle_intr(dev_priv);
2225 2207
2226 for_each_pipe(dev_priv, pipe) { 2208 for_each_pipe(dev_priv, pipe) {
2227 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2209 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2228 intel_pipe_handle_vblank(dev, pipe)) 2210 intel_pipe_handle_vblank(dev_priv, pipe))
2229 intel_check_page_flip(dev, pipe); 2211 intel_check_page_flip(dev_priv, pipe);
2230 2212
2231 /* plane/pipes map 1:1 on ilk+ */ 2213 /* plane/pipes map 1:1 on ilk+ */
2232 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 2214 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2233 intel_prepare_page_flip(dev, pipe); 2215 intel_finish_page_flip_cs(dev_priv, pipe);
2234 intel_finish_page_flip_plane(dev, pipe);
2235 }
2236 } 2216 }
2237 2217
2238 /* check event from PCH */ 2218 /* check event from PCH */
2239 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 2219 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2240 u32 pch_iir = I915_READ(SDEIIR); 2220 u32 pch_iir = I915_READ(SDEIIR);
2241 2221
2242 cpt_irq_handler(dev, pch_iir); 2222 cpt_irq_handler(dev_priv, pch_iir);
2243 2223
2244 /* clear PCH hotplug event before clear CPU irq */ 2224 /* clear PCH hotplug event before clear CPU irq */
2245 I915_WRITE(SDEIIR, pch_iir); 2225 I915_WRITE(SDEIIR, pch_iir);
@@ -2277,7 +2257,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2277 * able to process them after we restore SDEIER (as soon as we restore 2257 * able to process them after we restore SDEIER (as soon as we restore
2278 * it, we'll get an interrupt if SDEIIR still has something to process 2258 * it, we'll get an interrupt if SDEIIR still has something to process
2279 * due to its back queue). */ 2259 * due to its back queue). */
2280 if (!HAS_PCH_NOP(dev)) { 2260 if (!HAS_PCH_NOP(dev_priv)) {
2281 sde_ier = I915_READ(SDEIER); 2261 sde_ier = I915_READ(SDEIER);
2282 I915_WRITE(SDEIER, 0); 2262 I915_WRITE(SDEIER, 0);
2283 POSTING_READ(SDEIER); 2263 POSTING_READ(SDEIER);
@@ -2289,7 +2269,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2289 if (gt_iir) { 2269 if (gt_iir) {
2290 I915_WRITE(GTIIR, gt_iir); 2270 I915_WRITE(GTIIR, gt_iir);
2291 ret = IRQ_HANDLED; 2271 ret = IRQ_HANDLED;
2292 if (INTEL_INFO(dev)->gen >= 6) 2272 if (INTEL_GEN(dev_priv) >= 6)
2293 snb_gt_irq_handler(dev_priv, gt_iir); 2273 snb_gt_irq_handler(dev_priv, gt_iir);
2294 else 2274 else
2295 ilk_gt_irq_handler(dev_priv, gt_iir); 2275 ilk_gt_irq_handler(dev_priv, gt_iir);
@@ -2299,13 +2279,13 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2299 if (de_iir) { 2279 if (de_iir) {
2300 I915_WRITE(DEIIR, de_iir); 2280 I915_WRITE(DEIIR, de_iir);
2301 ret = IRQ_HANDLED; 2281 ret = IRQ_HANDLED;
2302 if (INTEL_INFO(dev)->gen >= 7) 2282 if (INTEL_GEN(dev_priv) >= 7)
2303 ivb_display_irq_handler(dev, de_iir); 2283 ivb_display_irq_handler(dev_priv, de_iir);
2304 else 2284 else
2305 ilk_display_irq_handler(dev, de_iir); 2285 ilk_display_irq_handler(dev_priv, de_iir);
2306 } 2286 }
2307 2287
2308 if (INTEL_INFO(dev)->gen >= 6) { 2288 if (INTEL_GEN(dev_priv) >= 6) {
2309 u32 pm_iir = I915_READ(GEN6_PMIIR); 2289 u32 pm_iir = I915_READ(GEN6_PMIIR);
2310 if (pm_iir) { 2290 if (pm_iir) {
2311 I915_WRITE(GEN6_PMIIR, pm_iir); 2291 I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -2316,7 +2296,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2316 2296
2317 I915_WRITE(DEIER, de_ier); 2297 I915_WRITE(DEIER, de_ier);
2318 POSTING_READ(DEIER); 2298 POSTING_READ(DEIER);
2319 if (!HAS_PCH_NOP(dev)) { 2299 if (!HAS_PCH_NOP(dev_priv)) {
2320 I915_WRITE(SDEIER, sde_ier); 2300 I915_WRITE(SDEIER, sde_ier);
2321 POSTING_READ(SDEIER); 2301 POSTING_READ(SDEIER);
2322 } 2302 }
@@ -2327,10 +2307,10 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2327 return ret; 2307 return ret;
2328} 2308}
2329 2309
2330static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, 2310static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2311 u32 hotplug_trigger,
2331 const u32 hpd[HPD_NUM_PINS]) 2312 const u32 hpd[HPD_NUM_PINS])
2332{ 2313{
2333 struct drm_i915_private *dev_priv = to_i915(dev);
2334 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2314 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2335 2315
2336 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2316 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
@@ -2340,13 +2320,12 @@ static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2340 dig_hotplug_reg, hpd, 2320 dig_hotplug_reg, hpd,
2341 bxt_port_hotplug_long_detect); 2321 bxt_port_hotplug_long_detect);
2342 2322
2343 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2323 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2344} 2324}
2345 2325
2346static irqreturn_t 2326static irqreturn_t
2347gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2327gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2348{ 2328{
2349 struct drm_device *dev = dev_priv->dev;
2350 irqreturn_t ret = IRQ_NONE; 2329 irqreturn_t ret = IRQ_NONE;
2351 u32 iir; 2330 u32 iir;
2352 enum pipe pipe; 2331 enum pipe pipe;
@@ -2357,7 +2336,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2357 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2336 I915_WRITE(GEN8_DE_MISC_IIR, iir);
2358 ret = IRQ_HANDLED; 2337 ret = IRQ_HANDLED;
2359 if (iir & GEN8_DE_MISC_GSE) 2338 if (iir & GEN8_DE_MISC_GSE)
2360 intel_opregion_asle_intr(dev); 2339 intel_opregion_asle_intr(dev_priv);
2361 else 2340 else
2362 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2341 DRM_ERROR("Unexpected DE Misc interrupt\n");
2363 } 2342 }
@@ -2381,26 +2360,28 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2381 GEN9_AUX_CHANNEL_D; 2360 GEN9_AUX_CHANNEL_D;
2382 2361
2383 if (iir & tmp_mask) { 2362 if (iir & tmp_mask) {
2384 dp_aux_irq_handler(dev); 2363 dp_aux_irq_handler(dev_priv);
2385 found = true; 2364 found = true;
2386 } 2365 }
2387 2366
2388 if (IS_BROXTON(dev_priv)) { 2367 if (IS_BROXTON(dev_priv)) {
2389 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2368 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2390 if (tmp_mask) { 2369 if (tmp_mask) {
2391 bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt); 2370 bxt_hpd_irq_handler(dev_priv, tmp_mask,
2371 hpd_bxt);
2392 found = true; 2372 found = true;
2393 } 2373 }
2394 } else if (IS_BROADWELL(dev_priv)) { 2374 } else if (IS_BROADWELL(dev_priv)) {
2395 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2375 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2396 if (tmp_mask) { 2376 if (tmp_mask) {
2397 ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw); 2377 ilk_hpd_irq_handler(dev_priv,
2378 tmp_mask, hpd_bdw);
2398 found = true; 2379 found = true;
2399 } 2380 }
2400 } 2381 }
2401 2382
2402 if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) { 2383 if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2403 gmbus_irq_handler(dev); 2384 gmbus_irq_handler(dev_priv);
2404 found = true; 2385 found = true;
2405 } 2386 }
2406 2387
@@ -2427,8 +2408,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2427 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2408 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2428 2409
2429 if (iir & GEN8_PIPE_VBLANK && 2410 if (iir & GEN8_PIPE_VBLANK &&
2430 intel_pipe_handle_vblank(dev, pipe)) 2411 intel_pipe_handle_vblank(dev_priv, pipe))
2431 intel_check_page_flip(dev, pipe); 2412 intel_check_page_flip(dev_priv, pipe);
2432 2413
2433 flip_done = iir; 2414 flip_done = iir;
2434 if (INTEL_INFO(dev_priv)->gen >= 9) 2415 if (INTEL_INFO(dev_priv)->gen >= 9)
@@ -2436,13 +2417,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2436 else 2417 else
2437 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE; 2418 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
2438 2419
2439 if (flip_done) { 2420 if (flip_done)
2440 intel_prepare_page_flip(dev, pipe); 2421 intel_finish_page_flip_cs(dev_priv, pipe);
2441 intel_finish_page_flip_plane(dev, pipe);
2442 }
2443 2422
2444 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2423 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2445 hsw_pipe_crc_irq_handler(dev, pipe); 2424 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2446 2425
2447 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2426 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2448 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2427 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
@@ -2459,7 +2438,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2459 fault_errors); 2438 fault_errors);
2460 } 2439 }
2461 2440
2462 if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) && 2441 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2463 master_ctl & GEN8_DE_PCH_IRQ) { 2442 master_ctl & GEN8_DE_PCH_IRQ) {
2464 /* 2443 /*
2465 * FIXME(BDW): Assume for now that the new interrupt handling 2444 * FIXME(BDW): Assume for now that the new interrupt handling
@@ -2472,9 +2451,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2472 ret = IRQ_HANDLED; 2451 ret = IRQ_HANDLED;
2473 2452
2474 if (HAS_PCH_SPT(dev_priv)) 2453 if (HAS_PCH_SPT(dev_priv))
2475 spt_irq_handler(dev, iir); 2454 spt_irq_handler(dev_priv, iir);
2476 else 2455 else
2477 cpt_irq_handler(dev, iir); 2456 cpt_irq_handler(dev_priv, iir);
2478 } else { 2457 } else {
2479 /* 2458 /*
2480 * Like on previous PCH there seems to be something 2459 * Like on previous PCH there seems to be something
@@ -2555,15 +2534,15 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2555 * Fire an error uevent so userspace can see that a hang or error 2534 * Fire an error uevent so userspace can see that a hang or error
2556 * was detected. 2535 * was detected.
2557 */ 2536 */
2558static void i915_reset_and_wakeup(struct drm_device *dev) 2537static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
2559{ 2538{
2560 struct drm_i915_private *dev_priv = to_i915(dev); 2539 struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj;
2561 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2540 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2562 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2541 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2563 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2542 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2564 int ret; 2543 int ret;
2565 2544
2566 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); 2545 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
2567 2546
2568 /* 2547 /*
2569 * Note that there's only one work item which does gpu resets, so we 2548 * Note that there's only one work item which does gpu resets, so we
@@ -2577,8 +2556,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
2577 */ 2556 */
2578 if (i915_reset_in_progress(&dev_priv->gpu_error)) { 2557 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
2579 DRM_DEBUG_DRIVER("resetting chip\n"); 2558 DRM_DEBUG_DRIVER("resetting chip\n");
2580 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, 2559 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
2581 reset_event);
2582 2560
2583 /* 2561 /*
2584 * In most cases it's guaranteed that we get here with an RPM 2562 * In most cases it's guaranteed that we get here with an RPM
@@ -2589,7 +2567,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
2589 */ 2567 */
2590 intel_runtime_pm_get(dev_priv); 2568 intel_runtime_pm_get(dev_priv);
2591 2569
2592 intel_prepare_reset(dev); 2570 intel_prepare_reset(dev_priv);
2593 2571
2594 /* 2572 /*
2595 * All state reset _must_ be completed before we update the 2573 * All state reset _must_ be completed before we update the
@@ -2597,14 +2575,14 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
2597 * pending state and not properly drop locks, resulting in 2575 * pending state and not properly drop locks, resulting in
2598 * deadlocks with the reset work. 2576 * deadlocks with the reset work.
2599 */ 2577 */
2600 ret = i915_reset(dev); 2578 ret = i915_reset(dev_priv);
2601 2579
2602 intel_finish_reset(dev); 2580 intel_finish_reset(dev_priv);
2603 2581
2604 intel_runtime_pm_put(dev_priv); 2582 intel_runtime_pm_put(dev_priv);
2605 2583
2606 if (ret == 0) 2584 if (ret == 0)
2607 kobject_uevent_env(&dev->primary->kdev->kobj, 2585 kobject_uevent_env(kobj,
2608 KOBJ_CHANGE, reset_done_event); 2586 KOBJ_CHANGE, reset_done_event);
2609 2587
2610 /* 2588 /*
@@ -2615,9 +2593,8 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
2615 } 2593 }
2616} 2594}
2617 2595
2618static void i915_report_and_clear_eir(struct drm_device *dev) 2596static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
2619{ 2597{
2620 struct drm_i915_private *dev_priv = dev->dev_private;
2621 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2598 uint32_t instdone[I915_NUM_INSTDONE_REG];
2622 u32 eir = I915_READ(EIR); 2599 u32 eir = I915_READ(EIR);
2623 int pipe, i; 2600 int pipe, i;
@@ -2627,9 +2604,9 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
2627 2604
2628 pr_err("render error detected, EIR: 0x%08x\n", eir); 2605 pr_err("render error detected, EIR: 0x%08x\n", eir);
2629 2606
2630 i915_get_extra_instdone(dev, instdone); 2607 i915_get_extra_instdone(dev_priv, instdone);
2631 2608
2632 if (IS_G4X(dev)) { 2609 if (IS_G4X(dev_priv)) {
2633 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2610 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2634 u32 ipeir = I915_READ(IPEIR_I965); 2611 u32 ipeir = I915_READ(IPEIR_I965);
2635 2612
@@ -2651,7 +2628,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
2651 } 2628 }
2652 } 2629 }
2653 2630
2654 if (!IS_GEN2(dev)) { 2631 if (!IS_GEN2(dev_priv)) {
2655 if (eir & I915_ERROR_PAGE_TABLE) { 2632 if (eir & I915_ERROR_PAGE_TABLE) {
2656 u32 pgtbl_err = I915_READ(PGTBL_ER); 2633 u32 pgtbl_err = I915_READ(PGTBL_ER);
2657 pr_err("page table error\n"); 2634 pr_err("page table error\n");
@@ -2673,7 +2650,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
2673 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2650 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2674 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2651 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2675 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2652 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2676 if (INTEL_INFO(dev)->gen < 4) { 2653 if (INTEL_GEN(dev_priv) < 4) {
2677 u32 ipeir = I915_READ(IPEIR); 2654 u32 ipeir = I915_READ(IPEIR);
2678 2655
2679 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2656 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
@@ -2717,10 +2694,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
  * so userspace knows something bad happened (should trigger collection
  * of a ring dump etc.).
  */
-void i915_handle_error(struct drm_device *dev, u32 engine_mask,
+void i915_handle_error(struct drm_i915_private *dev_priv,
+		       u32 engine_mask,
 		       const char *fmt, ...)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	va_list args;
 	char error_msg[80];
 
@@ -2728,8 +2705,8 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
 	va_end(args);
 
-	i915_capture_error_state(dev, engine_mask, error_msg);
-	i915_report_and_clear_eir(dev);
+	i915_capture_error_state(dev_priv, engine_mask, error_msg);
+	i915_report_and_clear_eir(dev_priv);
 
 	if (engine_mask) {
 		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
@@ -2751,7 +2728,7 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
 		i915_error_wake_up(dev_priv, false);
 	}
 
-	i915_reset_and_wakeup(dev);
+	i915_reset_and_wakeup(dev_priv);
 }
 
 /* Called from drm generic code, passed 'crtc' which
@@ -2869,9 +2846,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno)
2869} 2846}
2870 2847
2871static bool 2848static bool
2872ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 2849ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr)
2873{ 2850{
2874 if (INTEL_INFO(dev)->gen >= 8) { 2851 if (INTEL_GEN(dev_priv) >= 8) {
2875 return (ipehr >> 23) == 0x1c; 2852 return (ipehr >> 23) == 0x1c;
2876 } else { 2853 } else {
2877 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2854 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
@@ -2884,10 +2861,10 @@ static struct intel_engine_cs *
2884semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, 2861semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
2885 u64 offset) 2862 u64 offset)
2886{ 2863{
2887 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2864 struct drm_i915_private *dev_priv = engine->i915;
2888 struct intel_engine_cs *signaller; 2865 struct intel_engine_cs *signaller;
2889 2866
2890 if (INTEL_INFO(dev_priv)->gen >= 8) { 2867 if (INTEL_GEN(dev_priv) >= 8) {
2891 for_each_engine(signaller, dev_priv) { 2868 for_each_engine(signaller, dev_priv) {
2892 if (engine == signaller) 2869 if (engine == signaller)
2893 continue; 2870 continue;
@@ -2916,7 +2893,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
2916static struct intel_engine_cs * 2893static struct intel_engine_cs *
2917semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) 2894semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2918{ 2895{
2919 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2896 struct drm_i915_private *dev_priv = engine->i915;
2920 u32 cmd, ipehr, head; 2897 u32 cmd, ipehr, head;
2921 u64 offset = 0; 2898 u64 offset = 0;
2922 int i, backwards; 2899 int i, backwards;
@@ -2942,7 +2919,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2942 return NULL; 2919 return NULL;
2943 2920
2944 ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); 2921 ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
2945 if (!ipehr_is_semaphore_wait(engine->dev, ipehr)) 2922 if (!ipehr_is_semaphore_wait(engine->i915, ipehr))
2946 return NULL; 2923 return NULL;
2947 2924
2948 /* 2925 /*
@@ -2954,7 +2931,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2954 * ringbuffer itself. 2931 * ringbuffer itself.
2955 */ 2932 */
2956 head = I915_READ_HEAD(engine) & HEAD_ADDR; 2933 head = I915_READ_HEAD(engine) & HEAD_ADDR;
2957 backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4; 2934 backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
2958 2935
2959 for (i = backwards; i; --i) { 2936 for (i = backwards; i; --i) {
2960 /* 2937 /*
@@ -2976,7 +2953,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2976 return NULL; 2953 return NULL;
2977 2954
2978 *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1; 2955 *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
2979 if (INTEL_INFO(engine->dev)->gen >= 8) { 2956 if (INTEL_GEN(dev_priv) >= 8) {
2980 offset = ioread32(engine->buffer->virtual_start + head + 12); 2957 offset = ioread32(engine->buffer->virtual_start + head + 12);
2981 offset <<= 32; 2958 offset <<= 32;
2982 offset = ioread32(engine->buffer->virtual_start + head + 8); 2959 offset = ioread32(engine->buffer->virtual_start + head + 8);
@@ -2986,7 +2963,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2986 2963
2987static int semaphore_passed(struct intel_engine_cs *engine) 2964static int semaphore_passed(struct intel_engine_cs *engine)
2988{ 2965{
2989 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2966 struct drm_i915_private *dev_priv = engine->i915;
2990 struct intel_engine_cs *signaller; 2967 struct intel_engine_cs *signaller;
2991 u32 seqno; 2968 u32 seqno;
2992 2969
@@ -3028,7 +3005,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
3028 if (engine->id != RCS) 3005 if (engine->id != RCS)
3029 return true; 3006 return true;
3030 3007
3031 i915_get_extra_instdone(engine->dev, instdone); 3008 i915_get_extra_instdone(engine->i915, instdone);
3032 3009
3033 /* There might be unstable subunit states even when 3010 /* There might be unstable subunit states even when
3034 * actual head is not moving. Filter out the unstable ones by 3011 * actual head is not moving. Filter out the unstable ones by
@@ -3069,8 +3046,7 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
3069static enum intel_ring_hangcheck_action 3046static enum intel_ring_hangcheck_action
3070ring_stuck(struct intel_engine_cs *engine, u64 acthd) 3047ring_stuck(struct intel_engine_cs *engine, u64 acthd)
3071{ 3048{
3072 struct drm_device *dev = engine->dev; 3049 struct drm_i915_private *dev_priv = engine->i915;
3073 struct drm_i915_private *dev_priv = dev->dev_private;
3074 enum intel_ring_hangcheck_action ha; 3050 enum intel_ring_hangcheck_action ha;
3075 u32 tmp; 3051 u32 tmp;
3076 3052
@@ -3078,7 +3054,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
3078 if (ha != HANGCHECK_HUNG) 3054 if (ha != HANGCHECK_HUNG)
3079 return ha; 3055 return ha;
3080 3056
3081 if (IS_GEN2(dev)) 3057 if (IS_GEN2(dev_priv))
3082 return HANGCHECK_HUNG; 3058 return HANGCHECK_HUNG;
3083 3059
3084 /* Is the chip hanging on a WAIT_FOR_EVENT? 3060 /* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -3088,19 +3064,19 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
3088 */ 3064 */
3089 tmp = I915_READ_CTL(engine); 3065 tmp = I915_READ_CTL(engine);
3090 if (tmp & RING_WAIT) { 3066 if (tmp & RING_WAIT) {
3091 i915_handle_error(dev, 0, 3067 i915_handle_error(dev_priv, 0,
3092 "Kicking stuck wait on %s", 3068 "Kicking stuck wait on %s",
3093 engine->name); 3069 engine->name);
3094 I915_WRITE_CTL(engine, tmp); 3070 I915_WRITE_CTL(engine, tmp);
3095 return HANGCHECK_KICK; 3071 return HANGCHECK_KICK;
3096 } 3072 }
3097 3073
3098 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 3074 if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3099 switch (semaphore_passed(engine)) { 3075 switch (semaphore_passed(engine)) {
3100 default: 3076 default:
3101 return HANGCHECK_HUNG; 3077 return HANGCHECK_HUNG;
3102 case 1: 3078 case 1:
3103 i915_handle_error(dev, 0, 3079 i915_handle_error(dev_priv, 0,
3104 "Kicking stuck semaphore on %s", 3080 "Kicking stuck semaphore on %s",
3105 engine->name); 3081 engine->name);
3106 I915_WRITE_CTL(engine, tmp); 3082 I915_WRITE_CTL(engine, tmp);
@@ -3115,7 +3091,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
3115 3091
3116static unsigned kick_waiters(struct intel_engine_cs *engine) 3092static unsigned kick_waiters(struct intel_engine_cs *engine)
3117{ 3093{
3118 struct drm_i915_private *i915 = to_i915(engine->dev); 3094 struct drm_i915_private *i915 = engine->i915;
3119 unsigned user_interrupts = READ_ONCE(engine->user_interrupts); 3095 unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
3120 3096
3121 if (engine->hangcheck.user_interrupts == user_interrupts && 3097 if (engine->hangcheck.user_interrupts == user_interrupts &&
@@ -3144,7 +3120,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
3144 struct drm_i915_private *dev_priv = 3120 struct drm_i915_private *dev_priv =
3145 container_of(work, typeof(*dev_priv), 3121 container_of(work, typeof(*dev_priv),
3146 gpu_error.hangcheck_work.work); 3122 gpu_error.hangcheck_work.work);
3147 struct drm_device *dev = dev_priv->dev;
3148 struct intel_engine_cs *engine; 3123 struct intel_engine_cs *engine;
3149 enum intel_engine_id id; 3124 enum intel_engine_id id;
3150 int busy_count = 0, rings_hung = 0; 3125 int busy_count = 0, rings_hung = 0;
@@ -3272,22 +3247,22 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
3272 } 3247 }
3273 3248
3274 if (rings_hung) { 3249 if (rings_hung) {
3275 i915_handle_error(dev, rings_hung, "Engine(s) hung"); 3250 i915_handle_error(dev_priv, rings_hung, "Engine(s) hung");
3276 goto out; 3251 goto out;
3277 } 3252 }
3278 3253
3279 if (busy_count) 3254 if (busy_count)
3280 /* Reset timer in case chip hangs without another request 3255 /* Reset timer in case chip hangs without another request
3281 * being added */ 3256 * being added */
3282 i915_queue_hangcheck(dev); 3257 i915_queue_hangcheck(dev_priv);
3283 3258
3284out: 3259out:
3285 ENABLE_RPM_WAKEREF_ASSERTS(dev_priv); 3260 ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3286} 3261}
3287 3262
3288void i915_queue_hangcheck(struct drm_device *dev) 3263void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
3289{ 3264{
3290 struct i915_gpu_error *e = &to_i915(dev)->gpu_error; 3265 struct i915_gpu_error *e = &dev_priv->gpu_error;
3291 3266
3292 if (!i915.enable_hangcheck) 3267 if (!i915.enable_hangcheck)
3293 return; 3268 return;
@@ -3500,31 +3475,29 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
3500 spin_unlock_irq(&dev_priv->irq_lock); 3475 spin_unlock_irq(&dev_priv->irq_lock);
3501} 3476}
3502 3477
3503static u32 intel_hpd_enabled_irqs(struct drm_device *dev, 3478static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3504 const u32 hpd[HPD_NUM_PINS]) 3479 const u32 hpd[HPD_NUM_PINS])
3505{ 3480{
3506 struct drm_i915_private *dev_priv = to_i915(dev);
3507 struct intel_encoder *encoder; 3481 struct intel_encoder *encoder;
3508 u32 enabled_irqs = 0; 3482 u32 enabled_irqs = 0;
3509 3483
3510 for_each_intel_encoder(dev, encoder) 3484 for_each_intel_encoder(dev_priv->dev, encoder)
3511 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3485 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3512 enabled_irqs |= hpd[encoder->hpd_pin]; 3486 enabled_irqs |= hpd[encoder->hpd_pin];
3513 3487
3514 return enabled_irqs; 3488 return enabled_irqs;
3515} 3489}
3516 3490
3517static void ibx_hpd_irq_setup(struct drm_device *dev) 3491static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3518{ 3492{
3519 struct drm_i915_private *dev_priv = dev->dev_private;
3520 u32 hotplug_irqs, hotplug, enabled_irqs; 3493 u32 hotplug_irqs, hotplug, enabled_irqs;
3521 3494
3522 if (HAS_PCH_IBX(dev)) { 3495 if (HAS_PCH_IBX(dev_priv)) {
3523 hotplug_irqs = SDE_HOTPLUG_MASK; 3496 hotplug_irqs = SDE_HOTPLUG_MASK;
3524 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx); 3497 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3525 } else { 3498 } else {
3526 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3499 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3527 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt); 3500 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3528 } 3501 }
3529 3502
3530 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3503 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3543,18 +3516,17 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
3543 * When CPU and PCH are on the same package, port A 3516 * When CPU and PCH are on the same package, port A
3544 * HPD must be enabled in both north and south. 3517 * HPD must be enabled in both north and south.
3545 */ 3518 */
3546 if (HAS_PCH_LPT_LP(dev)) 3519 if (HAS_PCH_LPT_LP(dev_priv))
3547 hotplug |= PORTA_HOTPLUG_ENABLE; 3520 hotplug |= PORTA_HOTPLUG_ENABLE;
3548 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3521 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3549} 3522}
3550 3523
3551static void spt_hpd_irq_setup(struct drm_device *dev) 3524static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3552{ 3525{
3553 struct drm_i915_private *dev_priv = dev->dev_private;
3554 u32 hotplug_irqs, hotplug, enabled_irqs; 3526 u32 hotplug_irqs, hotplug, enabled_irqs;
3555 3527
3556 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3528 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3557 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt); 3529 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3558 3530
3559 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3531 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3560 3532
@@ -3569,24 +3541,23 @@ static void spt_hpd_irq_setup(struct drm_device *dev)
3569 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3541 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3570} 3542}
3571 3543
3572static void ilk_hpd_irq_setup(struct drm_device *dev) 3544static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3573{ 3545{
3574 struct drm_i915_private *dev_priv = dev->dev_private;
3575 u32 hotplug_irqs, hotplug, enabled_irqs; 3546 u32 hotplug_irqs, hotplug, enabled_irqs;
3576 3547
3577 if (INTEL_INFO(dev)->gen >= 8) { 3548 if (INTEL_GEN(dev_priv) >= 8) {
3578 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3549 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3579 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw); 3550 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3580 3551
3581 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3552 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3582 } else if (INTEL_INFO(dev)->gen >= 7) { 3553 } else if (INTEL_GEN(dev_priv) >= 7) {
3583 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3554 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3584 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb); 3555 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3585 3556
3586 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3557 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3587 } else { 3558 } else {
3588 hotplug_irqs = DE_DP_A_HOTPLUG; 3559 hotplug_irqs = DE_DP_A_HOTPLUG;
3589 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk); 3560 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3590 3561
3591 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3562 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3592 } 3563 }
@@ -3601,15 +3572,14 @@ static void ilk_hpd_irq_setup(struct drm_device *dev)
3601 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms; 3572 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3602 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3573 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3603 3574
3604 ibx_hpd_irq_setup(dev); 3575 ibx_hpd_irq_setup(dev_priv);
3605} 3576}
3606 3577
3607static void bxt_hpd_irq_setup(struct drm_device *dev) 3578static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3608{ 3579{
3609 struct drm_i915_private *dev_priv = dev->dev_private;
3610 u32 hotplug_irqs, hotplug, enabled_irqs; 3580 u32 hotplug_irqs, hotplug, enabled_irqs;
3611 3581
3612 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt); 3582 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3613 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3583 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3614 3584
3615 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3585 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
@@ -4006,13 +3976,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
4006/* 3976/*
4007 * Returns true when a page flip has completed. 3977 * Returns true when a page flip has completed.
4008 */ 3978 */
4009static bool i8xx_handle_vblank(struct drm_device *dev, 3979static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
4010 int plane, int pipe, u32 iir) 3980 int plane, int pipe, u32 iir)
4011{ 3981{
4012 struct drm_i915_private *dev_priv = dev->dev_private;
4013 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3982 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4014 3983
4015 if (!intel_pipe_handle_vblank(dev, pipe)) 3984 if (!intel_pipe_handle_vblank(dev_priv, pipe))
4016 return false; 3985 return false;
4017 3986
4018 if ((iir & flip_pending) == 0) 3987 if ((iir & flip_pending) == 0)
@@ -4027,12 +3996,11 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
4027 if (I915_READ16(ISR) & flip_pending) 3996 if (I915_READ16(ISR) & flip_pending)
4028 goto check_page_flip; 3997 goto check_page_flip;
4029 3998
4030 intel_prepare_page_flip(dev, plane); 3999 intel_finish_page_flip_cs(dev_priv, pipe);
4031 intel_finish_page_flip(dev, pipe);
4032 return true; 4000 return true;
4033 4001
4034check_page_flip: 4002check_page_flip:
4035 intel_check_page_flip(dev, pipe); 4003 intel_check_page_flip(dev_priv, pipe);
4036 return false; 4004 return false;
4037} 4005}
4038 4006
@@ -4089,15 +4057,15 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4089 4057
4090 for_each_pipe(dev_priv, pipe) { 4058 for_each_pipe(dev_priv, pipe) {
4091 int plane = pipe; 4059 int plane = pipe;
4092 if (HAS_FBC(dev)) 4060 if (HAS_FBC(dev_priv))
4093 plane = !plane; 4061 plane = !plane;
4094 4062
4095 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4063 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4096 i8xx_handle_vblank(dev, plane, pipe, iir)) 4064 i8xx_handle_vblank(dev_priv, plane, pipe, iir))
4097 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4065 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4098 4066
4099 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4067 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4100 i9xx_pipe_crc_irq_handler(dev, pipe); 4068 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4101 4069
4102 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4070 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4103 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4071 intel_cpu_fifo_underrun_irq_handler(dev_priv,
@@ -4182,7 +4150,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
4182 I915_WRITE(IER, enable_mask); 4150 I915_WRITE(IER, enable_mask);
4183 POSTING_READ(IER); 4151 POSTING_READ(IER);
4184 4152
4185 i915_enable_asle_pipestat(dev); 4153 i915_enable_asle_pipestat(dev_priv);
4186 4154
4187 /* Interrupt setup is already guaranteed to be single-threaded, this is 4155 /* Interrupt setup is already guaranteed to be single-threaded, this is
4188 * just to make the assert_spin_locked check happy. */ 4156 * just to make the assert_spin_locked check happy. */
@@ -4197,13 +4165,12 @@ static int i915_irq_postinstall(struct drm_device *dev)
4197/* 4165/*
4198 * Returns true when a page flip has completed. 4166 * Returns true when a page flip has completed.
4199 */ 4167 */
4200static bool i915_handle_vblank(struct drm_device *dev, 4168static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
4201 int plane, int pipe, u32 iir) 4169 int plane, int pipe, u32 iir)
4202{ 4170{
4203 struct drm_i915_private *dev_priv = dev->dev_private;
4204 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 4171 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4205 4172
4206 if (!intel_pipe_handle_vblank(dev, pipe)) 4173 if (!intel_pipe_handle_vblank(dev_priv, pipe))
4207 return false; 4174 return false;
4208 4175
4209 if ((iir & flip_pending) == 0) 4176 if ((iir & flip_pending) == 0)
@@ -4218,12 +4185,11 @@ static bool i915_handle_vblank(struct drm_device *dev,
4218 if (I915_READ(ISR) & flip_pending) 4185 if (I915_READ(ISR) & flip_pending)
4219 goto check_page_flip; 4186 goto check_page_flip;
4220 4187
4221 intel_prepare_page_flip(dev, plane); 4188 intel_finish_page_flip_cs(dev_priv, pipe);
4222 intel_finish_page_flip(dev, pipe);
4223 return true; 4189 return true;
4224 4190
4225check_page_flip: 4191check_page_flip:
4226 intel_check_page_flip(dev, pipe); 4192 intel_check_page_flip(dev_priv, pipe);
4227 return false; 4193 return false;
4228} 4194}
4229 4195
@@ -4273,11 +4239,11 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4273 break; 4239 break;
4274 4240
4275 /* Consume port. Then clear IIR or we'll miss events */ 4241 /* Consume port. Then clear IIR or we'll miss events */
4276 if (I915_HAS_HOTPLUG(dev) && 4242 if (I915_HAS_HOTPLUG(dev_priv) &&
4277 iir & I915_DISPLAY_PORT_INTERRUPT) { 4243 iir & I915_DISPLAY_PORT_INTERRUPT) {
4278 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4244 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4279 if (hotplug_status) 4245 if (hotplug_status)
4280 i9xx_hpd_irq_handler(dev, hotplug_status); 4246 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4281 } 4247 }
4282 4248
4283 I915_WRITE(IIR, iir & ~flip_mask); 4249 I915_WRITE(IIR, iir & ~flip_mask);
@@ -4288,18 +4254,18 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4288 4254
4289 for_each_pipe(dev_priv, pipe) { 4255 for_each_pipe(dev_priv, pipe) {
4290 int plane = pipe; 4256 int plane = pipe;
4291 if (HAS_FBC(dev)) 4257 if (HAS_FBC(dev_priv))
4292 plane = !plane; 4258 plane = !plane;
4293 4259
4294 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4260 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4295 i915_handle_vblank(dev, plane, pipe, iir)) 4261 i915_handle_vblank(dev_priv, plane, pipe, iir))
4296 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4262 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4297 4263
4298 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4264 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4299 blc_event = true; 4265 blc_event = true;
4300 4266
4301 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4267 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4302 i9xx_pipe_crc_irq_handler(dev, pipe); 4268 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4303 4269
4304 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4270 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4305 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4271 intel_cpu_fifo_underrun_irq_handler(dev_priv,
@@ -4307,7 +4273,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4307 } 4273 }
4308 4274
4309 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4275 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4310 intel_opregion_asle_intr(dev); 4276 intel_opregion_asle_intr(dev_priv);
4311 4277
4312 /* With MSI, interrupts are only generated when iir 4278 /* With MSI, interrupts are only generated when iir
4313 * transitions from zero to nonzero. If another bit got 4279 * transitions from zero to nonzero. If another bit got
@@ -4391,7 +4357,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
4391 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4357 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4392 enable_mask |= I915_USER_INTERRUPT; 4358 enable_mask |= I915_USER_INTERRUPT;
4393 4359
4394 if (IS_G4X(dev)) 4360 if (IS_G4X(dev_priv))
4395 enable_mask |= I915_BSD_USER_INTERRUPT; 4361 enable_mask |= I915_BSD_USER_INTERRUPT;
4396 4362
4397 /* Interrupt setup is already guaranteed to be single-threaded, this is 4363 /* Interrupt setup is already guaranteed to be single-threaded, this is
@@ -4406,7 +4372,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
4406 * Enable some error detection, note the instruction error mask 4372 * Enable some error detection, note the instruction error mask
4407 * bit is reserved, so we leave it masked. 4373 * bit is reserved, so we leave it masked.
4408 */ 4374 */
4409 if (IS_G4X(dev)) { 4375 if (IS_G4X(dev_priv)) {
4410 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4376 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4411 GM45_ERROR_MEM_PRIV | 4377 GM45_ERROR_MEM_PRIV |
4412 GM45_ERROR_CP_PRIV | 4378 GM45_ERROR_CP_PRIV |
@@ -4424,26 +4390,25 @@ static int i965_irq_postinstall(struct drm_device *dev)
4424 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4390 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4425 POSTING_READ(PORT_HOTPLUG_EN); 4391 POSTING_READ(PORT_HOTPLUG_EN);
4426 4392
4427 i915_enable_asle_pipestat(dev); 4393 i915_enable_asle_pipestat(dev_priv);
4428 4394
4429 return 0; 4395 return 0;
4430} 4396}
4431 4397
4432static void i915_hpd_irq_setup(struct drm_device *dev) 4398static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4433{ 4399{
4434 struct drm_i915_private *dev_priv = dev->dev_private;
4435 u32 hotplug_en; 4400 u32 hotplug_en;
4436 4401
4437 assert_spin_locked(&dev_priv->irq_lock); 4402 assert_spin_locked(&dev_priv->irq_lock);
4438 4403
4439 /* Note HDMI and DP share hotplug bits */ 4404 /* Note HDMI and DP share hotplug bits */
4440 /* enable bits are the same for all generations */ 4405 /* enable bits are the same for all generations */
4441 hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915); 4406 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4442 /* Programming the CRT detection parameters tends 4407 /* Programming the CRT detection parameters tends
4443 to generate a spurious hotplug event about three 4408 to generate a spurious hotplug event about three
4444 seconds later. So just do it once. 4409 seconds later. So just do it once.
4445 */ 4410 */
4446 if (IS_G4X(dev)) 4411 if (IS_G4X(dev_priv))
4447 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4412 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4448 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4413 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4449 4414
@@ -4510,7 +4475,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4510 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 4475 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
4511 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4476 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4512 if (hotplug_status) 4477 if (hotplug_status)
4513 i9xx_hpd_irq_handler(dev, hotplug_status); 4478 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4514 } 4479 }
4515 4480
4516 I915_WRITE(IIR, iir & ~flip_mask); 4481 I915_WRITE(IIR, iir & ~flip_mask);
@@ -4523,24 +4488,24 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4523 4488
4524 for_each_pipe(dev_priv, pipe) { 4489 for_each_pipe(dev_priv, pipe) {
4525 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4490 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4526 i915_handle_vblank(dev, pipe, pipe, iir)) 4491 i915_handle_vblank(dev_priv, pipe, pipe, iir))
4527 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4492 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4528 4493
4529 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4494 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4530 blc_event = true; 4495 blc_event = true;
4531 4496
4532 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4497 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4533 i9xx_pipe_crc_irq_handler(dev, pipe); 4498 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4534 4499
4535 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4500 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4536 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 4501 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4537 } 4502 }
4538 4503
4539 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4504 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4540 intel_opregion_asle_intr(dev); 4505 intel_opregion_asle_intr(dev_priv);
4541 4506
4542 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4507 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4543 gmbus_irq_handler(dev); 4508 gmbus_irq_handler(dev_priv);
4544 4509
4545 /* With MSI, interrupts are only generated when iir 4510 /* With MSI, interrupts are only generated when iir
4546 * transitions from zero to nonzero. If another bit got 4511 * transitions from zero to nonzero. If another bit got
@@ -4674,12 +4639,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4674 dev->driver->disable_vblank = ironlake_disable_vblank; 4639 dev->driver->disable_vblank = ironlake_disable_vblank;
4675 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4640 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4676 } else { 4641 } else {
4677 if (INTEL_INFO(dev_priv)->gen == 2) { 4642 if (IS_GEN2(dev_priv)) {
4678 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4643 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4679 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4644 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4680 dev->driver->irq_handler = i8xx_irq_handler; 4645 dev->driver->irq_handler = i8xx_irq_handler;
4681 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4646 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4682 } else if (INTEL_INFO(dev_priv)->gen == 3) { 4647 } else if (IS_GEN3(dev_priv)) {
4683 dev->driver->irq_preinstall = i915_irq_preinstall; 4648 dev->driver->irq_preinstall = i915_irq_preinstall;
4684 dev->driver->irq_postinstall = i915_irq_postinstall; 4649 dev->driver->irq_postinstall = i915_irq_postinstall;
4685 dev->driver->irq_uninstall = i915_irq_uninstall; 4650 dev->driver->irq_uninstall = i915_irq_uninstall;
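The i915_irq.c hunks above are almost entirely one mechanical conversion: functions that took a struct drm_device and immediately fetched dev->dev_private now take struct drm_i915_private directly, and the remaining drm_device users convert once at the boundary via to_i915() or engine->i915. A minimal stand-alone sketch of that pattern, using simplified stand-in structures rather than the real i915 definitions:

#include <stdio.h>

/* Simplified stand-ins for the real DRM structures. */
struct drm_i915_private { int hangcheck_enabled; };
struct drm_device { void *dev_private; };

static inline struct drm_i915_private *to_i915(struct drm_device *dev)
{
	return dev->dev_private;
}

/* Old style: take drm_device and dig out the private pointer every time. */
static void queue_hangcheck_old(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->hangcheck_enabled)
		printf("hangcheck queued (old calling convention)\n");
}

/* New style: take drm_i915_private directly; callers convert once. */
static void queue_hangcheck_new(struct drm_i915_private *dev_priv)
{
	if (dev_priv->hangcheck_enabled)
		printf("hangcheck queued (new calling convention)\n");
}

int main(void)
{
	struct drm_i915_private i915 = { .hangcheck_enabled = 1 };
	struct drm_device dev = { .dev_private = &i915 };

	queue_hangcheck_old(&dev);
	queue_hangcheck_new(to_i915(&dev));
	return 0;
}

The effect visible in the diff is that interrupt handlers and hangcheck paths stop bouncing through drm_device on every call, and the stronger parameter type makes it harder to mix up the two pointers.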
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 1779f02e6df8..383c076919ed 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -58,6 +58,7 @@ struct i915_params i915 __read_mostly = {
58 .guc_log_level = -1, 58 .guc_log_level = -1,
59 .enable_dp_mst = true, 59 .enable_dp_mst = true,
60 .inject_load_failure = 0, 60 .inject_load_failure = 0,
61 .enable_dpcd_backlight = false,
61}; 62};
62 63
63module_param_named(modeset, i915.modeset, int, 0400); 64module_param_named(modeset, i915.modeset, int, 0400);
@@ -210,3 +211,6 @@ MODULE_PARM_DESC(enable_dp_mst,
210module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400); 211module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
211MODULE_PARM_DESC(inject_load_failure, 212MODULE_PARM_DESC(inject_load_failure,
212 "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); 213 "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
214module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600);
215MODULE_PARM_DESC(enable_dpcd_backlight,
216 "Enable support for DPCD backlight control (default:false)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 02bc27804291..65e73dd7d970 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -61,6 +61,7 @@ struct i915_params {
61 bool verbose_state_checks; 61 bool verbose_state_checks;
62 bool nuclear_pageflip; 62 bool nuclear_pageflip;
63 bool enable_dp_mst; 63 bool enable_dp_mst;
64 bool enable_dpcd_backlight;
64}; 65};
65 66
66extern struct i915_params i915 __read_mostly; 67extern struct i915_params i915 __read_mostly;
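The new enable_dpcd_backlight option follows the standard i915 module-parameter pattern: a bool field in struct i915_params plus a module_param_named()/MODULE_PARM_DESC() pair. A rough stand-alone illustration of that pattern (a toy out-of-tree module built against kernel headers, not the i915 wiring itself):

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool enable_dpcd_backlight; /* defaults to false, as in the patch */
module_param_named(enable_dpcd_backlight, enable_dpcd_backlight, bool, 0600);
MODULE_PARM_DESC(enable_dpcd_backlight,
		 "Enable support for DPCD backlight control (default:false)");

static int __init param_demo_init(void)
{
	pr_info("enable_dpcd_backlight=%d\n", enable_dpcd_backlight);
	return 0;
}

static void __exit param_demo_exit(void)
{
}

module_init(param_demo_init);
module_exit(param_demo_exit);
MODULE_LICENSE("GPL");

With the real driver this would typically be set as i915.enable_dpcd_backlight=1 on the kernel command line or via modprobe; the 0600 permission also exposes it read/write to root under /sys/module/i915/parameters/.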
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b407411e31ba..e3077259541a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2449,6 +2449,8 @@ enum skl_disp_power_wells {
2449#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f 2449#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
2450#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 2450#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
2451 2451
2452#define RAWCLK_FREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6024)
2453
2452#define _FPA0 0x6040 2454#define _FPA0 0x6040
2453#define _FPA1 0x6044 2455#define _FPA1 0x6044
2454#define _FPB0 0x6048 2456#define _FPB0 0x6048
@@ -6031,6 +6033,7 @@ enum skl_disp_power_wells {
6031#define CHICKEN_PAR1_1 _MMIO(0x42080) 6033#define CHICKEN_PAR1_1 _MMIO(0x42080)
6032#define DPA_MASK_VBLANK_SRD (1 << 15) 6034#define DPA_MASK_VBLANK_SRD (1 << 15)
6033#define FORCE_ARB_IDLE_PLANES (1 << 14) 6035#define FORCE_ARB_IDLE_PLANES (1 << 14)
6036#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
6034 6037
6035#define _CHICKEN_PIPESL_1_A 0x420b0 6038#define _CHICKEN_PIPESL_1_A 0x420b0
6036#define _CHICKEN_PIPESL_1_B 0x420b4 6039#define _CHICKEN_PIPESL_1_B 0x420b4
@@ -6089,7 +6092,14 @@ enum skl_disp_power_wells {
6089#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 6092#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
6090 6093
6091#define GEN8_L3SQCREG1 _MMIO(0xB100) 6094#define GEN8_L3SQCREG1 _MMIO(0xB100)
6092#define BDW_WA_L3SQCREG1_DEFAULT 0x784000 6095/*
6096 * Note that on CHV the following has an off-by-one error wrt. BSpec.
6097 * Using the formula in BSpec leads to a hang, while the formula here works
6098 * fine and matches the formulas for all other platforms. A BSpec change
6099 * request has been filed to clarify this.
6100 */
6101#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
6102#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)
6093 6103
6094#define GEN7_L3CNTLREG1 _MMIO(0xB01C) 6104#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
6095#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C 6105#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
@@ -7557,14 +7567,15 @@ enum skl_disp_power_wells {
7557#define CDCLK_FREQ_540 (1<<26) 7567#define CDCLK_FREQ_540 (1<<26)
7558#define CDCLK_FREQ_337_308 (2<<26) 7568#define CDCLK_FREQ_337_308 (2<<26)
7559#define CDCLK_FREQ_675_617 (3<<26) 7569#define CDCLK_FREQ_675_617 (3<<26)
7560#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
7561
7562#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22) 7570#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22)
7563#define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22) 7571#define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22)
7564#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22) 7572#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22)
7565#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22) 7573#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
7566#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22) 7574#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
7575#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20)
7576#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
7567#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16) 7577#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
7578#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
7568 7579
7569/* LCPLL_CTL */ 7580/* LCPLL_CTL */
7570#define LCPLL1_CTL _MMIO(0x46010) 7581#define LCPLL1_CTL _MMIO(0x46010)
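The L3_GENERAL_PRIO_CREDITS()/L3_HIGH_PRIO_CREDITS() macros added to i915_reg.h above replace the hard-coded BDW_WA_L3SQCREG1_DEFAULT. A quick stand-alone check of the encoding (the 30/2 credit split below is inferred from the removed 0x784000 constant, not stated in the diff):

#include <stdio.h>

/* Credit macros as added in the patch: the credit count is halved before
 * being shifted into its register field.
 */
#define L3_GENERAL_PRIO_CREDITS(x)	(((x) >> 1) << 19)
#define L3_HIGH_PRIO_CREDITS(x)		(((x) >> 1) << 14)

int main(void)
{
	unsigned int val = L3_GENERAL_PRIO_CREDITS(30) | L3_HIGH_PRIO_CREDITS(2);

	/* Prints 0x784000, matching the old BDW_WA_L3SQCREG1_DEFAULT. */
	printf("0x%06x\n", val);
	return 0;
}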
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 2d576b7ff299..37b6444b8e22 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -43,7 +43,7 @@ static u32 calc_residency(struct drm_device *dev,
43 u64 units = 128ULL, div = 100000ULL; 43 u64 units = 128ULL, div = 100000ULL;
44 u32 ret; 44 u32 ret;
45 45
46 if (!intel_enable_rc6(dev)) 46 if (!intel_enable_rc6())
47 return 0; 47 return 0;
48 48
49 intel_runtime_pm_get(dev_priv); 49 intel_runtime_pm_get(dev_priv);
@@ -70,8 +70,7 @@ static u32 calc_residency(struct drm_device *dev,
70static ssize_t 70static ssize_t
71show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) 71show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
72{ 72{
73 struct drm_minor *dminor = dev_to_drm_minor(kdev); 73 return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
74 return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
75} 74}
76 75
77static ssize_t 76static ssize_t
@@ -397,7 +396,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
397 /* We still need *_set_rps to process the new max_delay and 396 /* We still need *_set_rps to process the new max_delay and
398 * update the interrupt limits and PMINTRMSK even though 397 * update the interrupt limits and PMINTRMSK even though
399 * frequency request may be unchanged. */ 398 * frequency request may be unchanged. */
400 intel_set_rps(dev, val); 399 intel_set_rps(dev_priv, val);
401 400
402 mutex_unlock(&dev_priv->rps.hw_lock); 401 mutex_unlock(&dev_priv->rps.hw_lock);
403 402
@@ -461,7 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
461 /* We still need *_set_rps to process the new min_delay and 460 /* We still need *_set_rps to process the new min_delay and
462 * update the interrupt limits and PMINTRMSK even though 461 * update the interrupt limits and PMINTRMSK even though
463 * frequency request may be unchanged. */ 462 * frequency request may be unchanged. */
464 intel_set_rps(dev, val); 463 intel_set_rps(dev_priv, val);
465 464
466 mutex_unlock(&dev_priv->rps.hw_lock); 465 mutex_unlock(&dev_priv->rps.hw_lock);
467 466
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index dc0def210097..20b2e4039792 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
462 ), 462 ),
463 463
464 TP_fast_assign( 464 TP_fast_assign(
465 __entry->dev = from->dev->primary->index; 465 __entry->dev = from->i915->dev->primary->index;
466 __entry->sync_from = from->id; 466 __entry->sync_from = from->id;
467 __entry->sync_to = to_req->engine->id; 467 __entry->sync_to = to_req->engine->id;
468 __entry->seqno = i915_gem_request_get_seqno(req); 468 __entry->seqno = i915_gem_request_get_seqno(req);
@@ -486,13 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch,
486 ), 486 ),
487 487
488 TP_fast_assign( 488 TP_fast_assign(
489 struct intel_engine_cs *engine = 489 __entry->dev = req->i915->dev->primary->index;
490 i915_gem_request_get_engine(req); 490 __entry->ring = req->engine->id;
491 __entry->dev = engine->dev->primary->index; 491 __entry->seqno = req->seqno;
492 __entry->ring = engine->id;
493 __entry->seqno = i915_gem_request_get_seqno(req);
494 __entry->flags = flags; 492 __entry->flags = flags;
495 i915_trace_irq_get(engine, req); 493 i915_trace_irq_get(req->engine, req);
496 ), 494 ),
497 495
498 TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", 496 TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -511,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush,
511 ), 509 ),
512 510
513 TP_fast_assign( 511 TP_fast_assign(
514 __entry->dev = req->engine->dev->primary->index; 512 __entry->dev = req->i915->dev->primary->index;
515 __entry->ring = req->engine->id; 513 __entry->ring = req->engine->id;
516 __entry->invalidate = invalidate; 514 __entry->invalidate = invalidate;
517 __entry->flush = flush; 515 __entry->flush = flush;
@@ -533,11 +531,9 @@ DECLARE_EVENT_CLASS(i915_gem_request,
533 ), 531 ),
534 532
535 TP_fast_assign( 533 TP_fast_assign(
536 struct intel_engine_cs *engine = 534 __entry->dev = req->i915->dev->primary->index;
537 i915_gem_request_get_engine(req); 535 __entry->ring = req->engine->id;
538 __entry->dev = engine->dev->primary->index; 536 __entry->seqno = req->seqno;
539 __entry->ring = engine->id;
540 __entry->seqno = i915_gem_request_get_seqno(req);
541 ), 537 ),
542 538
543 TP_printk("dev=%u, ring=%u, seqno=%u", 539 TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -560,7 +556,7 @@ TRACE_EVENT(i915_gem_request_notify,
560 ), 556 ),
561 557
562 TP_fast_assign( 558 TP_fast_assign(
563 __entry->dev = engine->dev->primary->index; 559 __entry->dev = engine->i915->dev->primary->index;
564 __entry->ring = engine->id; 560 __entry->ring = engine->id;
565 __entry->seqno = engine->get_seqno(engine); 561 __entry->seqno = engine->get_seqno(engine);
566 ), 562 ),
@@ -597,13 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin,
597 * less desirable. 593 * less desirable.
598 */ 594 */
599 TP_fast_assign( 595 TP_fast_assign(
600 struct intel_engine_cs *engine = 596 __entry->dev = req->i915->dev->primary->index;
601 i915_gem_request_get_engine(req); 597 __entry->ring = req->engine->id;
602 __entry->dev = engine->dev->primary->index; 598 __entry->seqno = req->seqno;
603 __entry->ring = engine->id;
604 __entry->seqno = i915_gem_request_get_seqno(req);
605 __entry->blocking = 599 __entry->blocking =
606 mutex_is_locked(&engine->dev->struct_mutex); 600 mutex_is_locked(&req->i915->dev->struct_mutex);
607 ), 601 ),
608 602
609 TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s", 603 TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -792,7 +786,7 @@ TRACE_EVENT(switch_mm,
792 __entry->ring = engine->id; 786 __entry->ring = engine->id;
793 __entry->to = to; 787 __entry->to = to;
794 __entry->vm = to->ppgtt? &to->ppgtt->base : NULL; 788 __entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
795 __entry->dev = engine->dev->primary->index; 789 __entry->dev = engine->i915->dev->primary->index;
796 ), 790 ),
797 791
798 TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p", 792 TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index d02efb8cad4d..d5a7a5e7ee7e 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -58,15 +58,14 @@
58 * This function is called at the initialization stage to detect whether 58 * This function is called at the initialization stage to detect whether
59 * we are running on a vGPU. 59 * we are running on a vGPU.
60 */ 60 */
61void i915_check_vgpu(struct drm_device *dev) 61void i915_check_vgpu(struct drm_i915_private *dev_priv)
62{ 62{
63 struct drm_i915_private *dev_priv = to_i915(dev);
64 uint64_t magic; 63 uint64_t magic;
65 uint32_t version; 64 uint32_t version;
66 65
67 BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); 66 BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
68 67
69 if (!IS_HASWELL(dev)) 68 if (!IS_HASWELL(dev_priv))
70 return; 69 return;
71 70
72 magic = __raw_i915_read64(dev_priv, vgtif_reg(magic)); 71 magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
@@ -136,7 +135,7 @@ static int vgt_balloon_space(struct drm_mm *mm,
136 135
137/** 136/**
138 * intel_vgt_balloon - balloon out reserved graphics address trunks 137 * intel_vgt_balloon - balloon out reserved graphics address trunks
139 * @dev: drm device 138 * @dev_priv: i915 device
140 * 139 *
141 * This function is called at the initialization stage, to balloon out the 140 * This function is called at the initialization stage, to balloon out the
142 * graphic address space allocated to other vGPUs, by marking these spaces as 141 * graphic address space allocated to other vGPUs, by marking these spaces as
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 3c83b47b5f69..21ffcfea5f5d 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -110,7 +110,7 @@ struct vgt_if {
110#define VGT_DRV_DISPLAY_NOT_READY 0 110#define VGT_DRV_DISPLAY_NOT_READY 0
111#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */ 111#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */
112 112
113extern void i915_check_vgpu(struct drm_device *dev); 113extern void i915_check_vgpu(struct drm_i915_private *dev_priv);
114extern int intel_vgt_balloon(struct drm_device *dev); 114extern int intel_vgt_balloon(struct drm_device *dev);
115extern void intel_vgt_deballoon(void); 115extern void intel_vgt_deballoon(void);
116 116
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 02a7527ce7bb..b9329c2a670a 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -621,17 +621,11 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
621static int i915_audio_component_get_cdclk_freq(struct device *dev) 621static int i915_audio_component_get_cdclk_freq(struct device *dev)
622{ 622{
623 struct drm_i915_private *dev_priv = dev_to_i915(dev); 623 struct drm_i915_private *dev_priv = dev_to_i915(dev);
624 int ret;
625 624
626 if (WARN_ON_ONCE(!HAS_DDI(dev_priv))) 625 if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
627 return -ENODEV; 626 return -ENODEV;
628 627
629 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); 628 return dev_priv->cdclk_freq;
630 ret = dev_priv->display.get_display_clock_speed(dev_priv->dev);
631
632 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
633
634 return ret;
635} 629}
636 630
637static int i915_audio_component_sync_audio_rate(struct device *dev, 631static int i915_audio_component_sync_audio_rate(struct device *dev,
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b235b6e88ead..8b68c4882fba 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -318,6 +318,15 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
318 return; 318 return;
319 } 319 }
320 320
321 dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
322 if (bdb->version >= 191 &&
323 get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
324 const struct bdb_lfp_backlight_control_method *method;
325
326 method = &backlight_data->backlight_control[panel_type];
327 dev_priv->vbt.backlight.type = method->type;
328 }
329
321 dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; 330 dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
322 dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; 331 dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
323 dev_priv->vbt.backlight.min_brightness = entry->min_brightness; 332 dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
@@ -763,6 +772,16 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
763 return; 772 return;
764 } 773 }
765 774
775 /*
776 * These fields were introduced in VBT version 197, so make sure
777 * these bits are cleared to zero when parsing any earlier VBT
778 * version.
779 */
780 if (dev_priv->vbt.dsi.config->dual_link && bdb->version < 197) {
781 dev_priv->vbt.dsi.config->dl_dcs_cabc_ports = 0;
782 dev_priv->vbt.dsi.config->dl_dcs_backlight_ports = 0;
783 }
784
766 /* We have mandatory mipi config blocks. Initialize as generic panel */ 785 /* We have mandatory mipi config blocks. Initialize as generic panel */
767 dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID; 786 dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
768} 787}
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index ab0ea315eddb..8405b5a367d7 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -30,6 +30,14 @@
30#ifndef _INTEL_BIOS_H_ 30#ifndef _INTEL_BIOS_H_
31#define _INTEL_BIOS_H_ 31#define _INTEL_BIOS_H_
32 32
33enum intel_backlight_type {
34 INTEL_BACKLIGHT_PMIC,
35 INTEL_BACKLIGHT_LPSS,
36 INTEL_BACKLIGHT_DISPLAY_DDI,
37 INTEL_BACKLIGHT_DSI_DCS,
38 INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE,
39};
40
33struct edp_power_seq { 41struct edp_power_seq {
34 u16 t1_t3; 42 u16 t1_t3;
35 u16 t8; 43 u16 t8;
@@ -113,7 +121,13 @@ struct mipi_config {
113 u16 dual_link:2; 121 u16 dual_link:2;
114 u16 lane_cnt:2; 122 u16 lane_cnt:2;
115 u16 pixel_overlap:3; 123 u16 pixel_overlap:3;
116 u16 rsvd3:9; 124 u16 rgb_flip:1;
125#define DL_DCS_PORT_A 0x00
126#define DL_DCS_PORT_C 0x01
127#define DL_DCS_PORT_A_AND_C 0x02
128 u16 dl_dcs_cabc_ports:2;
129 u16 dl_dcs_backlight_ports:2;
130 u16 rsvd3:4;
117 131
118 u16 rsvd4; 132 u16 rsvd4;
119 133
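The intel_bios.h change above splits the old 9-bit rsvd3 field of struct mipi_config into rgb_flip (1 bit), dl_dcs_cabc_ports (2), dl_dcs_backlight_ports (2) and a 4-bit remainder, so the 16-bit VBT word keeps its width. A small stand-alone check of that accounting (toy structs, not the full mipi_config; exact bit placement is compiler-dependent, only the total width matters here):

#include <stdio.h>
#include <stdint.h>

struct dual_link_word_old {
	uint16_t dual_link:2;
	uint16_t lane_cnt:2;
	uint16_t pixel_overlap:3;
	uint16_t rsvd3:9;		/* 2 + 2 + 3 + 9 = 16 bits */
};

struct dual_link_word_new {
	uint16_t dual_link:2;
	uint16_t lane_cnt:2;
	uint16_t pixel_overlap:3;
	uint16_t rgb_flip:1;
	uint16_t dl_dcs_cabc_ports:2;
	uint16_t dl_dcs_backlight_ports:2;
	uint16_t rsvd3:4;		/* 2 + 2 + 3 + 1 + 2 + 2 + 4 = 16 bits */
};

int main(void)
{
	/* Both print 2 bytes with GCC/Clang: the layout size is unchanged. */
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct dual_link_word_old),
	       sizeof(struct dual_link_word_new));
	return 0;
}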
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index a34c23eceba0..2b3b428d9cd2 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -41,16 +41,22 @@
41 * be moved to FW_FAILED. 41 * be moved to FW_FAILED.
42 */ 42 */
43 43
44#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
45MODULE_FIRMWARE(I915_CSR_KBL);
46#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
47
44#define I915_CSR_SKL "i915/skl_dmc_ver1.bin" 48#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
49MODULE_FIRMWARE(I915_CSR_SKL);
50#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
51
45#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin" 52#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
53MODULE_FIRMWARE(I915_CSR_BXT);
54#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
46 55
47#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares" 56#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
48 57
49MODULE_FIRMWARE(I915_CSR_SKL);
50MODULE_FIRMWARE(I915_CSR_BXT);
51 58
52#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23) 59
53#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
54 60
55#define CSR_MAX_FW_SIZE 0x2FFF 61#define CSR_MAX_FW_SIZE 0x2FFF
56#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF 62#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
@@ -169,12 +175,10 @@ struct stepping_info {
169 char substepping; 175 char substepping;
170}; 176};
171 177
172/*
173 * Kabylake derived from Skylake H0, so SKL H0
174 * is the right firmware for KBL A0 (revid 0).
175 */
176static const struct stepping_info kbl_stepping_info[] = { 178static const struct stepping_info kbl_stepping_info[] = {
177 {'H', '0'}, {'I', '0'} 179 {'A', '0'}, {'B', '0'}, {'C', '0'},
180 {'D', '0'}, {'E', '0'}, {'F', '0'},
181 {'G', '0'}, {'H', '0'}, {'I', '0'},
178}; 182};
179 183
180static const struct stepping_info skl_stepping_info[] = { 184static const struct stepping_info skl_stepping_info[] = {
@@ -298,7 +302,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
298 302
299 csr->version = css_header->version; 303 csr->version = css_header->version;
300 304
301 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 305 if (IS_KABYLAKE(dev_priv)) {
306 required_min_version = KBL_CSR_VERSION_REQUIRED;
307 } else if (IS_SKYLAKE(dev_priv)) {
302 required_min_version = SKL_CSR_VERSION_REQUIRED; 308 required_min_version = SKL_CSR_VERSION_REQUIRED;
303 } else if (IS_BROXTON(dev_priv)) { 309 } else if (IS_BROXTON(dev_priv)) {
304 required_min_version = BXT_CSR_VERSION_REQUIRED; 310 required_min_version = BXT_CSR_VERSION_REQUIRED;
@@ -446,7 +452,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
446 if (!HAS_CSR(dev_priv)) 452 if (!HAS_CSR(dev_priv))
447 return; 453 return;
448 454
449 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) 455 if (IS_KABYLAKE(dev_priv))
456 csr->fw_path = I915_CSR_KBL;
457 else if (IS_SKYLAKE(dev_priv))
450 csr->fw_path = I915_CSR_SKL; 458 csr->fw_path = I915_CSR_SKL;
451 else if (IS_BROXTON(dev_priv)) 459 else if (IS_BROXTON(dev_priv))
452 csr->fw_path = I915_CSR_BXT; 460 csr->fw_path = I915_CSR_BXT;
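intel_csr.c now gives Kabylake its own DMC firmware and minimum required version instead of reusing the Skylake blob. The selection chain in the hunks above boils down to the small table below (a stand-alone sketch using a toy platform enum in place of IS_KABYLAKE()/IS_SKYLAKE()/IS_BROXTON(); paths and version numbers are the ones visible in the diff):

#include <stdio.h>

enum platform { PLATFORM_KBL, PLATFORM_SKL, PLATFORM_BXT, PLATFORM_OTHER };

struct csr_fw {
	const char *path;
	int ver_major, ver_minor;	/* minimum required DMC version */
};

/* Mirrors the selection order in the patched intel_csr.c. */
static int pick_dmc_firmware(enum platform p, struct csr_fw *fw)
{
	switch (p) {
	case PLATFORM_KBL:
		*fw = (struct csr_fw){ "i915/kbl_dmc_ver1.bin", 1, 1 };
		return 0;
	case PLATFORM_SKL:
		*fw = (struct csr_fw){ "i915/skl_dmc_ver1.bin", 1, 23 };
		return 0;
	case PLATFORM_BXT:
		*fw = (struct csr_fw){ "i915/bxt_dmc_ver1.bin", 1, 7 };
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	struct csr_fw fw;

	if (pick_dmc_firmware(PLATFORM_KBL, &fw) == 0)
		printf("%s (needs >= %d.%d)\n", fw.path, fw.ver_major, fw.ver_minor);
	return 0;
}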
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 01e523df363b..c454744dda0b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -948,7 +948,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
948{ 948{
949 struct intel_shared_dpll *pll; 949 struct intel_shared_dpll *pll;
950 struct intel_dpll_hw_state *state; 950 struct intel_dpll_hw_state *state;
951 intel_clock_t clock; 951 struct dpll clock;
952 952
953 /* For DDI ports we always use a shared PLL. */ 953 /* For DDI ports we always use a shared PLL. */
954 if (WARN_ON(dpll == DPLL_ID_PRIVATE)) 954 if (WARN_ON(dpll == DPLL_ID_PRIVATE))
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2113f401f0ba..3f8987b7ee48 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -48,6 +48,11 @@
48#include <linux/reservation.h> 48#include <linux/reservation.h>
49#include <linux/dma-buf.h> 49#include <linux/dma-buf.h>
50 50
51static bool is_mmio_work(struct intel_flip_work *work)
52{
53 return work->mmio_work.func;
54}
55
51/* Primary plane formats for gen <= 3 */ 56/* Primary plane formats for gen <= 3 */
52static const uint32_t i8xx_primary_formats[] = { 57static const uint32_t i8xx_primary_formats[] = {
53 DRM_FORMAT_C8, 58 DRM_FORMAT_C8,
@@ -117,20 +122,17 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
117static void ironlake_pfit_enable(struct intel_crtc *crtc); 122static void ironlake_pfit_enable(struct intel_crtc *crtc);
118static void intel_modeset_setup_hw_state(struct drm_device *dev); 123static void intel_modeset_setup_hw_state(struct drm_device *dev);
119static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); 124static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
125static int ilk_max_pixel_rate(struct drm_atomic_state *state);
120 126
121typedef struct {
122 int min, max;
123} intel_range_t;
124
125typedef struct {
126 int dot_limit;
127 int p2_slow, p2_fast;
128} intel_p2_t;
129
130typedef struct intel_limit intel_limit_t;
131struct intel_limit { 127struct intel_limit {
132 intel_range_t dot, vco, n, m, m1, m2, p, p1; 128 struct {
133 intel_p2_t p2; 129 int min, max;
130 } dot, vco, n, m, m1, m2, p, p1;
131
132 struct {
133 int dot_limit;
134 int p2_slow, p2_fast;
135 } p2;
134}; 136};
135 137
136/* returns HPLL frequency in kHz */ 138/* returns HPLL frequency in kHz */
@@ -185,6 +187,7 @@ intel_pch_rawclk(struct drm_i915_private *dev_priv)
185static int 187static int
186intel_vlv_hrawclk(struct drm_i915_private *dev_priv) 188intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
187{ 189{
190 /* RAWCLK_FREQ_VLV register updated from power well code */
188 return vlv_get_cck_clock_hpll(dev_priv, "hrawclk", 191 return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
189 CCK_DISPLAY_REF_CLOCK_CONTROL); 192 CCK_DISPLAY_REF_CLOCK_CONTROL);
190} 193}
@@ -218,7 +221,7 @@ intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
218 } 221 }
219} 222}
220 223
221static void intel_update_rawclk(struct drm_i915_private *dev_priv) 224void intel_update_rawclk(struct drm_i915_private *dev_priv)
222{ 225{
223 if (HAS_PCH_SPLIT(dev_priv)) 226 if (HAS_PCH_SPLIT(dev_priv))
224 dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv); 227 dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
@@ -255,7 +258,7 @@ intel_fdi_link_freq(struct drm_i915_private *dev_priv,
255 return 270000; 258 return 270000;
256} 259}
257 260
258static const intel_limit_t intel_limits_i8xx_dac = { 261static const struct intel_limit intel_limits_i8xx_dac = {
259 .dot = { .min = 25000, .max = 350000 }, 262 .dot = { .min = 25000, .max = 350000 },
260 .vco = { .min = 908000, .max = 1512000 }, 263 .vco = { .min = 908000, .max = 1512000 },
261 .n = { .min = 2, .max = 16 }, 264 .n = { .min = 2, .max = 16 },
@@ -268,7 +271,7 @@ static const intel_limit_t intel_limits_i8xx_dac = {
268 .p2_slow = 4, .p2_fast = 2 }, 271 .p2_slow = 4, .p2_fast = 2 },
269}; 272};
270 273
271static const intel_limit_t intel_limits_i8xx_dvo = { 274static const struct intel_limit intel_limits_i8xx_dvo = {
272 .dot = { .min = 25000, .max = 350000 }, 275 .dot = { .min = 25000, .max = 350000 },
273 .vco = { .min = 908000, .max = 1512000 }, 276 .vco = { .min = 908000, .max = 1512000 },
274 .n = { .min = 2, .max = 16 }, 277 .n = { .min = 2, .max = 16 },
@@ -281,7 +284,7 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
281 .p2_slow = 4, .p2_fast = 4 }, 284 .p2_slow = 4, .p2_fast = 4 },
282}; 285};
283 286
284static const intel_limit_t intel_limits_i8xx_lvds = { 287static const struct intel_limit intel_limits_i8xx_lvds = {
285 .dot = { .min = 25000, .max = 350000 }, 288 .dot = { .min = 25000, .max = 350000 },
286 .vco = { .min = 908000, .max = 1512000 }, 289 .vco = { .min = 908000, .max = 1512000 },
287 .n = { .min = 2, .max = 16 }, 290 .n = { .min = 2, .max = 16 },
@@ -294,7 +297,7 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
294 .p2_slow = 14, .p2_fast = 7 }, 297 .p2_slow = 14, .p2_fast = 7 },
295}; 298};
296 299
297static const intel_limit_t intel_limits_i9xx_sdvo = { 300static const struct intel_limit intel_limits_i9xx_sdvo = {
298 .dot = { .min = 20000, .max = 400000 }, 301 .dot = { .min = 20000, .max = 400000 },
299 .vco = { .min = 1400000, .max = 2800000 }, 302 .vco = { .min = 1400000, .max = 2800000 },
300 .n = { .min = 1, .max = 6 }, 303 .n = { .min = 1, .max = 6 },
@@ -307,7 +310,7 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
307 .p2_slow = 10, .p2_fast = 5 }, 310 .p2_slow = 10, .p2_fast = 5 },
308}; 311};
309 312
310static const intel_limit_t intel_limits_i9xx_lvds = { 313static const struct intel_limit intel_limits_i9xx_lvds = {
311 .dot = { .min = 20000, .max = 400000 }, 314 .dot = { .min = 20000, .max = 400000 },
312 .vco = { .min = 1400000, .max = 2800000 }, 315 .vco = { .min = 1400000, .max = 2800000 },
313 .n = { .min = 1, .max = 6 }, 316 .n = { .min = 1, .max = 6 },
@@ -321,7 +324,7 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
321}; 324};
322 325
323 326
324static const intel_limit_t intel_limits_g4x_sdvo = { 327static const struct intel_limit intel_limits_g4x_sdvo = {
325 .dot = { .min = 25000, .max = 270000 }, 328 .dot = { .min = 25000, .max = 270000 },
326 .vco = { .min = 1750000, .max = 3500000}, 329 .vco = { .min = 1750000, .max = 3500000},
327 .n = { .min = 1, .max = 4 }, 330 .n = { .min = 1, .max = 4 },
@@ -336,7 +339,7 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
336 }, 339 },
337}; 340};
338 341
339static const intel_limit_t intel_limits_g4x_hdmi = { 342static const struct intel_limit intel_limits_g4x_hdmi = {
340 .dot = { .min = 22000, .max = 400000 }, 343 .dot = { .min = 22000, .max = 400000 },
341 .vco = { .min = 1750000, .max = 3500000}, 344 .vco = { .min = 1750000, .max = 3500000},
342 .n = { .min = 1, .max = 4 }, 345 .n = { .min = 1, .max = 4 },
@@ -349,7 +352,7 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
349 .p2_slow = 10, .p2_fast = 5 }, 352 .p2_slow = 10, .p2_fast = 5 },
350}; 353};
351 354
352static const intel_limit_t intel_limits_g4x_single_channel_lvds = { 355static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
353 .dot = { .min = 20000, .max = 115000 }, 356 .dot = { .min = 20000, .max = 115000 },
354 .vco = { .min = 1750000, .max = 3500000 }, 357 .vco = { .min = 1750000, .max = 3500000 },
355 .n = { .min = 1, .max = 3 }, 358 .n = { .min = 1, .max = 3 },
@@ -363,7 +366,7 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
363 }, 366 },
364}; 367};
365 368
366static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { 369static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
367 .dot = { .min = 80000, .max = 224000 }, 370 .dot = { .min = 80000, .max = 224000 },
368 .vco = { .min = 1750000, .max = 3500000 }, 371 .vco = { .min = 1750000, .max = 3500000 },
369 .n = { .min = 1, .max = 3 }, 372 .n = { .min = 1, .max = 3 },
@@ -377,7 +380,7 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
377 }, 380 },
378}; 381};
379 382
380static const intel_limit_t intel_limits_pineview_sdvo = { 383static const struct intel_limit intel_limits_pineview_sdvo = {
381 .dot = { .min = 20000, .max = 400000}, 384 .dot = { .min = 20000, .max = 400000},
382 .vco = { .min = 1700000, .max = 3500000 }, 385 .vco = { .min = 1700000, .max = 3500000 },
383 /* Pineview's Ncounter is a ring counter */ 386 /* Pineview's Ncounter is a ring counter */
@@ -392,7 +395,7 @@ static const intel_limit_t intel_limits_pineview_sdvo = {
392 .p2_slow = 10, .p2_fast = 5 }, 395 .p2_slow = 10, .p2_fast = 5 },
393}; 396};
394 397
395static const intel_limit_t intel_limits_pineview_lvds = { 398static const struct intel_limit intel_limits_pineview_lvds = {
396 .dot = { .min = 20000, .max = 400000 }, 399 .dot = { .min = 20000, .max = 400000 },
397 .vco = { .min = 1700000, .max = 3500000 }, 400 .vco = { .min = 1700000, .max = 3500000 },
398 .n = { .min = 3, .max = 6 }, 401 .n = { .min = 3, .max = 6 },
@@ -410,7 +413,7 @@ static const intel_limit_t intel_limits_pineview_lvds = {
410 * We calculate clock using (register_value + 2) for N/M1/M2, so here 413 * We calculate clock using (register_value + 2) for N/M1/M2, so here
411 * the range value for them is (actual_value - 2). 414 * the range value for them is (actual_value - 2).
412 */ 415 */
413static const intel_limit_t intel_limits_ironlake_dac = { 416static const struct intel_limit intel_limits_ironlake_dac = {
414 .dot = { .min = 25000, .max = 350000 }, 417 .dot = { .min = 25000, .max = 350000 },
415 .vco = { .min = 1760000, .max = 3510000 }, 418 .vco = { .min = 1760000, .max = 3510000 },
416 .n = { .min = 1, .max = 5 }, 419 .n = { .min = 1, .max = 5 },
@@ -423,7 +426,7 @@ static const intel_limit_t intel_limits_ironlake_dac = {
423 .p2_slow = 10, .p2_fast = 5 }, 426 .p2_slow = 10, .p2_fast = 5 },
424}; 427};
425 428
426static const intel_limit_t intel_limits_ironlake_single_lvds = { 429static const struct intel_limit intel_limits_ironlake_single_lvds = {
427 .dot = { .min = 25000, .max = 350000 }, 430 .dot = { .min = 25000, .max = 350000 },
428 .vco = { .min = 1760000, .max = 3510000 }, 431 .vco = { .min = 1760000, .max = 3510000 },
429 .n = { .min = 1, .max = 3 }, 432 .n = { .min = 1, .max = 3 },
@@ -436,7 +439,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds = {
436 .p2_slow = 14, .p2_fast = 14 }, 439 .p2_slow = 14, .p2_fast = 14 },
437}; 440};
438 441
439static const intel_limit_t intel_limits_ironlake_dual_lvds = { 442static const struct intel_limit intel_limits_ironlake_dual_lvds = {
440 .dot = { .min = 25000, .max = 350000 }, 443 .dot = { .min = 25000, .max = 350000 },
441 .vco = { .min = 1760000, .max = 3510000 }, 444 .vco = { .min = 1760000, .max = 3510000 },
442 .n = { .min = 1, .max = 3 }, 445 .n = { .min = 1, .max = 3 },
@@ -450,7 +453,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds = {
450}; 453};
451 454
452/* LVDS 100mhz refclk limits. */ 455/* LVDS 100mhz refclk limits. */
453static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { 456static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
454 .dot = { .min = 25000, .max = 350000 }, 457 .dot = { .min = 25000, .max = 350000 },
455 .vco = { .min = 1760000, .max = 3510000 }, 458 .vco = { .min = 1760000, .max = 3510000 },
456 .n = { .min = 1, .max = 2 }, 459 .n = { .min = 1, .max = 2 },
@@ -463,7 +466,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
463 .p2_slow = 14, .p2_fast = 14 }, 466 .p2_slow = 14, .p2_fast = 14 },
464}; 467};
465 468
466static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { 469static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
467 .dot = { .min = 25000, .max = 350000 }, 470 .dot = { .min = 25000, .max = 350000 },
468 .vco = { .min = 1760000, .max = 3510000 }, 471 .vco = { .min = 1760000, .max = 3510000 },
469 .n = { .min = 1, .max = 3 }, 472 .n = { .min = 1, .max = 3 },
@@ -476,7 +479,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
476 .p2_slow = 7, .p2_fast = 7 }, 479 .p2_slow = 7, .p2_fast = 7 },
477}; 480};
478 481
479static const intel_limit_t intel_limits_vlv = { 482static const struct intel_limit intel_limits_vlv = {
480 /* 483 /*
481 * These are the data rate limits (measured in fast clocks) 484 * These are the data rate limits (measured in fast clocks)
482 * since those are the strictest limits we have. The fast 485 * since those are the strictest limits we have. The fast
@@ -492,7 +495,7 @@ static const intel_limit_t intel_limits_vlv = {
492 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */ 495 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
493}; 496};
494 497
495static const intel_limit_t intel_limits_chv = { 498static const struct intel_limit intel_limits_chv = {
496 /* 499 /*
497 * These are the data rate limits (measured in fast clocks) 500 * These are the data rate limits (measured in fast clocks)
498 * since those are the strictest limits we have. The fast 501 * since those are the strictest limits we have. The fast
@@ -508,7 +511,7 @@ static const intel_limit_t intel_limits_chv = {
508 .p2 = { .p2_slow = 1, .p2_fast = 14 }, 511 .p2 = { .p2_slow = 1, .p2_fast = 14 },
509}; 512};
510 513
511static const intel_limit_t intel_limits_bxt = { 514static const struct intel_limit intel_limits_bxt = {
512 /* FIXME: find real dot limits */ 515 /* FIXME: find real dot limits */
513 .dot = { .min = 0, .max = INT_MAX }, 516 .dot = { .min = 0, .max = INT_MAX },
514 .vco = { .min = 4800000, .max = 6700000 }, 517 .vco = { .min = 4800000, .max = 6700000 },
@@ -581,7 +584,7 @@ static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
581 * divided-down version of it. 584 * divided-down version of it.
582 */ 585 */
583/* m1 is reserved as 0 in Pineview, n is a ring counter */ 586/* m1 is reserved as 0 in Pineview, n is a ring counter */
584static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock) 587static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
585{ 588{
586 clock->m = clock->m2 + 2; 589 clock->m = clock->m2 + 2;
587 clock->p = clock->p1 * clock->p2; 590 clock->p = clock->p1 * clock->p2;
@@ -598,7 +601,7 @@ static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
598 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); 601 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
599} 602}
600 603
601static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock) 604static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
602{ 605{
603 clock->m = i9xx_dpll_compute_m(clock); 606 clock->m = i9xx_dpll_compute_m(clock);
604 clock->p = clock->p1 * clock->p2; 607 clock->p = clock->p1 * clock->p2;
@@ -610,7 +613,7 @@ static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
610 return clock->dot; 613 return clock->dot;
611} 614}
612 615
613static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock) 616static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
614{ 617{
615 clock->m = clock->m1 * clock->m2; 618 clock->m = clock->m1 * clock->m2;
616 clock->p = clock->p1 * clock->p2; 619 clock->p = clock->p1 * clock->p2;
@@ -622,7 +625,7 @@ static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
622 return clock->dot / 5; 625 return clock->dot / 5;
623} 626}
624 627
625int chv_calc_dpll_params(int refclk, intel_clock_t *clock) 628int chv_calc_dpll_params(int refclk, struct dpll *clock)
626{ 629{
627 clock->m = clock->m1 * clock->m2; 630 clock->m = clock->m1 * clock->m2;
628 clock->p = clock->p1 * clock->p2; 631 clock->p = clock->p1 * clock->p2;
@@ -642,8 +645,8 @@ int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
642 */ 645 */
643 646
644static bool intel_PLL_is_valid(struct drm_device *dev, 647static bool intel_PLL_is_valid(struct drm_device *dev,
645 const intel_limit_t *limit, 648 const struct intel_limit *limit,
646 const intel_clock_t *clock) 649 const struct dpll *clock)
647{ 650{
648 if (clock->n < limit->n.min || limit->n.max < clock->n) 651 if (clock->n < limit->n.min || limit->n.max < clock->n)
649 INTELPllInvalid("n out of range\n"); 652 INTELPllInvalid("n out of range\n");
@@ -678,7 +681,7 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
678} 681}
679 682
680static int 683static int
681i9xx_select_p2_div(const intel_limit_t *limit, 684i9xx_select_p2_div(const struct intel_limit *limit,
682 const struct intel_crtc_state *crtc_state, 685 const struct intel_crtc_state *crtc_state,
683 int target) 686 int target)
684{ 687{
@@ -713,13 +716,13 @@ i9xx_select_p2_div(const intel_limit_t *limit,
713 * divider from @match_clock used for LVDS downclocking. 716 * divider from @match_clock used for LVDS downclocking.
714 */ 717 */
715static bool 718static bool
716i9xx_find_best_dpll(const intel_limit_t *limit, 719i9xx_find_best_dpll(const struct intel_limit *limit,
717 struct intel_crtc_state *crtc_state, 720 struct intel_crtc_state *crtc_state,
718 int target, int refclk, intel_clock_t *match_clock, 721 int target, int refclk, struct dpll *match_clock,
719 intel_clock_t *best_clock) 722 struct dpll *best_clock)
720{ 723{
721 struct drm_device *dev = crtc_state->base.crtc->dev; 724 struct drm_device *dev = crtc_state->base.crtc->dev;
722 intel_clock_t clock; 725 struct dpll clock;
723 int err = target; 726 int err = target;
724 727
725 memset(best_clock, 0, sizeof(*best_clock)); 728 memset(best_clock, 0, sizeof(*best_clock));
@@ -770,13 +773,13 @@ i9xx_find_best_dpll(const intel_limit_t *limit,
770 * divider from @match_clock used for LVDS downclocking. 773 * divider from @match_clock used for LVDS downclocking.
771 */ 774 */
772static bool 775static bool
773pnv_find_best_dpll(const intel_limit_t *limit, 776pnv_find_best_dpll(const struct intel_limit *limit,
774 struct intel_crtc_state *crtc_state, 777 struct intel_crtc_state *crtc_state,
775 int target, int refclk, intel_clock_t *match_clock, 778 int target, int refclk, struct dpll *match_clock,
776 intel_clock_t *best_clock) 779 struct dpll *best_clock)
777{ 780{
778 struct drm_device *dev = crtc_state->base.crtc->dev; 781 struct drm_device *dev = crtc_state->base.crtc->dev;
779 intel_clock_t clock; 782 struct dpll clock;
780 int err = target; 783 int err = target;
781 784
782 memset(best_clock, 0, sizeof(*best_clock)); 785 memset(best_clock, 0, sizeof(*best_clock));
@@ -825,13 +828,13 @@ pnv_find_best_dpll(const intel_limit_t *limit,
825 * divider from @match_clock used for LVDS downclocking. 828 * divider from @match_clock used for LVDS downclocking.
826 */ 829 */
827static bool 830static bool
828g4x_find_best_dpll(const intel_limit_t *limit, 831g4x_find_best_dpll(const struct intel_limit *limit,
829 struct intel_crtc_state *crtc_state, 832 struct intel_crtc_state *crtc_state,
830 int target, int refclk, intel_clock_t *match_clock, 833 int target, int refclk, struct dpll *match_clock,
831 intel_clock_t *best_clock) 834 struct dpll *best_clock)
832{ 835{
833 struct drm_device *dev = crtc_state->base.crtc->dev; 836 struct drm_device *dev = crtc_state->base.crtc->dev;
834 intel_clock_t clock; 837 struct dpll clock;
835 int max_n; 838 int max_n;
836 bool found = false; 839 bool found = false;
837 /* approximately equals target * 0.00585 */ 840 /* approximately equals target * 0.00585 */
@@ -877,8 +880,8 @@ g4x_find_best_dpll(const intel_limit_t *limit,
877 * best configuration and error found so far. Return the calculated error. 880 * best configuration and error found so far. Return the calculated error.
878 */ 881 */
879static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, 882static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
880 const intel_clock_t *calculated_clock, 883 const struct dpll *calculated_clock,
881 const intel_clock_t *best_clock, 884 const struct dpll *best_clock,
882 unsigned int best_error_ppm, 885 unsigned int best_error_ppm,
883 unsigned int *error_ppm) 886 unsigned int *error_ppm)
884{ 887{
@@ -918,14 +921,14 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
918 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 921
919 */ 922 */
920static bool 923static bool
921vlv_find_best_dpll(const intel_limit_t *limit, 924vlv_find_best_dpll(const struct intel_limit *limit,
922 struct intel_crtc_state *crtc_state, 925 struct intel_crtc_state *crtc_state,
923 int target, int refclk, intel_clock_t *match_clock, 926 int target, int refclk, struct dpll *match_clock,
924 intel_clock_t *best_clock) 927 struct dpll *best_clock)
925{ 928{
926 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 929 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
927 struct drm_device *dev = crtc->base.dev; 930 struct drm_device *dev = crtc->base.dev;
928 intel_clock_t clock; 931 struct dpll clock;
929 unsigned int bestppm = 1000000; 932 unsigned int bestppm = 1000000;
930 /* min update 19.2 MHz */ 933 /* min update 19.2 MHz */
931 int max_n = min(limit->n.max, refclk / 19200); 934 int max_n = min(limit->n.max, refclk / 19200);
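A quick check of the n clamp above: the vlv/chv paths are called with refclk = 100000 kHz (see the compute_clock hunks further down), so max_n = min(limit->n.max, 100000 / 19200) = min(limit->n.max, 5). In other words n is capped so that refclk / n stays at or above the 19.2 MHz minimum update rate the comment mentions: 100000 / 5 = 20 MHz is still fine, while n = 6 would already drop to roughly 16.7 MHz.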
@@ -977,15 +980,15 @@ vlv_find_best_dpll(const intel_limit_t *limit,
977 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 980
978 */ 981 */
979static bool 982static bool
980chv_find_best_dpll(const intel_limit_t *limit, 983chv_find_best_dpll(const struct intel_limit *limit,
981 struct intel_crtc_state *crtc_state, 984 struct intel_crtc_state *crtc_state,
982 int target, int refclk, intel_clock_t *match_clock, 985 int target, int refclk, struct dpll *match_clock,
983 intel_clock_t *best_clock) 986 struct dpll *best_clock)
984{ 987{
985 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 988 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
986 struct drm_device *dev = crtc->base.dev; 989 struct drm_device *dev = crtc->base.dev;
987 unsigned int best_error_ppm; 990 unsigned int best_error_ppm;
988 intel_clock_t clock; 991 struct dpll clock;
989 uint64_t m2; 992 uint64_t m2;
990 int found = false; 993 int found = false;
991 994
@@ -1035,10 +1038,10 @@ chv_find_best_dpll(const intel_limit_t *limit,
1035} 1038}
1036 1039
1037bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, 1040bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1038 intel_clock_t *best_clock) 1041 struct dpll *best_clock)
1039{ 1042{
1040 int refclk = 100000; 1043 int refclk = 100000;
1041 const intel_limit_t *limit = &intel_limits_bxt; 1044 const struct intel_limit *limit = &intel_limits_bxt;
1042 1045
1043 return chv_find_best_dpll(limit, crtc_state, 1046 return chv_find_best_dpll(limit, crtc_state,
1044 target_clock, refclk, NULL, best_clock); 1047 target_clock, refclk, NULL, best_clock);
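The various *_find_best_dpll() helpers whose prototypes change above all follow the same brute-force shape (their bodies are mostly elided in this diff): walk the divider ranges from the relevant intel_limit table, reject candidates that fail the limit checks (as intel_PLL_is_valid() does above), and keep the combination whose computed dot clock lies closest to the target. A schematic, self-contained sketch of that pattern with invented ranges — not the driver's actual loop:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented ranges standing in for one intel_limit table. */
struct range { int min, max; };
static const struct range N = { 1, 6 }, M1 = { 8, 18 }, M2 = { 3, 7 }, P1 = { 1, 8 };
static const int P2 = 10;

static int dot_khz(int refclk, int n, int m1, int m2, int p1, int p2)
{
	int m = 5 * (m1 + 2) + (m2 + 2);
	long long vco = (long long)refclk * m / (n + 2);

	return (int)(vco / (p1 * p2));
}

int main(void)
{
	int refclk = 96000, target = 148500;	/* kHz, example values only */
	int best_err = INT_MAX, best_dot = 0;

	for (int n = N.min; n <= N.max; n++)
	for (int m1 = M1.min; m1 <= M1.max; m1++)
	for (int m2 = M2.min; m2 <= M2.max; m2++)
	for (int p1 = P1.min; p1 <= P1.max; p1++) {
		int dot = dot_khz(refclk, n, m1, m2, p1, P2);
		int err = abs(dot - target);

		/* a real limit check would also bound m, p, vco and dot */
		if (err < best_err) {
			best_err = err;
			best_dot = dot;
		}
	}

	printf("closest dot clock: %d kHz (error %d kHz)\n", best_dot, best_err);
	return 0;
}

The real helpers additionally honour @match_clock for LVDS downclocking and, on vlv/chv, score candidates in ppm via vlv_PLL_is_optimal() rather than by absolute error.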
@@ -1203,7 +1206,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1203 u32 val; 1206 u32 val;
1204 1207
1205 /* ILK FDI PLL is always enabled */ 1208 /* ILK FDI PLL is always enabled */
1206 if (INTEL_INFO(dev_priv)->gen == 5) 1209 if (IS_GEN5(dev_priv))
1207 return; 1210 return;
1208 1211
1209 /* On Haswell, DDI ports are responsible for the FDI PLL setup */ 1212 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
@@ -2309,7 +2312,7 @@ err_pm:
2309 return ret; 2312 return ret;
2310} 2313}
2311 2314
2312static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) 2315void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
2313{ 2316{
2314 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2317 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2315 struct i915_ggtt_view view; 2318 struct i915_ggtt_view view;
@@ -3110,17 +3113,12 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3110 return -ENODEV; 3113 return -ENODEV;
3111} 3114}
3112 3115
3113static void intel_complete_page_flips(struct drm_device *dev) 3116static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
3114{ 3117{
3115 struct drm_crtc *crtc; 3118 struct intel_crtc *crtc;
3116
3117 for_each_crtc(dev, crtc) {
3118 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3119 enum plane plane = intel_crtc->plane;
3120 3119
3121 intel_prepare_page_flip(dev, plane); 3120 for_each_intel_crtc(dev_priv->dev, crtc)
3122 intel_finish_page_flip_plane(dev, plane); 3121 intel_finish_page_flip_cs(dev_priv, crtc->pipe);
3123 }
3124} 3122}
3125 3123
3126static void intel_update_primary_planes(struct drm_device *dev) 3124static void intel_update_primary_planes(struct drm_device *dev)
@@ -3143,41 +3141,39 @@ static void intel_update_primary_planes(struct drm_device *dev)
3143 } 3141 }
3144} 3142}
3145 3143
3146void intel_prepare_reset(struct drm_device *dev) 3144void intel_prepare_reset(struct drm_i915_private *dev_priv)
3147{ 3145{
3148 /* no reset support for gen2 */ 3146 /* no reset support for gen2 */
3149 if (IS_GEN2(dev)) 3147 if (IS_GEN2(dev_priv))
3150 return; 3148 return;
3151 3149
3152 /* reset doesn't touch the display */ 3150 /* reset doesn't touch the display */
3153 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 3151 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
3154 return; 3152 return;
3155 3153
3156 drm_modeset_lock_all(dev); 3154 drm_modeset_lock_all(dev_priv->dev);
3157 /* 3155 /*
3158 * Disabling the crtcs gracefully seems nicer. Also the 3156 * Disabling the crtcs gracefully seems nicer. Also the
3159 * g33 docs say we should at least disable all the planes. 3157 * g33 docs say we should at least disable all the planes.
3160 */ 3158 */
3161 intel_display_suspend(dev); 3159 intel_display_suspend(dev_priv->dev);
3162} 3160}
3163 3161
3164void intel_finish_reset(struct drm_device *dev) 3162void intel_finish_reset(struct drm_i915_private *dev_priv)
3165{ 3163{
3166 struct drm_i915_private *dev_priv = to_i915(dev);
3167
3168 /* 3164 /*
3169 * Flips in the rings will be nuked by the reset, 3165 * Flips in the rings will be nuked by the reset,
3170 * so complete all pending flips so that user space 3166 * so complete all pending flips so that user space
3171 * will get its events and not get stuck. 3167 * will get its events and not get stuck.
3172 */ 3168 */
3173 intel_complete_page_flips(dev); 3169 intel_complete_page_flips(dev_priv);
3174 3170
3175 /* no reset support for gen2 */ 3171 /* no reset support for gen2 */
3176 if (IS_GEN2(dev)) 3172 if (IS_GEN2(dev_priv))
3177 return; 3173 return;
3178 3174
3179 /* reset doesn't touch the display */ 3175 /* reset doesn't touch the display */
3180 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) { 3176 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
3181 /* 3177 /*
3182 * Flips in the rings have been nuked by the reset, 3178 * Flips in the rings have been nuked by the reset,
3183 * so update the base address of all primary 3179 * so update the base address of all primary
@@ -3187,7 +3183,7 @@ void intel_finish_reset(struct drm_device *dev)
3187 * FIXME: Atomic will make this obsolete since we won't schedule 3183 * FIXME: Atomic will make this obsolete since we won't schedule
3188 * CS-based flips (which might get lost in gpu resets) any more. 3184 * CS-based flips (which might get lost in gpu resets) any more.
3189 */ 3185 */
3190 intel_update_primary_planes(dev); 3186 intel_update_primary_planes(dev_priv->dev);
3191 return; 3187 return;
3192 } 3188 }
3193 3189
@@ -3198,18 +3194,18 @@ void intel_finish_reset(struct drm_device *dev)
3198 intel_runtime_pm_disable_interrupts(dev_priv); 3194 intel_runtime_pm_disable_interrupts(dev_priv);
3199 intel_runtime_pm_enable_interrupts(dev_priv); 3195 intel_runtime_pm_enable_interrupts(dev_priv);
3200 3196
3201 intel_modeset_init_hw(dev); 3197 intel_modeset_init_hw(dev_priv->dev);
3202 3198
3203 spin_lock_irq(&dev_priv->irq_lock); 3199 spin_lock_irq(&dev_priv->irq_lock);
3204 if (dev_priv->display.hpd_irq_setup) 3200 if (dev_priv->display.hpd_irq_setup)
3205 dev_priv->display.hpd_irq_setup(dev); 3201 dev_priv->display.hpd_irq_setup(dev_priv);
3206 spin_unlock_irq(&dev_priv->irq_lock); 3202 spin_unlock_irq(&dev_priv->irq_lock);
3207 3203
3208 intel_display_resume(dev); 3204 intel_display_resume(dev_priv->dev);
3209 3205
3210 intel_hpd_init(dev_priv); 3206 intel_hpd_init(dev_priv);
3211 3207
3212 drm_modeset_unlock_all(dev); 3208 drm_modeset_unlock_all(dev_priv->dev);
3213} 3209}
3214 3210
3215static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) 3211static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
@@ -3224,7 +3220,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3224 return false; 3220 return false;
3225 3221
3226 spin_lock_irq(&dev->event_lock); 3222 spin_lock_irq(&dev->event_lock);
3227 pending = to_intel_crtc(crtc)->unpin_work != NULL; 3223 pending = to_intel_crtc(crtc)->flip_work != NULL;
3228 spin_unlock_irq(&dev->event_lock); 3224 spin_unlock_irq(&dev->event_lock);
3229 3225
3230 return pending; 3226 return pending;
@@ -3803,7 +3799,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
3803 if (atomic_read(&crtc->unpin_work_count) == 0) 3799 if (atomic_read(&crtc->unpin_work_count) == 0)
3804 continue; 3800 continue;
3805 3801
3806 if (crtc->unpin_work) 3802 if (crtc->flip_work)
3807 intel_wait_for_vblank(dev, crtc->pipe); 3803 intel_wait_for_vblank(dev, crtc->pipe);
3808 3804
3809 return true; 3805 return true;
@@ -3815,11 +3811,9 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
3815static void page_flip_completed(struct intel_crtc *intel_crtc) 3811static void page_flip_completed(struct intel_crtc *intel_crtc)
3816{ 3812{
3817 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 3813 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3818 struct intel_unpin_work *work = intel_crtc->unpin_work; 3814 struct intel_flip_work *work = intel_crtc->flip_work;
3819 3815
3820 /* ensure that the unpin work is consistent wrt ->pending. */ 3816 intel_crtc->flip_work = NULL;
3821 smp_rmb();
3822 intel_crtc->unpin_work = NULL;
3823 3817
3824 if (work->event) 3818 if (work->event)
3825 drm_crtc_send_vblank_event(&intel_crtc->base, work->event); 3819 drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
@@ -3827,7 +3821,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
3827 drm_crtc_vblank_put(&intel_crtc->base); 3821 drm_crtc_vblank_put(&intel_crtc->base);
3828 3822
3829 wake_up_all(&dev_priv->pending_flip_queue); 3823 wake_up_all(&dev_priv->pending_flip_queue);
3830 queue_work(dev_priv->wq, &work->work); 3824 queue_work(dev_priv->wq, &work->unpin_work);
3831 3825
3832 trace_i915_flip_complete(intel_crtc->plane, 3826 trace_i915_flip_complete(intel_crtc->plane,
3833 work->pending_flip_obj); 3827 work->pending_flip_obj);
@@ -3851,9 +3845,11 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3851 3845
3852 if (ret == 0) { 3846 if (ret == 0) {
3853 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3847 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3848 struct intel_flip_work *work;
3854 3849
3855 spin_lock_irq(&dev->event_lock); 3850 spin_lock_irq(&dev->event_lock);
3856 if (intel_crtc->unpin_work) { 3851 work = intel_crtc->flip_work;
3852 if (work && !is_mmio_work(work)) {
3857 WARN_ONCE(1, "Removing stuck page flip\n"); 3853 WARN_ONCE(1, "Removing stuck page flip\n");
3858 page_flip_completed(intel_crtc); 3854 page_flip_completed(intel_crtc);
3859 } 3855 }
@@ -5328,32 +5324,33 @@ static void intel_update_cdclk(struct drm_device *dev)
5328 dev_priv->cdclk_freq); 5324 dev_priv->cdclk_freq);
5329 5325
5330 /* 5326 /*
5331 * Program the gmbus_freq based on the cdclk frequency. 5327 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
5332 * BSpec erroneously claims we should aim for 4MHz, but 5328 * Programmng [sic] note: bit[9:2] should be programmed to the number
5333 * in fact 1MHz is the correct frequency. 5329 * of cdclk that generates 4MHz reference clock freq which is used to
5330 * generate GMBus clock. This will vary with the cdclk freq.
5334 */ 5331 */
5335 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 5332 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5336 /*
5337 * Program the gmbus_freq based on the cdclk frequency.
5338 * BSpec erroneously claims we should aim for 4MHz, but
5339 * in fact 1MHz is the correct frequency.
5340 */
5341 I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000)); 5333 I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
5342 }
5343 5334
5344 if (dev_priv->max_cdclk_freq == 0) 5335 if (dev_priv->max_cdclk_freq == 0)
5345 intel_update_max_cdclk(dev); 5336 intel_update_max_cdclk(dev);
5346} 5337}
5347 5338
5348static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency) 5339/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5340static int skl_cdclk_decimal(int cdclk)
5341{
5342 return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
5343}
5344
5345static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
5349{ 5346{
5350 uint32_t divider; 5347 uint32_t divider;
5351 uint32_t ratio; 5348 uint32_t ratio;
5352 uint32_t current_freq; 5349 uint32_t current_cdclk;
5353 int ret; 5350 int ret;
5354 5351
5355 /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */ 5352 /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
5356 switch (frequency) { 5353 switch (cdclk) {
5357 case 144000: 5354 case 144000:
5358 divider = BXT_CDCLK_CD2X_DIV_SEL_4; 5355 divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5359 ratio = BXT_DE_PLL_RATIO(60); 5356 ratio = BXT_DE_PLL_RATIO(60);
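skl_cdclk_decimal(), added a few lines above and now shared by the Skylake and Broxton paths, encodes a cdclk value in kHz as the CDCLK_CTL decimal field (.1 fixpoint MHz with a -1 MHz offset); the reverse conversion, value * 500 + 1000, is what the readback just below uses. A small round-trip check in plain C, with DIV_ROUND_CLOSEST spelled out by hand:

#include <stdio.h>

/* Same arithmetic as skl_cdclk_decimal(): DIV_ROUND_CLOSEST(cdclk - 1000, 500). */
static int cdclk_decimal(int cdclk_khz)
{
	return (cdclk_khz - 1000 + 250) / 500;
}

/* Reverse conversion, as used on the CDCLK_CTL readback below. */
static int decimal_to_khz(int decimal)
{
	return decimal * 500 + 1000;
}

int main(void)
{
	int freqs[] = { 144000, 337500, 450000, 624000, 675000 };

	for (unsigned int i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++) {
		int dec = cdclk_decimal(freqs[i]);

		/* e.g. 337500 kHz -> 673 -> 337500 kHz again */
		printf("%d kHz -> decimal %d -> %d kHz\n",
		       freqs[i], dec, decimal_to_khz(dec));
	}
	return 0;
}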
@@ -5383,7 +5380,7 @@ static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
5383 divider = 0; 5380 divider = 0;
5384 break; 5381 break;
5385 default: 5382 default:
5386 DRM_ERROR("unsupported CDCLK freq %d", frequency); 5383 DRM_ERROR("unsupported CDCLK freq %d", cdclk);
5387 5384
5388 return; 5385 return;
5389 } 5386 }
@@ -5396,13 +5393,13 @@ static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
5396 5393
5397 if (ret) { 5394 if (ret) {
5398 DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n", 5395 DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
5399 ret, frequency); 5396 ret, cdclk);
5400 return; 5397 return;
5401 } 5398 }
5402 5399
5403 current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK; 5400 current_cdclk = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
5404 /* convert from .1 fixpoint MHz with -1MHz offset to kHz */ 5401 /* convert from .1 fixpoint MHz with -1MHz offset to kHz */
5405 current_freq = current_freq * 500 + 1000; 5402 current_cdclk = current_cdclk * 500 + 1000;
5406 5403
5407 /* 5404 /*
5408 * DE PLL has to be disabled when 5405 * DE PLL has to be disabled when
@@ -5410,8 +5407,8 @@ static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
5410 * - before setting to 624MHz (PLL needs toggling) 5407 * - before setting to 624MHz (PLL needs toggling)
5411 * - before setting to any frequency from 624MHz (PLL needs toggling) 5408 * - before setting to any frequency from 624MHz (PLL needs toggling)
5412 */ 5409 */
5413 if (frequency == 19200 || frequency == 624000 || 5410 if (cdclk == 19200 || cdclk == 624000 ||
5414 current_freq == 624000) { 5411 current_cdclk == 624000) {
5415 I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE); 5412 I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
5416 /* Timeout 200us */ 5413 /* Timeout 200us */
5417 if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK), 5414 if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
@@ -5419,7 +5416,7 @@ static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
5419 DRM_ERROR("timout waiting for DE PLL unlock\n"); 5416 DRM_ERROR("timout waiting for DE PLL unlock\n");
5420 } 5417 }
5421 5418
5422 if (frequency != 19200) { 5419 if (cdclk != 19200) {
5423 uint32_t val; 5420 uint32_t val;
5424 5421
5425 val = I915_READ(BXT_DE_PLL_CTL); 5422 val = I915_READ(BXT_DE_PLL_CTL);
@@ -5432,31 +5429,29 @@ static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
5432 if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1)) 5429 if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5433 DRM_ERROR("timeout waiting for DE PLL lock\n"); 5430 DRM_ERROR("timeout waiting for DE PLL lock\n");
5434 5431
5435 val = I915_READ(CDCLK_CTL); 5432 val = divider | skl_cdclk_decimal(cdclk);
5436 val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK; 5433 /*
5437 val |= divider; 5434 * FIXME if only the cd2x divider needs changing, it could be done
5435 * without shutting off the pipe (if only one pipe is active).
5436 */
5437 val |= BXT_CDCLK_CD2X_PIPE_NONE;
5438 /* 5438 /*
5439 * Disable SSA Precharge when CD clock frequency < 500 MHz, 5439 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5440 * enable otherwise. 5440 * enable otherwise.
5441 */ 5441 */
5442 val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE; 5442 if (cdclk >= 500000)
5443 if (frequency >= 500000)
5444 val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; 5443 val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5445
5446 val &= ~CDCLK_FREQ_DECIMAL_MASK;
5447 /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5448 val |= (frequency - 1000) / 500;
5449 I915_WRITE(CDCLK_CTL, val); 5444 I915_WRITE(CDCLK_CTL, val);
5450 } 5445 }
5451 5446
5452 mutex_lock(&dev_priv->rps.hw_lock); 5447 mutex_lock(&dev_priv->rps.hw_lock);
5453 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 5448 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5454 DIV_ROUND_UP(frequency, 25000)); 5449 DIV_ROUND_UP(cdclk, 25000));
5455 mutex_unlock(&dev_priv->rps.hw_lock); 5450 mutex_unlock(&dev_priv->rps.hw_lock);
5456 5451
5457 if (ret) { 5452 if (ret) {
5458 DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n", 5453 DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
5459 ret, frequency); 5454 ret, cdclk);
5460 return; 5455 return;
5461 } 5456 }
5462 5457
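The frequency -> cdclk renaming in broxton_set_cdclk() above leaves the frequency plan from its comment intact: cdclk = 19.2 MHz * ratio / 2 / div. For the one table entry visible in this diff, 144000 kHz with divider 4 and ratio 60, that works out as 19200 * 60 / 2 / 4 = 144000 kHz. A tiny check; the ratio/divider pair for the 624000 kHz case mentioned above is a guess, only the 144000 kHz values appear in the hunk:

#include <stdio.h>

/* cdclk = 19.2 MHz * ratio / 2 / div, per the comment in broxton_set_cdclk().
 * twice_div carries 2 * div so the 1.5 divider stays an integer. */
static int bxt_cdclk_khz(int ratio, int twice_div)
{
	return 19200 * ratio / twice_div;
}

int main(void)
{
	/* the 144000 kHz case shown above: BXT_CDCLK_CD2X_DIV_SEL_4, ratio 60 */
	printf("ratio 60, div 4: %d kHz\n", bxt_cdclk_khz(60, 2 * 4));
	/* hypothetical values for the 624000 kHz case */
	printf("ratio 65, div 1: %d kHz\n", bxt_cdclk_khz(65, 2 * 1));
	return 0;
}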
@@ -5545,11 +5540,6 @@ static const struct skl_cdclk_entry {
5545 { .freq = 675000, .vco = 8100 }, 5540 { .freq = 675000, .vco = 8100 },
5546}; 5541};
5547 5542
5548static unsigned int skl_cdclk_decimal(unsigned int freq)
5549{
5550 return (freq - 1000) / 500;
5551}
5552
5553static unsigned int skl_cdclk_get_vco(unsigned int freq) 5543static unsigned int skl_cdclk_get_vco(unsigned int freq)
5554{ 5544{
5555 unsigned int i; 5545 unsigned int i;
@@ -5565,23 +5555,18 @@ static unsigned int skl_cdclk_get_vco(unsigned int freq)
5565} 5555}
5566 5556
5567static void 5557static void
5568skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco) 5558skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
5569{ 5559{
5570 unsigned int min_freq; 5560 int min_cdclk;
5571 u32 val; 5561 u32 val;
5572 5562
5573 /* select the minimum CDCLK before enabling DPLL 0 */ 5563 /* select the minimum CDCLK before enabling DPLL 0 */
5574 val = I915_READ(CDCLK_CTL); 5564 if (vco == 8640)
5575 val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK; 5565 min_cdclk = 308570;
5576 val |= CDCLK_FREQ_337_308;
5577
5578 if (required_vco == 8640)
5579 min_freq = 308570;
5580 else 5566 else
5581 min_freq = 337500; 5567 min_cdclk = 337500;
5582
5583 val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
5584 5568
5569 val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
5585 I915_WRITE(CDCLK_CTL, val); 5570 I915_WRITE(CDCLK_CTL, val);
5586 POSTING_READ(CDCLK_CTL); 5571 POSTING_READ(CDCLK_CTL);
5587 5572
@@ -5599,7 +5584,7 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5599 val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) | 5584 val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5600 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); 5585 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5601 val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0); 5586 val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5602 if (required_vco == 8640) 5587 if (vco == 8640)
5603 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 5588 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5604 SKL_DPLL0); 5589 SKL_DPLL0);
5605 else 5590 else
@@ -5615,6 +5600,14 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5615 DRM_ERROR("DPLL0 not locked\n"); 5600 DRM_ERROR("DPLL0 not locked\n");
5616} 5601}
5617 5602
5603static void
5604skl_dpll0_disable(struct drm_i915_private *dev_priv)
5605{
5606 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
5607 if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5608 DRM_ERROR("Couldn't disable DPLL0\n");
5609}
5610
5618static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv) 5611static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
5619{ 5612{
5620 int ret; 5613 int ret;
@@ -5642,12 +5635,12 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5642 return false; 5635 return false;
5643} 5636}
5644 5637
5645static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) 5638static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
5646{ 5639{
5647 struct drm_device *dev = dev_priv->dev; 5640 struct drm_device *dev = dev_priv->dev;
5648 u32 freq_select, pcu_ack; 5641 u32 freq_select, pcu_ack;
5649 5642
5650 DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq); 5643 DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", cdclk);
5651 5644
5652 if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) { 5645 if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
5653 DRM_ERROR("failed to inform PCU about cdclk change\n"); 5646 DRM_ERROR("failed to inform PCU about cdclk change\n");
@@ -5655,7 +5648,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
5655 } 5648 }
5656 5649
5657 /* set CDCLK_CTL */ 5650 /* set CDCLK_CTL */
5658 switch(freq) { 5651 switch (cdclk) {
5659 case 450000: 5652 case 450000:
5660 case 432000: 5653 case 432000:
5661 freq_select = CDCLK_FREQ_450_432; 5654 freq_select = CDCLK_FREQ_450_432;
@@ -5678,7 +5671,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
5678 break; 5671 break;
5679 } 5672 }
5680 5673
5681 I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq)); 5674 I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
5682 POSTING_READ(CDCLK_CTL); 5675 POSTING_READ(CDCLK_CTL);
5683 5676
5684 /* inform PCU of the change */ 5677 /* inform PCU of the change */
@@ -5700,21 +5693,18 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
5700 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) 5693 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5701 DRM_ERROR("DBuf power disable timeout\n"); 5694 DRM_ERROR("DBuf power disable timeout\n");
5702 5695
5703 /* disable DPLL0 */ 5696 skl_dpll0_disable(dev_priv);
5704 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
5705 if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5706 DRM_ERROR("Couldn't disable DPLL0\n");
5707} 5697}
5708 5698
5709void skl_init_cdclk(struct drm_i915_private *dev_priv) 5699void skl_init_cdclk(struct drm_i915_private *dev_priv)
5710{ 5700{
5711 unsigned int required_vco; 5701 unsigned int vco;
5712 5702
5713 /* DPLL0 not enabled (happens on early BIOS versions) */ 5703 /* DPLL0 not enabled (happens on early BIOS versions) */
5714 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) { 5704 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
5715 /* enable DPLL0 */ 5705 /* enable DPLL0 */
5716 required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk); 5706 vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
5717 skl_dpll0_enable(dev_priv, required_vco); 5707 skl_dpll0_enable(dev_priv, vco);
5718 } 5708 }
5719 5709
5720 /* set CDCLK to the frequency the BIOS chose */ 5710 /* set CDCLK to the frequency the BIOS chose */
@@ -5906,21 +5896,19 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5906 return 200000; 5896 return 200000;
5907} 5897}
5908 5898
5909static int broxton_calc_cdclk(struct drm_i915_private *dev_priv, 5899static int broxton_calc_cdclk(int max_pixclk)
5910 int max_pixclk)
5911{ 5900{
5912 /* 5901 /*
5913 * FIXME: 5902 * FIXME:
5914 * - remove the guardband, it's not needed on BXT
5915 * - set 19.2MHz bypass frequency if there are no active pipes 5903 * - set 19.2MHz bypass frequency if there are no active pipes
5916 */ 5904 */
5917 if (max_pixclk > 576000*9/10) 5905 if (max_pixclk > 576000)
5918 return 624000; 5906 return 624000;
5919 else if (max_pixclk > 384000*9/10) 5907 else if (max_pixclk > 384000)
5920 return 576000; 5908 return 576000;
5921 else if (max_pixclk > 288000*9/10) 5909 else if (max_pixclk > 288000)
5922 return 384000; 5910 return 384000;
5923 else if (max_pixclk > 144000*9/10) 5911 else if (max_pixclk > 144000)
5924 return 288000; 5912 return 288000;
5925 else 5913 else
5926 return 144000; 5914 return 144000;
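Dropping the 90% guardband from broxton_calc_cdclk() (along with its now-unused dev_priv argument) shifts which cdclk step a pixel clock near one of the old thresholds selects. A standalone before/after comparison; the 280000 kHz input is an arbitrary example:

#include <stdio.h>

static int bxt_cdclk_new(int max_pixclk)	/* as in the hunk above */
{
	if (max_pixclk > 576000) return 624000;
	if (max_pixclk > 384000) return 576000;
	if (max_pixclk > 288000) return 384000;
	if (max_pixclk > 144000) return 288000;
	return 144000;
}

static int bxt_cdclk_old(int max_pixclk)	/* removed 90% guardband version */
{
	if (max_pixclk > 576000 * 9 / 10) return 624000;
	if (max_pixclk > 384000 * 9 / 10) return 576000;
	if (max_pixclk > 288000 * 9 / 10) return 384000;
	if (max_pixclk > 144000 * 9 / 10) return 288000;
	return 144000;
}

int main(void)
{
	/* 280000 kHz used to be bumped to 384000 kHz by the guardband */
	printf("280000 kHz: old %d, new %d\n",
	       bxt_cdclk_old(280000), bxt_cdclk_new(280000));
	printf("     0 kHz: old %d, new %d\n",
	       bxt_cdclk_old(0), bxt_cdclk_new(0));
	return 0;
}

Both versions still fall back to the 144000 kHz minimum for max_pixclk == 0, which is what broxton_modeset_calc_cdclk() relies on below when there are no active crtcs.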
@@ -5963,9 +5951,6 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
5963 struct intel_atomic_state *intel_state = 5951 struct intel_atomic_state *intel_state =
5964 to_intel_atomic_state(state); 5952 to_intel_atomic_state(state);
5965 5953
5966 if (max_pixclk < 0)
5967 return max_pixclk;
5968
5969 intel_state->cdclk = intel_state->dev_cdclk = 5954 intel_state->cdclk = intel_state->dev_cdclk =
5970 valleyview_calc_cdclk(dev_priv, max_pixclk); 5955 valleyview_calc_cdclk(dev_priv, max_pixclk);
5971 5956
@@ -5977,20 +5962,15 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
5977 5962
5978static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state) 5963static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
5979{ 5964{
5980 struct drm_device *dev = state->dev; 5965 int max_pixclk = ilk_max_pixel_rate(state);
5981 struct drm_i915_private *dev_priv = dev->dev_private;
5982 int max_pixclk = intel_mode_max_pixclk(dev, state);
5983 struct intel_atomic_state *intel_state = 5966 struct intel_atomic_state *intel_state =
5984 to_intel_atomic_state(state); 5967 to_intel_atomic_state(state);
5985 5968
5986 if (max_pixclk < 0)
5987 return max_pixclk;
5988
5989 intel_state->cdclk = intel_state->dev_cdclk = 5969 intel_state->cdclk = intel_state->dev_cdclk =
5990 broxton_calc_cdclk(dev_priv, max_pixclk); 5970 broxton_calc_cdclk(max_pixclk);
5991 5971
5992 if (!intel_state->active_crtcs) 5972 if (!intel_state->active_crtcs)
5993 intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0); 5973 intel_state->dev_cdclk = broxton_calc_cdclk(0);
5994 5974
5995 return 0; 5975 return 0;
5996} 5976}
@@ -6252,7 +6232,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6252 return; 6232 return;
6253 6233
6254 if (to_intel_plane_state(crtc->primary->state)->visible) { 6234 if (to_intel_plane_state(crtc->primary->state)->visible) {
6255 WARN_ON(intel_crtc->unpin_work); 6235 WARN_ON(intel_crtc->flip_work);
6256 6236
6257 intel_pre_disable_primary_noatomic(crtc); 6237 intel_pre_disable_primary_noatomic(crtc);
6258 6238
@@ -7063,7 +7043,7 @@ static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7063 7043
7064static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 7044static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7065 struct intel_crtc_state *crtc_state, 7045 struct intel_crtc_state *crtc_state,
7066 intel_clock_t *reduced_clock) 7046 struct dpll *reduced_clock)
7067{ 7047{
7068 struct drm_device *dev = crtc->base.dev; 7048 struct drm_device *dev = crtc->base.dev;
7069 u32 fp, fp2 = 0; 7049 u32 fp, fp2 = 0;
@@ -7487,7 +7467,7 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
7487 7467
7488static void i9xx_compute_dpll(struct intel_crtc *crtc, 7468static void i9xx_compute_dpll(struct intel_crtc *crtc,
7489 struct intel_crtc_state *crtc_state, 7469 struct intel_crtc_state *crtc_state,
7490 intel_clock_t *reduced_clock) 7470 struct dpll *reduced_clock)
7491{ 7471{
7492 struct drm_device *dev = crtc->base.dev; 7472 struct drm_device *dev = crtc->base.dev;
7493 struct drm_i915_private *dev_priv = dev->dev_private; 7473 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7563,7 +7543,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
7563 7543
7564static void i8xx_compute_dpll(struct intel_crtc *crtc, 7544static void i8xx_compute_dpll(struct intel_crtc *crtc,
7565 struct intel_crtc_state *crtc_state, 7545 struct intel_crtc_state *crtc_state,
7566 intel_clock_t *reduced_clock) 7546 struct dpll *reduced_clock)
7567{ 7547{
7568 struct drm_device *dev = crtc->base.dev; 7548 struct drm_device *dev = crtc->base.dev;
7569 struct drm_i915_private *dev_priv = dev->dev_private; 7549 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7817,7 +7797,7 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7817{ 7797{
7818 struct drm_device *dev = crtc->base.dev; 7798 struct drm_device *dev = crtc->base.dev;
7819 struct drm_i915_private *dev_priv = dev->dev_private; 7799 struct drm_i915_private *dev_priv = dev->dev_private;
7820 const intel_limit_t *limit; 7800 const struct intel_limit *limit;
7821 int refclk = 48000; 7801 int refclk = 48000;
7822 7802
7823 memset(&crtc_state->dpll_hw_state, 0, 7803 memset(&crtc_state->dpll_hw_state, 0,
@@ -7853,7 +7833,7 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7853{ 7833{
7854 struct drm_device *dev = crtc->base.dev; 7834 struct drm_device *dev = crtc->base.dev;
7855 struct drm_i915_private *dev_priv = dev->dev_private; 7835 struct drm_i915_private *dev_priv = dev->dev_private;
7856 const intel_limit_t *limit; 7836 const struct intel_limit *limit;
7857 int refclk = 96000; 7837 int refclk = 96000;
7858 7838
7859 memset(&crtc_state->dpll_hw_state, 0, 7839 memset(&crtc_state->dpll_hw_state, 0,
@@ -7896,7 +7876,7 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7896{ 7876{
7897 struct drm_device *dev = crtc->base.dev; 7877 struct drm_device *dev = crtc->base.dev;
7898 struct drm_i915_private *dev_priv = dev->dev_private; 7878 struct drm_i915_private *dev_priv = dev->dev_private;
7899 const intel_limit_t *limit; 7879 const struct intel_limit *limit;
7900 int refclk = 96000; 7880 int refclk = 96000;
7901 7881
7902 memset(&crtc_state->dpll_hw_state, 0, 7882 memset(&crtc_state->dpll_hw_state, 0,
@@ -7930,7 +7910,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7930{ 7910{
7931 struct drm_device *dev = crtc->base.dev; 7911 struct drm_device *dev = crtc->base.dev;
7932 struct drm_i915_private *dev_priv = dev->dev_private; 7912 struct drm_i915_private *dev_priv = dev->dev_private;
7933 const intel_limit_t *limit; 7913 const struct intel_limit *limit;
7934 int refclk = 96000; 7914 int refclk = 96000;
7935 7915
7936 memset(&crtc_state->dpll_hw_state, 0, 7916 memset(&crtc_state->dpll_hw_state, 0,
@@ -7963,7 +7943,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7963 struct intel_crtc_state *crtc_state) 7943 struct intel_crtc_state *crtc_state)
7964{ 7944{
7965 int refclk = 100000; 7945 int refclk = 100000;
7966 const intel_limit_t *limit = &intel_limits_chv; 7946 const struct intel_limit *limit = &intel_limits_chv;
7967 7947
7968 memset(&crtc_state->dpll_hw_state, 0, 7948 memset(&crtc_state->dpll_hw_state, 0,
7969 sizeof(crtc_state->dpll_hw_state)); 7949 sizeof(crtc_state->dpll_hw_state));
@@ -7984,7 +7964,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7984 struct intel_crtc_state *crtc_state) 7964 struct intel_crtc_state *crtc_state)
7985{ 7965{
7986 int refclk = 100000; 7966 int refclk = 100000;
7987 const intel_limit_t *limit = &intel_limits_vlv; 7967 const struct intel_limit *limit = &intel_limits_vlv;
7988 7968
7989 memset(&crtc_state->dpll_hw_state, 0, 7969 memset(&crtc_state->dpll_hw_state, 0,
7990 sizeof(crtc_state->dpll_hw_state)); 7970 sizeof(crtc_state->dpll_hw_state));
@@ -8034,7 +8014,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8034 struct drm_device *dev = crtc->base.dev; 8014 struct drm_device *dev = crtc->base.dev;
8035 struct drm_i915_private *dev_priv = dev->dev_private; 8015 struct drm_i915_private *dev_priv = dev->dev_private;
8036 int pipe = pipe_config->cpu_transcoder; 8016 int pipe = pipe_config->cpu_transcoder;
8037 intel_clock_t clock; 8017 struct dpll clock;
8038 u32 mdiv; 8018 u32 mdiv;
8039 int refclk = 100000; 8019 int refclk = 100000;
8040 8020
@@ -8131,7 +8111,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
8131 struct drm_i915_private *dev_priv = dev->dev_private; 8111 struct drm_i915_private *dev_priv = dev->dev_private;
8132 int pipe = pipe_config->cpu_transcoder; 8112 int pipe = pipe_config->cpu_transcoder;
8133 enum dpio_channel port = vlv_pipe_to_channel(pipe); 8113 enum dpio_channel port = vlv_pipe_to_channel(pipe);
8134 intel_clock_t clock; 8114 struct dpll clock;
8135 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 8115 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8136 int refclk = 100000; 8116 int refclk = 100000;
8137 8117
@@ -8794,7 +8774,7 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8794 8774
8795static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, 8775static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8796 struct intel_crtc_state *crtc_state, 8776 struct intel_crtc_state *crtc_state,
8797 intel_clock_t *reduced_clock) 8777 struct dpll *reduced_clock)
8798{ 8778{
8799 struct drm_crtc *crtc = &intel_crtc->base; 8779 struct drm_crtc *crtc = &intel_crtc->base;
8800 struct drm_device *dev = crtc->dev; 8780 struct drm_device *dev = crtc->dev;
@@ -8902,10 +8882,10 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8902{ 8882{
8903 struct drm_device *dev = crtc->base.dev; 8883 struct drm_device *dev = crtc->base.dev;
8904 struct drm_i915_private *dev_priv = dev->dev_private; 8884 struct drm_i915_private *dev_priv = dev->dev_private;
8905 intel_clock_t reduced_clock; 8885 struct dpll reduced_clock;
8906 bool has_reduced_clock = false; 8886 bool has_reduced_clock = false;
8907 struct intel_shared_dpll *pll; 8887 struct intel_shared_dpll *pll;
8908 const intel_limit_t *limit; 8888 const struct intel_limit *limit;
8909 int refclk = 120000; 8889 int refclk = 120000;
8910 8890
8911 memset(&crtc_state->dpll_hw_state, 0, 8891 memset(&crtc_state->dpll_hw_state, 0,
@@ -9300,6 +9280,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9300 ironlake_get_fdi_m_n_config(crtc, pipe_config); 9280 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9301 9281
9302 if (HAS_PCH_IBX(dev_priv)) { 9282 if (HAS_PCH_IBX(dev_priv)) {
9283 /*
9284 * The pipe->pch transcoder and pch transcoder->pll
9285 * mapping is fixed.
9286 */
9303 pll_id = (enum intel_dpll_id) crtc->pipe; 9287 pll_id = (enum intel_dpll_id) crtc->pipe;
9304 } else { 9288 } else {
9305 tmp = I915_READ(PCH_DPLL_SEL); 9289 tmp = I915_READ(PCH_DPLL_SEL);
@@ -9687,6 +9671,18 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
9687 cdclk, dev_priv->cdclk_freq); 9671 cdclk, dev_priv->cdclk_freq);
9688} 9672}
9689 9673
9674static int broadwell_calc_cdclk(int max_pixclk)
9675{
9676 if (max_pixclk > 540000)
9677 return 675000;
9678 else if (max_pixclk > 450000)
9679 return 540000;
9680 else if (max_pixclk > 337500)
9681 return 450000;
9682 else
9683 return 337500;
9684}
9685
9690static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) 9686static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9691{ 9687{
9692 struct drm_i915_private *dev_priv = to_i915(state->dev); 9688 struct drm_i915_private *dev_priv = to_i915(state->dev);
@@ -9698,14 +9694,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9698 * FIXME should also account for plane ratio 9694 * FIXME should also account for plane ratio
9699 * once 64bpp pixel formats are supported. 9695 * once 64bpp pixel formats are supported.
9700 */ 9696 */
9701 if (max_pixclk > 540000) 9697 cdclk = broadwell_calc_cdclk(max_pixclk);
9702 cdclk = 675000;
9703 else if (max_pixclk > 450000)
9704 cdclk = 540000;
9705 else if (max_pixclk > 337500)
9706 cdclk = 450000;
9707 else
9708 cdclk = 337500;
9709 9698
9710 if (cdclk > dev_priv->max_cdclk_freq) { 9699 if (cdclk > dev_priv->max_cdclk_freq) {
9711 DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n", 9700 DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
@@ -9715,7 +9704,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9715 9704
9716 intel_state->cdclk = intel_state->dev_cdclk = cdclk; 9705 intel_state->cdclk = intel_state->dev_cdclk = cdclk;
9717 if (!intel_state->active_crtcs) 9706 if (!intel_state->active_crtcs)
9718 intel_state->dev_cdclk = 337500; 9707 intel_state->dev_cdclk = broadwell_calc_cdclk(0);
9719 9708
9720 return 0; 9709 return 0;
9721} 9710}
@@ -9850,6 +9839,10 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
9850 enum intel_display_power_domain power_domain; 9839 enum intel_display_power_domain power_domain;
9851 u32 tmp; 9840 u32 tmp;
9852 9841
9842 /*
9843 * The pipe->transcoder mapping is fixed with the exception of the eDP
9844 * transcoder handled below.
9845 */
9853 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9846 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9854 9847
9855 /* 9848 /*
@@ -10317,10 +10310,10 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
10317 struct drm_i915_gem_object *obj; 10310 struct drm_i915_gem_object *obj;
10318 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 10311 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
10319 10312
10320 obj = i915_gem_alloc_object(dev, 10313 obj = i915_gem_object_create(dev,
10321 intel_framebuffer_size_for_mode(mode, bpp)); 10314 intel_framebuffer_size_for_mode(mode, bpp));
10322 if (obj == NULL) 10315 if (IS_ERR(obj))
10323 return ERR_PTR(-ENOMEM); 10316 return ERR_CAST(obj);
10324 10317
10325 mode_cmd.width = mode->hdisplay; 10318 mode_cmd.width = mode->hdisplay;
10326 mode_cmd.height = mode->vdisplay; 10319 mode_cmd.height = mode->vdisplay;
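The switch from i915_gem_alloc_object() to i915_gem_object_create() in this hunk also switches the error convention from "NULL on failure" to an ERR_PTR-encoded errno, which is why the check becomes IS_ERR() and the failure path ERR_CAST() instead of a hand-rolled ERR_PTR(-ENOMEM). A minimal userspace-style sketch of that idiom; the kernel macros are reimplemented here purely for illustration and object_create() is a made-up stand-in:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct object { int size; };

/* Returns a valid pointer or an ERR_PTR-encoded errno, never NULL. */
static struct object *object_create(int size)
{
	struct object *obj;

	if (size <= 0)
		return ERR_PTR(-EINVAL);

	obj = malloc(sizeof(*obj));
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->size = size;
	return obj;
}

int main(void)
{
	struct object *obj = object_create(-1);

	if (IS_ERR(obj)) {
		/* the caller can propagate the encoded error directly,
		 * which is what ERR_CAST() does in the hunk above */
		printf("create failed: %ld\n", PTR_ERR(obj));
		return 1;
	}

	printf("created object of size %d\n", obj->size);
	free(obj);
	return 0;
}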
@@ -10632,7 +10625,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
10632 int pipe = pipe_config->cpu_transcoder; 10625 int pipe = pipe_config->cpu_transcoder;
10633 u32 dpll = pipe_config->dpll_hw_state.dpll; 10626 u32 dpll = pipe_config->dpll_hw_state.dpll;
10634 u32 fp; 10627 u32 fp;
10635 intel_clock_t clock; 10628 struct dpll clock;
10636 int port_clock; 10629 int port_clock;
10637 int refclk = i9xx_pll_refclk(dev, pipe_config); 10630 int refclk = i9xx_pll_refclk(dev, pipe_config);
10638 10631
@@ -10806,31 +10799,27 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10806 return mode; 10799 return mode;
10807} 10800}
10808 10801
10809void intel_mark_busy(struct drm_device *dev) 10802void intel_mark_busy(struct drm_i915_private *dev_priv)
10810{ 10803{
10811 struct drm_i915_private *dev_priv = dev->dev_private;
10812
10813 if (dev_priv->mm.busy) 10804 if (dev_priv->mm.busy)
10814 return; 10805 return;
10815 10806
10816 intel_runtime_pm_get(dev_priv); 10807 intel_runtime_pm_get(dev_priv);
10817 i915_update_gfx_val(dev_priv); 10808 i915_update_gfx_val(dev_priv);
10818 if (INTEL_INFO(dev)->gen >= 6) 10809 if (INTEL_GEN(dev_priv) >= 6)
10819 gen6_rps_busy(dev_priv); 10810 gen6_rps_busy(dev_priv);
10820 dev_priv->mm.busy = true; 10811 dev_priv->mm.busy = true;
10821} 10812}
10822 10813
10823void intel_mark_idle(struct drm_device *dev) 10814void intel_mark_idle(struct drm_i915_private *dev_priv)
10824{ 10815{
10825 struct drm_i915_private *dev_priv = dev->dev_private;
10826
10827 if (!dev_priv->mm.busy) 10816 if (!dev_priv->mm.busy)
10828 return; 10817 return;
10829 10818
10830 dev_priv->mm.busy = false; 10819 dev_priv->mm.busy = false;
10831 10820
10832 if (INTEL_INFO(dev)->gen >= 6) 10821 if (INTEL_GEN(dev_priv) >= 6)
10833 gen6_rps_idle(dev->dev_private); 10822 gen6_rps_idle(dev_priv);
10834 10823
10835 intel_runtime_pm_put(dev_priv); 10824 intel_runtime_pm_put(dev_priv);
10836} 10825}
@@ -10839,15 +10828,16 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
10839{ 10828{
10840 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10829 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10841 struct drm_device *dev = crtc->dev; 10830 struct drm_device *dev = crtc->dev;
10842 struct intel_unpin_work *work; 10831 struct intel_flip_work *work;
10843 10832
10844 spin_lock_irq(&dev->event_lock); 10833 spin_lock_irq(&dev->event_lock);
10845 work = intel_crtc->unpin_work; 10834 work = intel_crtc->flip_work;
10846 intel_crtc->unpin_work = NULL; 10835 intel_crtc->flip_work = NULL;
10847 spin_unlock_irq(&dev->event_lock); 10836 spin_unlock_irq(&dev->event_lock);
10848 10837
10849 if (work) { 10838 if (work) {
10850 cancel_work_sync(&work->work); 10839 cancel_work_sync(&work->mmio_work);
10840 cancel_work_sync(&work->unpin_work);
10851 kfree(work); 10841 kfree(work);
10852 } 10842 }
10853 10843
@@ -10858,12 +10848,15 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
10858 10848
10859static void intel_unpin_work_fn(struct work_struct *__work) 10849static void intel_unpin_work_fn(struct work_struct *__work)
10860{ 10850{
10861 struct intel_unpin_work *work = 10851 struct intel_flip_work *work =
10862 container_of(__work, struct intel_unpin_work, work); 10852 container_of(__work, struct intel_flip_work, unpin_work);
10863 struct intel_crtc *crtc = to_intel_crtc(work->crtc); 10853 struct intel_crtc *crtc = to_intel_crtc(work->crtc);
10864 struct drm_device *dev = crtc->base.dev; 10854 struct drm_device *dev = crtc->base.dev;
10865 struct drm_plane *primary = crtc->base.primary; 10855 struct drm_plane *primary = crtc->base.primary;
10866 10856
10857 if (is_mmio_work(work))
10858 flush_work(&work->mmio_work);
10859
10867 mutex_lock(&dev->struct_mutex); 10860 mutex_lock(&dev->struct_mutex);
10868 intel_unpin_fb_obj(work->old_fb, primary->state->rotation); 10861 intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
10869 drm_gem_object_unreference(&work->pending_flip_obj->base); 10862 drm_gem_object_unreference(&work->pending_flip_obj->base);
@@ -10882,60 +10875,14 @@ static void intel_unpin_work_fn(struct work_struct *__work)
10882 kfree(work); 10875 kfree(work);
10883} 10876}
10884 10877
10885static void do_intel_finish_page_flip(struct drm_device *dev,
10886 struct drm_crtc *crtc)
10887{
10888 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10889 struct intel_unpin_work *work;
10890 unsigned long flags;
10891
10892 /* Ignore early vblank irqs */
10893 if (intel_crtc == NULL)
10894 return;
10895
10896 /*
10897 * This is called both by irq handlers and the reset code (to complete
10898 * lost pageflips) so needs the full irqsave spinlocks.
10899 */
10900 spin_lock_irqsave(&dev->event_lock, flags);
10901 work = intel_crtc->unpin_work;
10902
10903 /* Ensure we don't miss a work->pending update ... */
10904 smp_rmb();
10905
10906 if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
10907 spin_unlock_irqrestore(&dev->event_lock, flags);
10908 return;
10909 }
10910
10911 page_flip_completed(intel_crtc);
10912
10913 spin_unlock_irqrestore(&dev->event_lock, flags);
10914}
10915
10916void intel_finish_page_flip(struct drm_device *dev, int pipe)
10917{
10918 struct drm_i915_private *dev_priv = dev->dev_private;
10919 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
10920
10921 do_intel_finish_page_flip(dev, crtc);
10922}
10923
10924void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
10925{
10926 struct drm_i915_private *dev_priv = dev->dev_private;
10927 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
10928
10929 do_intel_finish_page_flip(dev, crtc);
10930}
10931
10932/* Is 'a' after or equal to 'b'? */ 10878/* Is 'a' after or equal to 'b'? */
10933static bool g4x_flip_count_after_eq(u32 a, u32 b) 10879static bool g4x_flip_count_after_eq(u32 a, u32 b)
10934{ 10880{
10935 return !((a - b) & 0x80000000); 10881 return !((a - b) & 0x80000000);
10936} 10882}
10937 10883
10938static bool page_flip_finished(struct intel_crtc *crtc) 10884static bool __pageflip_finished_cs(struct intel_crtc *crtc,
10885 struct intel_flip_work *work)
10939{ 10886{
10940 struct drm_device *dev = crtc->base.dev; 10887 struct drm_device *dev = crtc->base.dev;
10941 struct drm_i915_private *dev_priv = dev->dev_private; 10888 struct drm_i915_private *dev_priv = dev->dev_private;
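The g4x_flip_count_after_eq() helper kept a few lines above compares hardware flip counters that wrap at 2^32; testing the sign bit of the unsigned difference gives an "after or equal" answer that survives wraparound as long as the two counters stay within 2^31 of each other. A small standalone check of that property (the test values are mine, not from the commit):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Same comparison as g4x_flip_count_after_eq(). */
static bool flip_count_after_eq(uint32_t a, uint32_t b)
{
        return !((a - b) & 0x80000000);
}

int main(void)
{
        assert(flip_count_after_eq(5, 5));            /* equal */
        assert(flip_count_after_eq(6, 5));            /* after */
        assert(!flip_count_after_eq(5, 6));           /* before */
        assert(flip_count_after_eq(2, 0xfffffffeu));  /* after, across the wrap */
        return 0;
}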
@@ -10977,40 +10924,103 @@ static bool page_flip_finished(struct intel_crtc *crtc)
10977 * anyway, we don't really care. 10924 * anyway, we don't really care.
10978 */ 10925 */
10979 return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) == 10926 return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
10980 crtc->unpin_work->gtt_offset && 10927 crtc->flip_work->gtt_offset &&
10981 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)), 10928 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
10982 crtc->unpin_work->flip_count); 10929 crtc->flip_work->flip_count);
10983} 10930}
10984 10931
10985void intel_prepare_page_flip(struct drm_device *dev, int plane) 10932static bool
10933__pageflip_finished_mmio(struct intel_crtc *crtc,
10934 struct intel_flip_work *work)
10986{ 10935{
10987 struct drm_i915_private *dev_priv = dev->dev_private; 10936 /*
10988 struct intel_crtc *intel_crtc = 10937 * MMIO work completes when vblank is different from
10989 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); 10938 * flip_queued_vblank.
10939 *
10940 * Reset counter value doesn't matter, this is handled by
10941 * i915_wait_request finishing early, so no need to handle
10942 * reset here.
10943 */
10944 return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
10945}
10946
10947
10948static bool pageflip_finished(struct intel_crtc *crtc,
10949 struct intel_flip_work *work)
10950{
10951 if (!atomic_read(&work->pending))
10952 return false;
10953
10954 smp_rmb();
10955
10956 if (is_mmio_work(work))
10957 return __pageflip_finished_mmio(crtc, work);
10958 else
10959 return __pageflip_finished_cs(crtc, work);
10960}
10961
10962void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
10963{
10964 struct drm_device *dev = dev_priv->dev;
10965 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
10966 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10967 struct intel_flip_work *work;
10968 unsigned long flags;
10969
10970 /* Ignore early vblank irqs */
10971 if (!crtc)
10972 return;
10973
10974 /*
10975 * This is called both by irq handlers and the reset code (to complete
10976 * lost pageflips) so needs the full irqsave spinlocks.
10977 */
10978 spin_lock_irqsave(&dev->event_lock, flags);
10979 work = intel_crtc->flip_work;
10980
10981 if (work != NULL &&
10982 !is_mmio_work(work) &&
10983 pageflip_finished(intel_crtc, work))
10984 page_flip_completed(intel_crtc);
10985
10986 spin_unlock_irqrestore(&dev->event_lock, flags);
10987}
10988
10989void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
10990{
10991 struct drm_device *dev = dev_priv->dev;
10992 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
10993 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10994 struct intel_flip_work *work;
10990 unsigned long flags; 10995 unsigned long flags;
10991 10996
10997 /* Ignore early vblank irqs */
10998 if (!crtc)
10999 return;
10992 11000
10993 /* 11001 /*
10994 * This is called both by irq handlers and the reset code (to complete 11002 * This is called both by irq handlers and the reset code (to complete
10995 * lost pageflips) so needs the full irqsave spinlocks. 11003 * lost pageflips) so needs the full irqsave spinlocks.
10996 *
10997 * NB: An MMIO update of the plane base pointer will also
10998 * generate a page-flip completion irq, i.e. every modeset
10999 * is also accompanied by a spurious intel_prepare_page_flip().
11000 */ 11004 */
11001 spin_lock_irqsave(&dev->event_lock, flags); 11005 spin_lock_irqsave(&dev->event_lock, flags);
11002 if (intel_crtc->unpin_work && page_flip_finished(intel_crtc)) 11006 work = intel_crtc->flip_work;
11003 atomic_inc_not_zero(&intel_crtc->unpin_work->pending); 11007
11008 if (work != NULL &&
11009 is_mmio_work(work) &&
11010 pageflip_finished(intel_crtc, work))
11011 page_flip_completed(intel_crtc);
11012
11004 spin_unlock_irqrestore(&dev->event_lock, flags); 11013 spin_unlock_irqrestore(&dev->event_lock, flags);
11005} 11014}
11006 11015
11007static inline void intel_mark_page_flip_active(struct intel_unpin_work *work) 11016static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
11017 struct intel_flip_work *work)
11008{ 11018{
11019 work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
11020
11009 /* Ensure that the work item is consistent when activating it ... */ 11021 /* Ensure that the work item is consistent when activating it ... */
11010 smp_wmb(); 11022 smp_mb__before_atomic();
11011 atomic_set(&work->pending, INTEL_FLIP_PENDING); 11023 atomic_set(&work->pending, 1);
11012 /* and that it is marked active as soon as the irq could fire. */
11013 smp_wmb();
11014} 11024}
11015 11025
11016static int intel_gen2_queue_flip(struct drm_device *dev, 11026static int intel_gen2_queue_flip(struct drm_device *dev,
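intel_mark_page_flip_active() above now publishes a flip through a single pending flag: the writer fills in flip_queued_vblank, issues smp_mb__before_atomic() and only then sets the flag, while readers such as pageflip_finished() test the flag and follow it with smp_rmb() before trusting the other fields. A minimal kernel-style sketch of that pairing (not a standalone program; the stand-in struct only mirrors the fields involved):

#include <linux/atomic.h>
#include <linux/types.h>

struct flip_publish_demo {
        atomic_t pending;
        u32 flip_queued_vblank;
};

/* Writer, as in intel_mark_page_flip_active(): make every store to the work
 * item visible before another CPU can observe pending != 0. */
static void publish_flip(struct flip_publish_demo *work, u32 vblank)
{
        work->flip_queued_vblank = vblank;
        smp_mb__before_atomic();
        atomic_set(&work->pending, 1);
}

/* Reader, as in pageflip_finished(): only trust the other fields once the
 * flag is seen set, with smp_rmb() ordering the later loads. */
static bool flip_published(struct flip_publish_demo *work)
{
        if (!atomic_read(&work->pending))
                return false;
        smp_rmb();
        return true;
}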
@@ -11041,10 +11051,9 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
11041 intel_ring_emit(engine, MI_DISPLAY_FLIP | 11051 intel_ring_emit(engine, MI_DISPLAY_FLIP |
11042 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11052 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11043 intel_ring_emit(engine, fb->pitches[0]); 11053 intel_ring_emit(engine, fb->pitches[0]);
11044 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); 11054 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11045 intel_ring_emit(engine, 0); /* aux display base address, unused */ 11055 intel_ring_emit(engine, 0); /* aux display base address, unused */
11046 11056
11047 intel_mark_page_flip_active(intel_crtc->unpin_work);
11048 return 0; 11057 return 0;
11049} 11058}
11050 11059
@@ -11073,10 +11082,9 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
11073 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | 11082 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
11074 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11083 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11075 intel_ring_emit(engine, fb->pitches[0]); 11084 intel_ring_emit(engine, fb->pitches[0]);
11076 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); 11085 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11077 intel_ring_emit(engine, MI_NOOP); 11086 intel_ring_emit(engine, MI_NOOP);
11078 11087
11079 intel_mark_page_flip_active(intel_crtc->unpin_work);
11080 return 0; 11088 return 0;
11081} 11089}
11082 11090
@@ -11104,7 +11112,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
11104 intel_ring_emit(engine, MI_DISPLAY_FLIP | 11112 intel_ring_emit(engine, MI_DISPLAY_FLIP |
11105 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11113 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11106 intel_ring_emit(engine, fb->pitches[0]); 11114 intel_ring_emit(engine, fb->pitches[0]);
11107 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset | 11115 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
11108 obj->tiling_mode); 11116 obj->tiling_mode);
11109 11117
11110 /* XXX Enabling the panel-fitter across page-flip is so far 11118 /* XXX Enabling the panel-fitter across page-flip is so far
@@ -11115,7 +11123,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
11115 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 11123 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11116 intel_ring_emit(engine, pf | pipesrc); 11124 intel_ring_emit(engine, pf | pipesrc);
11117 11125
11118 intel_mark_page_flip_active(intel_crtc->unpin_work);
11119 return 0; 11126 return 0;
11120} 11127}
11121 11128
@@ -11139,7 +11146,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
11139 intel_ring_emit(engine, MI_DISPLAY_FLIP | 11146 intel_ring_emit(engine, MI_DISPLAY_FLIP |
11140 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11147 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11141 intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode); 11148 intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
11142 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); 11149 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11143 11150
11144 /* Contrary to the suggestions in the documentation, 11151 /* Contrary to the suggestions in the documentation,
11145 * "Enable Panel Fitter" does not seem to be required when page 11152 * "Enable Panel Fitter" does not seem to be required when page
@@ -11151,7 +11158,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
11151 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 11158 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11152 intel_ring_emit(engine, pf | pipesrc); 11159 intel_ring_emit(engine, pf | pipesrc);
11153 11160
11154 intel_mark_page_flip_active(intel_crtc->unpin_work);
11155 return 0; 11161 return 0;
11156} 11162}
11157 11163
@@ -11243,10 +11249,9 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
11243 11249
11244 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit); 11250 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
11245 intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode)); 11251 intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
11246 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); 11252 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11247 intel_ring_emit(engine, (MI_NOOP)); 11253 intel_ring_emit(engine, (MI_NOOP));
11248 11254
11249 intel_mark_page_flip_active(intel_crtc->unpin_work);
11250 return 0; 11255 return 0;
11251} 11256}
11252 11257
@@ -11264,7 +11269,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
11264 if (engine == NULL) 11269 if (engine == NULL)
11265 return true; 11270 return true;
11266 11271
11267 if (INTEL_INFO(engine->dev)->gen < 5) 11272 if (INTEL_GEN(engine->i915) < 5)
11268 return false; 11273 return false;
11269 11274
11270 if (i915.use_mmio_flip < 0) 11275 if (i915.use_mmio_flip < 0)
@@ -11283,7 +11288,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
11283 11288
11284static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, 11289static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11285 unsigned int rotation, 11290 unsigned int rotation,
11286 struct intel_unpin_work *work) 11291 struct intel_flip_work *work)
11287{ 11292{
11288 struct drm_device *dev = intel_crtc->base.dev; 11293 struct drm_device *dev = intel_crtc->base.dev;
11289 struct drm_i915_private *dev_priv = dev->dev_private; 11294 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11335,7 +11340,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11335} 11340}
11336 11341
11337static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, 11342static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11338 struct intel_unpin_work *work) 11343 struct intel_flip_work *work)
11339{ 11344{
11340 struct drm_device *dev = intel_crtc->base.dev; 11345 struct drm_device *dev = intel_crtc->base.dev;
11341 struct drm_i915_private *dev_priv = dev->dev_private; 11346 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11358,48 +11363,20 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11358 POSTING_READ(DSPSURF(intel_crtc->plane)); 11363 POSTING_READ(DSPSURF(intel_crtc->plane));
11359} 11364}
11360 11365
11361/* 11366static void intel_mmio_flip_work_func(struct work_struct *w)
11362 * XXX: This is the temporary way to update the plane registers until we get
11363 * around to using the usual plane update functions for MMIO flips
11364 */
11365static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
11366{ 11367{
11367 struct intel_crtc *crtc = mmio_flip->crtc; 11368 struct intel_flip_work *work =
11368 struct intel_unpin_work *work; 11369 container_of(w, struct intel_flip_work, mmio_work);
11369 11370 struct intel_crtc *crtc = to_intel_crtc(work->crtc);
11370 spin_lock_irq(&crtc->base.dev->event_lock); 11371 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11371 work = crtc->unpin_work;
11372 spin_unlock_irq(&crtc->base.dev->event_lock);
11373 if (work == NULL)
11374 return;
11375
11376 intel_mark_page_flip_active(work);
11377
11378 intel_pipe_update_start(crtc);
11379
11380 if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
11381 skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
11382 else
11383 /* use_mmio_flip() retricts MMIO flips to ilk+ */
11384 ilk_do_mmio_flip(crtc, work);
11385
11386 intel_pipe_update_end(crtc);
11387}
11388
11389static void intel_mmio_flip_work_func(struct work_struct *work)
11390{
11391 struct intel_mmio_flip *mmio_flip =
11392 container_of(work, struct intel_mmio_flip, work);
11393 struct intel_framebuffer *intel_fb = 11372 struct intel_framebuffer *intel_fb =
11394 to_intel_framebuffer(mmio_flip->crtc->base.primary->fb); 11373 to_intel_framebuffer(crtc->base.primary->fb);
11395 struct drm_i915_gem_object *obj = intel_fb->obj; 11374 struct drm_i915_gem_object *obj = intel_fb->obj;
11396 11375
11397 if (mmio_flip->req) { 11376 if (work->flip_queued_req)
11398 WARN_ON(__i915_wait_request(mmio_flip->req, 11377 WARN_ON(__i915_wait_request(work->flip_queued_req,
11399 false, NULL, 11378 false, NULL,
11400 &mmio_flip->i915->rps.mmioflips)); 11379 &dev_priv->rps.mmioflips));
11401 i915_gem_request_unreference__unlocked(mmio_flip->req);
11402 }
11403 11380
11404 /* For framebuffer backed by dmabuf, wait for fence */ 11381 /* For framebuffer backed by dmabuf, wait for fence */
11405 if (obj->base.dma_buf) 11382 if (obj->base.dma_buf)
@@ -11407,29 +11384,15 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
11407 false, false, 11384 false, false,
11408 MAX_SCHEDULE_TIMEOUT) < 0); 11385 MAX_SCHEDULE_TIMEOUT) < 0);
11409 11386
11410 intel_do_mmio_flip(mmio_flip); 11387 intel_pipe_update_start(crtc);
11411 kfree(mmio_flip);
11412}
11413
11414static int intel_queue_mmio_flip(struct drm_device *dev,
11415 struct drm_crtc *crtc,
11416 struct drm_i915_gem_object *obj)
11417{
11418 struct intel_mmio_flip *mmio_flip;
11419
11420 mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
11421 if (mmio_flip == NULL)
11422 return -ENOMEM;
11423
11424 mmio_flip->i915 = to_i915(dev);
11425 mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
11426 mmio_flip->crtc = to_intel_crtc(crtc);
11427 mmio_flip->rotation = crtc->primary->state->rotation;
11428 11388
11429 INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func); 11389 if (INTEL_GEN(dev_priv) >= 9)
11430 schedule_work(&mmio_flip->work); 11390 skl_do_mmio_flip(crtc, work->rotation, work);
11391 else
11392 /* use_mmio_flip() retricts MMIO flips to ilk+ */
11393 ilk_do_mmio_flip(crtc, work);
11431 11394
11432 return 0; 11395 intel_pipe_update_end(crtc, work);
11433} 11396}
11434 11397
11435static int intel_default_queue_flip(struct drm_device *dev, 11398static int intel_default_queue_flip(struct drm_device *dev,
@@ -11442,37 +11405,32 @@ static int intel_default_queue_flip(struct drm_device *dev,
11442 return -ENODEV; 11405 return -ENODEV;
11443} 11406}
11444 11407
11445static bool __intel_pageflip_stall_check(struct drm_device *dev, 11408static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
11446 struct drm_crtc *crtc) 11409 struct intel_crtc *intel_crtc,
11410 struct intel_flip_work *work)
11447{ 11411{
11448 struct drm_i915_private *dev_priv = dev->dev_private; 11412 u32 addr, vblank;
11449 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11450 struct intel_unpin_work *work = intel_crtc->unpin_work;
11451 u32 addr;
11452 11413
11453 if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE) 11414 if (!atomic_read(&work->pending))
11454 return true;
11455
11456 if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
11457 return false; 11415 return false;
11458 11416
11459 if (!work->enable_stall_check) 11417 smp_rmb();
11460 return false;
11461 11418
11419 vblank = intel_crtc_get_vblank_counter(intel_crtc);
11462 if (work->flip_ready_vblank == 0) { 11420 if (work->flip_ready_vblank == 0) {
11463 if (work->flip_queued_req && 11421 if (work->flip_queued_req &&
11464 !i915_gem_request_completed(work->flip_queued_req, true)) 11422 !i915_gem_request_completed(work->flip_queued_req, true))
11465 return false; 11423 return false;
11466 11424
11467 work->flip_ready_vblank = drm_crtc_vblank_count(crtc); 11425 work->flip_ready_vblank = vblank;
11468 } 11426 }
11469 11427
11470 if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3) 11428 if (vblank - work->flip_ready_vblank < 3)
11471 return false; 11429 return false;
11472 11430
11473 /* Potential stall - if we see that the flip has happened, 11431 /* Potential stall - if we see that the flip has happened,
11474 * assume a missed interrupt. */ 11432 * assume a missed interrupt. */
11475 if (INTEL_INFO(dev)->gen >= 4) 11433 if (INTEL_GEN(dev_priv) >= 4)
11476 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane))); 11434 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
11477 else 11435 else
11478 addr = I915_READ(DSPADDR(intel_crtc->plane)); 11436 addr = I915_READ(DSPADDR(intel_crtc->plane));
@@ -11484,12 +11442,12 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
11484 return addr == work->gtt_offset; 11442 return addr == work->gtt_offset;
11485} 11443}
11486 11444
11487void intel_check_page_flip(struct drm_device *dev, int pipe) 11445void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
11488{ 11446{
11489 struct drm_i915_private *dev_priv = dev->dev_private; 11447 struct drm_device *dev = dev_priv->dev;
11490 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 11448 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11491 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11449 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11492 struct intel_unpin_work *work; 11450 struct intel_flip_work *work;
11493 11451
11494 WARN_ON(!in_interrupt()); 11452 WARN_ON(!in_interrupt());
11495 11453
@@ -11497,16 +11455,20 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
11497 return; 11455 return;
11498 11456
11499 spin_lock(&dev->event_lock); 11457 spin_lock(&dev->event_lock);
11500 work = intel_crtc->unpin_work; 11458 work = intel_crtc->flip_work;
11501 if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) { 11459
11502 WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n", 11460 if (work != NULL && !is_mmio_work(work) &&
11503 work->flip_queued_vblank, drm_vblank_count(dev, pipe)); 11461 __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
11462 WARN_ONCE(1,
11463 "Kicking stuck page flip: queued at %d, now %d\n",
11464 work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
11504 page_flip_completed(intel_crtc); 11465 page_flip_completed(intel_crtc);
11505 work = NULL; 11466 work = NULL;
11506 } 11467 }
11507 if (work != NULL && 11468
11508 drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1) 11469 if (work != NULL && !is_mmio_work(work) &&
11509 intel_queue_rps_boost_for_request(dev, work->flip_queued_req); 11470 intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
11471 intel_queue_rps_boost_for_request(work->flip_queued_req);
11510 spin_unlock(&dev->event_lock); 11472 spin_unlock(&dev->event_lock);
11511} 11473}
11512 11474
@@ -11522,7 +11484,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11522 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11484 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11523 struct drm_plane *primary = crtc->primary; 11485 struct drm_plane *primary = crtc->primary;
11524 enum pipe pipe = intel_crtc->pipe; 11486 enum pipe pipe = intel_crtc->pipe;
11525 struct intel_unpin_work *work; 11487 struct intel_flip_work *work;
11526 struct intel_engine_cs *engine; 11488 struct intel_engine_cs *engine;
11527 bool mmio_flip; 11489 bool mmio_flip;
11528 struct drm_i915_gem_request *request = NULL; 11490 struct drm_i915_gem_request *request = NULL;
@@ -11559,19 +11521,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11559 work->event = event; 11521 work->event = event;
11560 work->crtc = crtc; 11522 work->crtc = crtc;
11561 work->old_fb = old_fb; 11523 work->old_fb = old_fb;
11562 INIT_WORK(&work->work, intel_unpin_work_fn); 11524 INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
11563 11525
11564 ret = drm_crtc_vblank_get(crtc); 11526 ret = drm_crtc_vblank_get(crtc);
11565 if (ret) 11527 if (ret)
11566 goto free_work; 11528 goto free_work;
11567 11529
11568 /* We borrow the event spin lock for protecting unpin_work */ 11530 /* We borrow the event spin lock for protecting flip_work */
11569 spin_lock_irq(&dev->event_lock); 11531 spin_lock_irq(&dev->event_lock);
11570 if (intel_crtc->unpin_work) { 11532 if (intel_crtc->flip_work) {
11571 /* Before declaring the flip queue wedged, check if 11533 /* Before declaring the flip queue wedged, check if
11572 * the hardware completed the operation behind our backs. 11534 * the hardware completed the operation behind our backs.
11573 */ 11535 */
11574 if (__intel_pageflip_stall_check(dev, crtc)) { 11536 if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
11575 DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n"); 11537 DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
11576 page_flip_completed(intel_crtc); 11538 page_flip_completed(intel_crtc);
11577 } else { 11539 } else {
@@ -11583,7 +11545,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11583 return -EBUSY; 11545 return -EBUSY;
11584 } 11546 }
11585 } 11547 }
11586 intel_crtc->unpin_work = work; 11548 intel_crtc->flip_work = work;
11587 spin_unlock_irq(&dev->event_lock); 11549 spin_unlock_irq(&dev->event_lock);
11588 11550
11589 if (atomic_read(&intel_crtc->unpin_work_count) >= 2) 11551 if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
@@ -11638,6 +11600,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11638 */ 11600 */
11639 if (!mmio_flip) { 11601 if (!mmio_flip) {
11640 ret = i915_gem_object_sync(obj, engine, &request); 11602 ret = i915_gem_object_sync(obj, engine, &request);
11603 if (!ret && !request) {
11604 request = i915_gem_request_alloc(engine, NULL);
11605 ret = PTR_ERR_OR_ZERO(request);
11606 }
11607
11641 if (ret) 11608 if (ret)
11642 goto cleanup_pending; 11609 goto cleanup_pending;
11643 } 11610 }
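The new error handling above allocates a request with i915_gem_request_alloc(), which returns an ERR_PTR-encoded pointer, and folds it into ret with PTR_ERR_OR_ZERO(). A standalone illustration of that idiom, using a userspace re-implementation of the err.h helpers and a stand-in allocator (fake_request_alloc is mine, not an i915 function):

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO       4095
#define IS_ERR_VALUE(x) ((uintptr_t)(x) >= (uintptr_t)-MAX_ERRNO)

static inline void *ERR_PTR(long error)             { return (void *)(uintptr_t)error; }
static inline long PTR_ERR(const void *ptr)         { return (long)(intptr_t)ptr; }
static inline int IS_ERR(const void *ptr)           { return IS_ERR_VALUE(ptr); }
static inline int PTR_ERR_OR_ZERO(const void *ptr)  { return IS_ERR(ptr) ? (int)PTR_ERR(ptr) : 0; }

/* Stand-in for i915_gem_request_alloc(): fails with -ENOMEM (-12) on demand. */
static void *fake_request_alloc(int fail)
{
        static int dummy_request;
        return fail ? ERR_PTR(-12) : &dummy_request;
}

int main(void)
{
        int ret;

        ret = PTR_ERR_OR_ZERO(fake_request_alloc(0));
        printf("success path: ret=%d\n", ret);   /* 0, pointer is usable */

        ret = PTR_ERR_OR_ZERO(fake_request_alloc(1));
        printf("failure path: ret=%d\n", ret);   /* -12 (-ENOMEM) */
        return 0;
}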
@@ -11649,38 +11616,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11649 work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), 11616 work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
11650 obj, 0); 11617 obj, 0);
11651 work->gtt_offset += intel_crtc->dspaddr_offset; 11618 work->gtt_offset += intel_crtc->dspaddr_offset;
11619 work->rotation = crtc->primary->state->rotation;
11652 11620
11653 if (mmio_flip) { 11621 if (mmio_flip) {
11654 ret = intel_queue_mmio_flip(dev, crtc, obj); 11622 INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
11655 if (ret)
11656 goto cleanup_unpin;
11657 11623
11658 i915_gem_request_assign(&work->flip_queued_req, 11624 i915_gem_request_assign(&work->flip_queued_req,
11659 obj->last_write_req); 11625 obj->last_write_req);
11660 } else {
11661 if (!request) {
11662 request = i915_gem_request_alloc(engine, NULL);
11663 if (IS_ERR(request)) {
11664 ret = PTR_ERR(request);
11665 goto cleanup_unpin;
11666 }
11667 }
11668 11626
11627 schedule_work(&work->mmio_work);
11628 } else {
11629 i915_gem_request_assign(&work->flip_queued_req, request);
11669 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request, 11630 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
11670 page_flip_flags); 11631 page_flip_flags);
11671 if (ret) 11632 if (ret)
11672 goto cleanup_unpin; 11633 goto cleanup_unpin;
11673 11634
11674 i915_gem_request_assign(&work->flip_queued_req, request); 11635 intel_mark_page_flip_active(intel_crtc, work);
11675 }
11676 11636
11677 if (request)
11678 i915_add_request_no_flush(request); 11637 i915_add_request_no_flush(request);
11638 }
11679 11639
11680 work->flip_queued_vblank = drm_crtc_vblank_count(crtc); 11640 i915_gem_track_fb(intel_fb_obj(old_fb), obj,
11681 work->enable_stall_check = true;
11682
11683 i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
11684 to_intel_plane(primary)->frontbuffer_bit); 11641 to_intel_plane(primary)->frontbuffer_bit);
11685 mutex_unlock(&dev->struct_mutex); 11642 mutex_unlock(&dev->struct_mutex);
11686 11643
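In the rewritten intel_crtc_page_flip() above the two flip paths now diverge cleanly: an MMIO flip initialises work->mmio_work, records obj->last_write_req as flip_queued_req and schedules the worker (which waits for the request and any dma-buf fence before writing the plane registers between intel_pipe_update_start/end), while a CS flip emits the flip into the ring, marks the flip active and submits the request. A condensed sketch of that branch, using only names visible in this diff, with locking and error paths omitted:

/* Not a drop-in replacement for the code above, just the shape of the split. */
if (mmio_flip) {
        INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
        /* complete against whatever last wrote the object */
        i915_gem_request_assign(&work->flip_queued_req, obj->last_write_req);
        schedule_work(&work->mmio_work);
} else {
        i915_gem_request_assign(&work->flip_queued_req, request);
        ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
                                           page_flip_flags);
        intel_mark_page_flip_active(intel_crtc, work);
        i915_add_request_no_flush(request);
}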
@@ -11706,7 +11663,7 @@ cleanup:
11706 drm_framebuffer_unreference(work->old_fb); 11663 drm_framebuffer_unreference(work->old_fb);
11707 11664
11708 spin_lock_irq(&dev->event_lock); 11665 spin_lock_irq(&dev->event_lock);
11709 intel_crtc->unpin_work = NULL; 11666 intel_crtc->flip_work = NULL;
11710 spin_unlock_irq(&dev->event_lock); 11667 spin_unlock_irq(&dev->event_lock);
11711 11668
11712 drm_crtc_vblank_put(crtc); 11669 drm_crtc_vblank_put(crtc);
@@ -11834,6 +11791,11 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11834 * Visibility is calculated as if the crtc was on, but 11791 * Visibility is calculated as if the crtc was on, but
11835 * after scaler setup everything depends on it being off 11792 * after scaler setup everything depends on it being off
11836 * when the crtc isn't active. 11793 * when the crtc isn't active.
11794 *
11795 * FIXME this is wrong for watermarks. Watermarks should also
11796 * be computed as if the pipe would be active. Perhaps move
11797 * per-plane wm computation to the .check_plane() hook, and
11798 * only combine the results from all planes in the current place?
11837 */ 11799 */
11838 if (!is_crtc_enabled) 11800 if (!is_crtc_enabled)
11839 to_intel_plane_state(plane_state)->visible = visible = false; 11801 to_intel_plane_state(plane_state)->visible = visible = false;
@@ -12007,7 +11969,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
12007 } 11969 }
12008 } else if (dev_priv->display.compute_intermediate_wm) { 11970 } else if (dev_priv->display.compute_intermediate_wm) {
12009 if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9) 11971 if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
12010 pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk; 11972 pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
12011 } 11973 }
12012 11974
12013 if (INTEL_INFO(dev)->gen >= 9) { 11975 if (INTEL_INFO(dev)->gen >= 9) {
@@ -13280,6 +13242,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
13280 intel_state->active_crtcs |= 1 << i; 13242 intel_state->active_crtcs |= 1 << i;
13281 else 13243 else
13282 intel_state->active_crtcs &= ~(1 << i); 13244 intel_state->active_crtcs &= ~(1 << i);
13245
13246 if (crtc_state->active != crtc->state->active)
13247 intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
13283 } 13248 }
13284 13249
13285 /* 13250 /*
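intel_modeset_checks() above keeps per-CRTC bookkeeping in bitmasks: active_crtcs is set or cleared with 1 << i, and the new active_pipe_changes field accumulates drm_crtc_mask(crtc), which is the same kind of single-CRTC bit (1 << drm_crtc_index(crtc)). A tiny standalone illustration of the arithmetic (stand-in indices, not i915 code):

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int active_crtcs = 0, active_pipe_changes = 0;
        const int wants_active[] = { 1, 0, 1 };   /* CRTCs 0 and 2 active */
        int i;

        for (i = 0; i < 3; i++) {
                if (wants_active[i])
                        active_crtcs |= 1u << i;
                else
                        active_crtcs &= ~(1u << i);

                /* equivalent of |= drm_crtc_mask(crtc) when the state changed */
                active_pipe_changes |= 1u << i;
        }

        assert(active_crtcs == 0x5);
        printf("active_crtcs=0x%x active_pipe_changes=0x%x\n",
               active_crtcs, active_pipe_changes);
        return 0;
}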
@@ -13316,38 +13281,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
13316 * phase. The code here should be run after the per-crtc and per-plane 'check' 13281 * phase. The code here should be run after the per-crtc and per-plane 'check'
13317 * handlers to ensure that all derived state has been updated. 13282 * handlers to ensure that all derived state has been updated.
13318 */ 13283 */
13319static void calc_watermark_data(struct drm_atomic_state *state) 13284static int calc_watermark_data(struct drm_atomic_state *state)
13320{ 13285{
13321 struct drm_device *dev = state->dev; 13286 struct drm_device *dev = state->dev;
13322 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 13287 struct drm_i915_private *dev_priv = to_i915(dev);
13323 struct drm_crtc *crtc;
13324 struct drm_crtc_state *cstate;
13325 struct drm_plane *plane;
13326 struct drm_plane_state *pstate;
13327
13328 /*
13329 * Calculate watermark configuration details now that derived
13330 * plane/crtc state is all properly updated.
13331 */
13332 drm_for_each_crtc(crtc, dev) {
13333 cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
13334 crtc->state;
13335
13336 if (cstate->active)
13337 intel_state->wm_config.num_pipes_active++;
13338 }
13339 drm_for_each_legacy_plane(plane, dev) {
13340 pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
13341 plane->state;
13342 13288
13343 if (!to_intel_plane_state(pstate)->visible) 13289 /* Is there platform-specific watermark information to calculate? */
13344 continue; 13290 if (dev_priv->display.compute_global_watermarks)
13291 return dev_priv->display.compute_global_watermarks(state);
13345 13292
13346 intel_state->wm_config.sprites_enabled = true; 13293 return 0;
13347 if (pstate->crtc_w != pstate->src_w >> 16 ||
13348 pstate->crtc_h != pstate->src_h >> 16)
13349 intel_state->wm_config.sprites_scaled = true;
13350 }
13351} 13294}
13352 13295
13353/** 13296/**
@@ -13377,14 +13320,13 @@ static int intel_atomic_check(struct drm_device *dev,
13377 if (crtc_state->mode.private_flags != crtc->state->mode.private_flags) 13320 if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
13378 crtc_state->mode_changed = true; 13321 crtc_state->mode_changed = true;
13379 13322
13380 if (!crtc_state->enable) { 13323 if (!needs_modeset(crtc_state))
13381 if (needs_modeset(crtc_state))
13382 any_ms = true;
13383 continue; 13324 continue;
13384 }
13385 13325
13386 if (!needs_modeset(crtc_state)) 13326 if (!crtc_state->enable) {
13327 any_ms = true;
13387 continue; 13328 continue;
13329 }
13388 13330
13389 /* FIXME: For only active_changed we shouldn't need to do any 13331 /* FIXME: For only active_changed we shouldn't need to do any
13390 * state recomputation at all. */ 13332 * state recomputation at all. */
@@ -13394,8 +13336,11 @@ static int intel_atomic_check(struct drm_device *dev,
13394 return ret; 13336 return ret;
13395 13337
13396 ret = intel_modeset_pipe_config(crtc, pipe_config); 13338 ret = intel_modeset_pipe_config(crtc, pipe_config);
13397 if (ret) 13339 if (ret) {
13340 intel_dump_pipe_config(to_intel_crtc(crtc),
13341 pipe_config, "[failed]");
13398 return ret; 13342 return ret;
13343 }
13399 13344
13400 if (i915.fastboot && 13345 if (i915.fastboot &&
13401 intel_pipe_config_compare(dev, 13346 intel_pipe_config_compare(dev,
@@ -13405,13 +13350,12 @@ static int intel_atomic_check(struct drm_device *dev,
13405 to_intel_crtc_state(crtc_state)->update_pipe = true; 13350 to_intel_crtc_state(crtc_state)->update_pipe = true;
13406 } 13351 }
13407 13352
13408 if (needs_modeset(crtc_state)) { 13353 if (needs_modeset(crtc_state))
13409 any_ms = true; 13354 any_ms = true;
13410 13355
13411 ret = drm_atomic_add_affected_planes(state, crtc); 13356 ret = drm_atomic_add_affected_planes(state, crtc);
13412 if (ret) 13357 if (ret)
13413 return ret; 13358 return ret;
13414 }
13415 13359
13416 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, 13360 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
13417 needs_modeset(crtc_state) ? 13361 needs_modeset(crtc_state) ?
@@ -13431,9 +13375,7 @@ static int intel_atomic_check(struct drm_device *dev,
13431 return ret; 13375 return ret;
13432 13376
13433 intel_fbc_choose_crtc(dev_priv, state); 13377 intel_fbc_choose_crtc(dev_priv, state);
13434 calc_watermark_data(state); 13378 return calc_watermark_data(state);
13435
13436 return 0;
13437} 13379}
13438 13380
13439static int intel_atomic_prepare_commit(struct drm_device *dev, 13381static int intel_atomic_prepare_commit(struct drm_device *dev,
@@ -13495,6 +13437,16 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
13495 return ret; 13437 return ret;
13496} 13438}
13497 13439
13440u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
13441{
13442 struct drm_device *dev = crtc->base.dev;
13443
13444 if (!dev->max_vblank_count)
13445 return drm_accurate_vblank_count(&crtc->base);
13446
13447 return dev->driver->get_vblank_counter(dev, crtc->pipe);
13448}
13449
13498static void intel_atomic_wait_for_vblanks(struct drm_device *dev, 13450static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
13499 struct drm_i915_private *dev_priv, 13451 struct drm_i915_private *dev_priv,
13500 unsigned crtc_mask) 13452 unsigned crtc_mask)
@@ -13597,7 +13549,8 @@ static int intel_atomic_commit(struct drm_device *dev,
13597 } 13549 }
13598 13550
13599 drm_atomic_helper_swap_state(dev, state); 13551 drm_atomic_helper_swap_state(dev, state);
13600 dev_priv->wm.config = intel_state->wm_config; 13552 dev_priv->wm.distrust_bios_wm = false;
13553 dev_priv->wm.skl_results = intel_state->wm_results;
13601 intel_shared_dpll_commit(state); 13554 intel_shared_dpll_commit(state);
13602 13555
13603 if (intel_state->modeset) { 13556 if (intel_state->modeset) {
@@ -14006,7 +13959,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
14006{ 13959{
14007 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13960 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14008 13961
14009 intel_pipe_update_end(intel_crtc); 13962 intel_pipe_update_end(intel_crtc, NULL);
14010} 13963}
14011 13964
14012/** 13965/**
@@ -15050,12 +15003,13 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15050 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 15003 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15051 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 15004 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
15052 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 15005 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15053 if (IS_BROADWELL(dev_priv)) { 15006 }
15054 dev_priv->display.modeset_commit_cdclk = 15007
15055 broadwell_modeset_commit_cdclk; 15008 if (IS_BROADWELL(dev_priv)) {
15056 dev_priv->display.modeset_calc_cdclk = 15009 dev_priv->display.modeset_commit_cdclk =
15057 broadwell_modeset_calc_cdclk; 15010 broadwell_modeset_commit_cdclk;
15058 } 15011 dev_priv->display.modeset_calc_cdclk =
15012 broadwell_modeset_calc_cdclk;
15059 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 15013 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
15060 dev_priv->display.modeset_commit_cdclk = 15014 dev_priv->display.modeset_commit_cdclk =
15061 valleyview_modeset_commit_cdclk; 15015 valleyview_modeset_commit_cdclk;
@@ -15293,7 +15247,7 @@ void intel_modeset_init_hw(struct drm_device *dev)
15293 dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq; 15247 dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
15294 15248
15295 intel_init_clock_gating(dev); 15249 intel_init_clock_gating(dev);
15296 intel_enable_gt_powersave(dev); 15250 intel_enable_gt_powersave(dev_priv);
15297} 15251}
15298 15252
15299/* 15253/*
@@ -15363,7 +15317,6 @@ retry:
15363 } 15317 }
15364 15318
15365 /* Write calculated watermark values back */ 15319 /* Write calculated watermark values back */
15366 to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
15367 for_each_crtc_in_state(state, crtc, cstate, i) { 15320 for_each_crtc_in_state(state, crtc, cstate, i) {
15368 struct intel_crtc_state *cs = to_intel_crtc_state(cstate); 15321 struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15369 15322
@@ -15461,7 +15414,6 @@ void intel_modeset_init(struct drm_device *dev)
15461 } 15414 }
15462 15415
15463 intel_update_czclk(dev_priv); 15416 intel_update_czclk(dev_priv);
15464 intel_update_rawclk(dev_priv);
15465 intel_update_cdclk(dev); 15417 intel_update_cdclk(dev);
15466 15418
15467 intel_shared_dpll_init(dev); 15419 intel_shared_dpll_init(dev);
@@ -16025,15 +15977,16 @@ retry:
16025 15977
16026void intel_modeset_gem_init(struct drm_device *dev) 15978void intel_modeset_gem_init(struct drm_device *dev)
16027{ 15979{
15980 struct drm_i915_private *dev_priv = to_i915(dev);
16028 struct drm_crtc *c; 15981 struct drm_crtc *c;
16029 struct drm_i915_gem_object *obj; 15982 struct drm_i915_gem_object *obj;
16030 int ret; 15983 int ret;
16031 15984
16032 intel_init_gt_powersave(dev); 15985 intel_init_gt_powersave(dev_priv);
16033 15986
16034 intel_modeset_init_hw(dev); 15987 intel_modeset_init_hw(dev);
16035 15988
16036 intel_setup_overlay(dev); 15989 intel_setup_overlay(dev_priv);
16037 15990
16038 /* 15991 /*
16039 * Make sure any fbs we allocated at startup are properly 15992 * Make sure any fbs we allocated at startup are properly
@@ -16076,7 +16029,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
16076 struct drm_i915_private *dev_priv = dev->dev_private; 16029 struct drm_i915_private *dev_priv = dev->dev_private;
16077 struct intel_connector *connector; 16030 struct intel_connector *connector;
16078 16031
16079 intel_disable_gt_powersave(dev); 16032 intel_disable_gt_powersave(dev_priv);
16080 16033
16081 intel_backlight_unregister(dev); 16034 intel_backlight_unregister(dev);
16082 16035
@@ -16106,9 +16059,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
16106 16059
16107 drm_mode_config_cleanup(dev); 16060 drm_mode_config_cleanup(dev);
16108 16061
16109 intel_cleanup_overlay(dev); 16062 intel_cleanup_overlay(dev_priv);
16110 16063
16111 intel_cleanup_gt_powersave(dev); 16064 intel_cleanup_gt_powersave(dev_priv);
16112 16065
16113 intel_teardown_gmbus(dev); 16066 intel_teardown_gmbus(dev);
16114} 16067}
@@ -16204,9 +16157,8 @@ struct intel_display_error_state {
16204}; 16157};
16205 16158
16206struct intel_display_error_state * 16159struct intel_display_error_state *
16207intel_display_capture_error_state(struct drm_device *dev) 16160intel_display_capture_error_state(struct drm_i915_private *dev_priv)
16208{ 16161{
16209 struct drm_i915_private *dev_priv = dev->dev_private;
16210 struct intel_display_error_state *error; 16162 struct intel_display_error_state *error;
16211 int transcoders[] = { 16163 int transcoders[] = {
16212 TRANSCODER_A, 16164 TRANSCODER_A,
@@ -16216,14 +16168,14 @@ intel_display_capture_error_state(struct drm_device *dev)
16216 }; 16168 };
16217 int i; 16169 int i;
16218 16170
16219 if (INTEL_INFO(dev)->num_pipes == 0) 16171 if (INTEL_INFO(dev_priv)->num_pipes == 0)
16220 return NULL; 16172 return NULL;
16221 16173
16222 error = kzalloc(sizeof(*error), GFP_ATOMIC); 16174 error = kzalloc(sizeof(*error), GFP_ATOMIC);
16223 if (error == NULL) 16175 if (error == NULL)
16224 return NULL; 16176 return NULL;
16225 16177
16226 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 16178 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16227 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); 16179 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
16228 16180
16229 for_each_pipe(dev_priv, i) { 16181 for_each_pipe(dev_priv, i) {
@@ -16239,25 +16191,25 @@ intel_display_capture_error_state(struct drm_device *dev)
16239 16191
16240 error->plane[i].control = I915_READ(DSPCNTR(i)); 16192 error->plane[i].control = I915_READ(DSPCNTR(i));
16241 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 16193 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16242 if (INTEL_INFO(dev)->gen <= 3) { 16194 if (INTEL_GEN(dev_priv) <= 3) {
16243 error->plane[i].size = I915_READ(DSPSIZE(i)); 16195 error->plane[i].size = I915_READ(DSPSIZE(i));
16244 error->plane[i].pos = I915_READ(DSPPOS(i)); 16196 error->plane[i].pos = I915_READ(DSPPOS(i));
16245 } 16197 }
16246 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) 16198 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16247 error->plane[i].addr = I915_READ(DSPADDR(i)); 16199 error->plane[i].addr = I915_READ(DSPADDR(i));
16248 if (INTEL_INFO(dev)->gen >= 4) { 16200 if (INTEL_GEN(dev_priv) >= 4) {
16249 error->plane[i].surface = I915_READ(DSPSURF(i)); 16201 error->plane[i].surface = I915_READ(DSPSURF(i));
16250 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 16202 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16251 } 16203 }
16252 16204
16253 error->pipe[i].source = I915_READ(PIPESRC(i)); 16205 error->pipe[i].source = I915_READ(PIPESRC(i));
16254 16206
16255 if (HAS_GMCH_DISPLAY(dev)) 16207 if (HAS_GMCH_DISPLAY(dev_priv))
16256 error->pipe[i].stat = I915_READ(PIPESTAT(i)); 16208 error->pipe[i].stat = I915_READ(PIPESTAT(i));
16257 } 16209 }
16258 16210
16259 /* Note: this does not include DSI transcoders. */ 16211 /* Note: this does not include DSI transcoders. */
16260 error->num_transcoders = INTEL_INFO(dev)->num_pipes; 16212 error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
16261 if (HAS_DDI(dev_priv)) 16213 if (HAS_DDI(dev_priv))
16262 error->num_transcoders++; /* Account for eDP. */ 16214 error->num_transcoders++; /* Account for eDP. */
16263 16215
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f192f58708c2..cccf9bc7c7d6 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -131,11 +131,6 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
131 enum pipe pipe); 131 enum pipe pipe);
132static void intel_dp_unset_edid(struct intel_dp *intel_dp); 132static void intel_dp_unset_edid(struct intel_dp *intel_dp);
133 133
134static unsigned int intel_dp_unused_lane_mask(int lane_count)
135{
136 return ~((1 << lane_count) - 1) & 0xf;
137}
138
139static int 134static int
140intel_dp_max_link_bw(struct intel_dp *intel_dp) 135intel_dp_max_link_bw(struct intel_dp *intel_dp)
141{ 136{
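The intel_dp_unused_lane_mask() helper removed above produced a bit per unused lane of the four-lane CHV PHY: ~((1 << lane_count) - 1) & 0xf. Its only caller here (chv_dp_pre_pll_enable, further down) now delegates to the shared PHY code, so the computation presumably moves there with this series; that is an assumption, the diff only shows the removal. A standalone check of the formula:

#include <assert.h>
#include <stdio.h>

/* Same formula as the removed intel_dp_unused_lane_mask(). */
static unsigned int unused_lane_mask(int lane_count)
{
        return ~((1u << lane_count) - 1) & 0xf;
}

int main(void)
{
        assert(unused_lane_mask(1) == 0xe);   /* lanes 1-3 unused */
        assert(unused_lane_mask(2) == 0xc);   /* lanes 2-3 unused */
        assert(unused_lane_mask(4) == 0x0);   /* all four lanes in use */
        printf("ok\n");
        return 0;
}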
@@ -775,6 +770,7 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
775 DP_AUX_CH_CTL_TIME_OUT_1600us | 770 DP_AUX_CH_CTL_TIME_OUT_1600us |
776 DP_AUX_CH_CTL_RECEIVE_ERROR | 771 DP_AUX_CH_CTL_RECEIVE_ERROR |
777 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 772 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
773 DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
778 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); 774 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
779} 775}
780 776
@@ -2460,50 +2456,6 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder)
2460 intel_dp_link_down(intel_dp); 2456 intel_dp_link_down(intel_dp);
2461} 2457}
2462 2458
2463static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2464 bool reset)
2465{
2466 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2467 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2468 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2469 enum pipe pipe = crtc->pipe;
2470 uint32_t val;
2471
2472 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2473 if (reset)
2474 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2475 else
2476 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2477 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2478
2479 if (crtc->config->lane_count > 2) {
2480 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2481 if (reset)
2482 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2483 else
2484 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2485 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2486 }
2487
2488 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2489 val |= CHV_PCS_REQ_SOFTRESET_EN;
2490 if (reset)
2491 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2492 else
2493 val |= DPIO_PCS_CLK_SOFT_RESET;
2494 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2495
2496 if (crtc->config->lane_count > 2) {
2497 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2498 val |= CHV_PCS_REQ_SOFTRESET_EN;
2499 if (reset)
2500 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2501 else
2502 val |= DPIO_PCS_CLK_SOFT_RESET;
2503 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2504 }
2505}
2506
2507static void chv_post_disable_dp(struct intel_encoder *encoder) 2459static void chv_post_disable_dp(struct intel_encoder *encoder)
2508{ 2460{
2509 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2461 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
@@ -2811,266 +2763,38 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2811 2763
2812static void vlv_pre_enable_dp(struct intel_encoder *encoder) 2764static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2813{ 2765{
2814 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2766 vlv_phy_pre_encoder_enable(encoder);
2815 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2816 struct drm_device *dev = encoder->base.dev;
2817 struct drm_i915_private *dev_priv = dev->dev_private;
2818 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2819 enum dpio_channel port = vlv_dport_to_channel(dport);
2820 int pipe = intel_crtc->pipe;
2821 u32 val;
2822
2823 mutex_lock(&dev_priv->sb_lock);
2824
2825 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2826 val = 0;
2827 if (pipe)
2828 val |= (1<<21);
2829 else
2830 val &= ~(1<<21);
2831 val |= 0x001000c4;
2832 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2833 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2834 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2835
2836 mutex_unlock(&dev_priv->sb_lock);
2837 2767
2838 intel_enable_dp(encoder); 2768 intel_enable_dp(encoder);
2839} 2769}
2840 2770
2841static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder) 2771static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2842{ 2772{
2843 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2844 struct drm_device *dev = encoder->base.dev;
2845 struct drm_i915_private *dev_priv = dev->dev_private;
2846 struct intel_crtc *intel_crtc =
2847 to_intel_crtc(encoder->base.crtc);
2848 enum dpio_channel port = vlv_dport_to_channel(dport);
2849 int pipe = intel_crtc->pipe;
2850
2851 intel_dp_prepare(encoder); 2773 intel_dp_prepare(encoder);
2852 2774
2853 /* Program Tx lane resets to default */ 2775 vlv_phy_pre_pll_enable(encoder);
2854 mutex_lock(&dev_priv->sb_lock);
2855 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2856 DPIO_PCS_TX_LANE2_RESET |
2857 DPIO_PCS_TX_LANE1_RESET);
2858 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2859 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2860 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2861 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2862 DPIO_PCS_CLK_SOFT_RESET);
2863
2864 /* Fix up inter-pair skew failure */
2865 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2866 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2867 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2868 mutex_unlock(&dev_priv->sb_lock);
2869} 2776}
2870 2777
2871static void chv_pre_enable_dp(struct intel_encoder *encoder) 2778static void chv_pre_enable_dp(struct intel_encoder *encoder)
2872{ 2779{
2873 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2780 chv_phy_pre_encoder_enable(encoder);
2874 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2875 struct drm_device *dev = encoder->base.dev;
2876 struct drm_i915_private *dev_priv = dev->dev_private;
2877 struct intel_crtc *intel_crtc =
2878 to_intel_crtc(encoder->base.crtc);
2879 enum dpio_channel ch = vlv_dport_to_channel(dport);
2880 int pipe = intel_crtc->pipe;
2881 int data, i, stagger;
2882 u32 val;
2883
2884 mutex_lock(&dev_priv->sb_lock);
2885
2886 /* allow hardware to manage TX FIFO reset source */
2887 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2888 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2889 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2890
2891 if (intel_crtc->config->lane_count > 2) {
2892 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2893 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2894 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2895 }
2896
2897 /* Program Tx lane latency optimal setting*/
2898 for (i = 0; i < intel_crtc->config->lane_count; i++) {
2899 /* Set the upar bit */
2900 if (intel_crtc->config->lane_count == 1)
2901 data = 0x0;
2902 else
2903 data = (i == 1) ? 0x0 : 0x1;
2904 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2905 data << DPIO_UPAR_SHIFT);
2906 }
2907
2908 /* Data lane stagger programming */
2909 if (intel_crtc->config->port_clock > 270000)
2910 stagger = 0x18;
2911 else if (intel_crtc->config->port_clock > 135000)
2912 stagger = 0xd;
2913 else if (intel_crtc->config->port_clock > 67500)
2914 stagger = 0x7;
2915 else if (intel_crtc->config->port_clock > 33750)
2916 stagger = 0x4;
2917 else
2918 stagger = 0x2;
2919
2920 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2921 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2922 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2923
2924 if (intel_crtc->config->lane_count > 2) {
2925 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2926 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2927 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2928 }
2929
2930 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2931 DPIO_LANESTAGGER_STRAP(stagger) |
2932 DPIO_LANESTAGGER_STRAP_OVRD |
2933 DPIO_TX1_STAGGER_MASK(0x1f) |
2934 DPIO_TX1_STAGGER_MULT(6) |
2935 DPIO_TX2_STAGGER_MULT(0));
2936
2937 if (intel_crtc->config->lane_count > 2) {
2938 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2939 DPIO_LANESTAGGER_STRAP(stagger) |
2940 DPIO_LANESTAGGER_STRAP_OVRD |
2941 DPIO_TX1_STAGGER_MASK(0x1f) |
2942 DPIO_TX1_STAGGER_MULT(7) |
2943 DPIO_TX2_STAGGER_MULT(5));
2944 }
2945
2946 /* Deassert data lane reset */
2947 chv_data_lane_soft_reset(encoder, false);
2948
2949 mutex_unlock(&dev_priv->sb_lock);
2950 2781
2951 intel_enable_dp(encoder); 2782 intel_enable_dp(encoder);
2952 2783
2953 /* Second common lane will stay alive on its own now */ 2784 /* Second common lane will stay alive on its own now */
2954 if (dport->release_cl2_override) { 2785 chv_phy_release_cl2_override(encoder);
2955 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2956 dport->release_cl2_override = false;
2957 }
2958} 2786}
2959 2787
2960static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) 2788static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2961{ 2789{
2962 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2963 struct drm_device *dev = encoder->base.dev;
2964 struct drm_i915_private *dev_priv = dev->dev_private;
2965 struct intel_crtc *intel_crtc =
2966 to_intel_crtc(encoder->base.crtc);
2967 enum dpio_channel ch = vlv_dport_to_channel(dport);
2968 enum pipe pipe = intel_crtc->pipe;
2969 unsigned int lane_mask =
2970 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
2971 u32 val;
2972
2973 intel_dp_prepare(encoder); 2790 intel_dp_prepare(encoder);
2974 2791
2975 /* 2792 chv_phy_pre_pll_enable(encoder);
2976 * Must trick the second common lane into life.
2977 * Otherwise we can't even access the PLL.
2978 */
2979 if (ch == DPIO_CH0 && pipe == PIPE_B)
2980 dport->release_cl2_override =
2981 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2982
2983 chv_phy_powergate_lanes(encoder, true, lane_mask);
2984
2985 mutex_lock(&dev_priv->sb_lock);
2986
2987 /* Assert data lane reset */
2988 chv_data_lane_soft_reset(encoder, true);
2989
2990 /* program left/right clock distribution */
2991 if (pipe != PIPE_B) {
2992 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2993 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2994 if (ch == DPIO_CH0)
2995 val |= CHV_BUFLEFTENA1_FORCE;
2996 if (ch == DPIO_CH1)
2997 val |= CHV_BUFRIGHTENA1_FORCE;
2998 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2999 } else {
3000 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3001 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3002 if (ch == DPIO_CH0)
3003 val |= CHV_BUFLEFTENA2_FORCE;
3004 if (ch == DPIO_CH1)
3005 val |= CHV_BUFRIGHTENA2_FORCE;
3006 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3007 }
3008
3009 /* program clock channel usage */
3010 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3011 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3012 if (pipe != PIPE_B)
3013 val &= ~CHV_PCS_USEDCLKCHANNEL;
3014 else
3015 val |= CHV_PCS_USEDCLKCHANNEL;
3016 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3017
3018 if (intel_crtc->config->lane_count > 2) {
3019 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3020 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3021 if (pipe != PIPE_B)
3022 val &= ~CHV_PCS_USEDCLKCHANNEL;
3023 else
3024 val |= CHV_PCS_USEDCLKCHANNEL;
3025 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3026 }
3027
3028 /*
3029 * This a a bit weird since generally CL
3030 * matches the pipe, but here we need to
3031 * pick the CL based on the port.
3032 */
3033 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3034 if (pipe != PIPE_B)
3035 val &= ~CHV_CMN_USEDCLKCHANNEL;
3036 else
3037 val |= CHV_CMN_USEDCLKCHANNEL;
3038 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3039
3040 mutex_unlock(&dev_priv->sb_lock);
3041} 2793}
3042 2794
3043static void chv_dp_post_pll_disable(struct intel_encoder *encoder) 2795static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3044{ 2796{
3045 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2797 chv_phy_post_pll_disable(encoder);
3046 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3047 u32 val;
3048
3049 mutex_lock(&dev_priv->sb_lock);
3050
3051 /* disable left/right clock distribution */
3052 if (pipe != PIPE_B) {
3053 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3054 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3055 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3056 } else {
3057 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3058 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3059 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3060 }
3061
3062 mutex_unlock(&dev_priv->sb_lock);
3063
3064 /*
3065 * Leave the power down bit cleared for at least one
3066 * lane so that chv_powergate_phy_ch() will power
3067 * on something when the channel is otherwise unused.
3068 * When the port is off and the override is removed
3069 * the lanes power down anyway, so otherwise it doesn't
3070 * really matter what the state of power down bits is
3071 * after this.
3072 */
3073 chv_phy_powergate_lanes(encoder, false, 0x0);
3074} 2798}
3075 2799
3076/* 2800/*
@@ -3178,16 +2902,10 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3178 2902
3179static uint32_t vlv_signal_levels(struct intel_dp *intel_dp) 2903static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3180{ 2904{
3181 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2905 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3182 struct drm_i915_private *dev_priv = dev->dev_private;
3183 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3184 struct intel_crtc *intel_crtc =
3185 to_intel_crtc(dport->base.base.crtc);
3186 unsigned long demph_reg_value, preemph_reg_value, 2906 unsigned long demph_reg_value, preemph_reg_value,
3187 uniqtranscale_reg_value; 2907 uniqtranscale_reg_value;
3188 uint8_t train_set = intel_dp->train_set[0]; 2908 uint8_t train_set = intel_dp->train_set[0];
3189 enum dpio_channel port = vlv_dport_to_channel(dport);
3190 int pipe = intel_crtc->pipe;
3191 2909
3192 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 2910 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3193 case DP_TRAIN_PRE_EMPH_LEVEL_0: 2911 case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3262,37 +2980,18 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3262 return 0; 2980 return 0;
3263 } 2981 }
3264 2982
3265 mutex_lock(&dev_priv->sb_lock); 2983 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3266 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000); 2984 uniqtranscale_reg_value, 0);
3267 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3268 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3269 uniqtranscale_reg_value);
3270 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3271 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3272 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3273 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3274 mutex_unlock(&dev_priv->sb_lock);
3275 2985
3276 return 0; 2986 return 0;
3277} 2987}
3278 2988
3279static bool chv_need_uniq_trans_scale(uint8_t train_set)
3280{
3281 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3282 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3283}
3284
3285static uint32_t chv_signal_levels(struct intel_dp *intel_dp) 2989static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3286{ 2990{
3287 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2991 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3288 struct drm_i915_private *dev_priv = dev->dev_private; 2992 u32 deemph_reg_value, margin_reg_value;
3289 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 2993 bool uniq_trans_scale = false;
3290 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3291 u32 deemph_reg_value, margin_reg_value, val;
3292 uint8_t train_set = intel_dp->train_set[0]; 2994 uint8_t train_set = intel_dp->train_set[0];
3293 enum dpio_channel ch = vlv_dport_to_channel(dport);
3294 enum pipe pipe = intel_crtc->pipe;
3295 int i;
3296 2995
3297 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 2996 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3298 case DP_TRAIN_PRE_EMPH_LEVEL_0: 2997 case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3312,7 +3011,7 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3312 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3011 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3313 deemph_reg_value = 128; 3012 deemph_reg_value = 128;
3314 margin_reg_value = 154; 3013 margin_reg_value = 154;
3315 /* FIXME extra to set for 1200 */ 3014 uniq_trans_scale = true;
3316 break; 3015 break;
3317 default: 3016 default:
3318 return 0; 3017 return 0;
@@ -3364,88 +3063,8 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3364 return 0; 3063 return 0;
3365 } 3064 }
3366 3065
3367 mutex_lock(&dev_priv->sb_lock); 3066 chv_set_phy_signal_level(encoder, deemph_reg_value,
3368 3067 margin_reg_value, uniq_trans_scale);
3369 /* Clear calc init */
3370 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3371 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3372 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3373 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3374 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3375
3376 if (intel_crtc->config->lane_count > 2) {
3377 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3378 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3379 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3380 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3381 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3382 }
3383
3384 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3385 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3386 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3387 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3388
3389 if (intel_crtc->config->lane_count > 2) {
3390 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3391 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3392 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3393 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3394 }
3395
3396 /* Program swing deemph */
3397 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3398 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3399 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3400 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3401 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3402 }
3403
3404 /* Program swing margin */
3405 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3406 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3407
3408 val &= ~DPIO_SWING_MARGIN000_MASK;
3409 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3410
3411 /*
3412 * Supposedly this value shouldn't matter when unique transition
3413 * scale is disabled, but in fact it does matter. Let's just
3414 * always program the same value and hope it's OK.
3415 */
3416 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3417 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3418
3419 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3420 }
3421
3422 /*
3423 * The document said it needs to set bit 27 for ch0 and bit 26
3424 * for ch1. Might be a typo in the doc.
3425 * For now, for this unique transition scale selection, set bit
3426 * 27 for ch0 and ch1.
3427 */
3428 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3429 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3430 if (chv_need_uniq_trans_scale(train_set))
3431 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3432 else
3433 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3434 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3435 }
3436
3437 /* Start swing calculation */
3438 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3439 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3440 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3441
3442 if (intel_crtc->config->lane_count > 2) {
3443 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3444 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3445 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3446 }
3447
3448 mutex_unlock(&dev_priv->sb_lock);
3449 3068
3450 return 0; 3069 return 0;
3451} 3070}
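An aside on the hunk above: the refactor folds chv_need_uniq_trans_scale() into a local uniq_trans_scale flag that is set directly by the swing-3/pre-emphasis-0 case of the switch. A minimal standalone sketch of the equivalent predicate — the DP training-set masks are assumed from the DP helper headers, and the helper name here is mine, not the driver's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed field layout of the DP training-set byte (see the DP helpers). */
#define DP_TRAIN_VOLTAGE_SWING_MASK	0x3
#define DP_TRAIN_VOLTAGE_SWING_LEVEL_3	0x3
#define DP_TRAIN_PRE_EMPHASIS_MASK	(0x3 << 3)
#define DP_TRAIN_PRE_EMPH_LEVEL_0	(0 << 3)

/* Only the swing-3 / pre-emphasis-0 combination needs the unique
 * transition scale, which is exactly the case that now sets
 * uniq_trans_scale = true in the hunk above. */
static bool need_uniq_trans_scale(uint8_t train_set)
{
	return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
	       (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
}

int main(void)
{
	printf("swing 3 / pre-emph 0 -> %d\n", need_uniq_trans_scale(0x03));
	printf("swing 2 / pre-emph 1 -> %d\n", need_uniq_trans_scale(0x0a));
	return 0;
}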
@@ -3714,7 +3333,6 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3714 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3333 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3715 struct drm_device *dev = dig_port->base.base.dev; 3334 struct drm_device *dev = dig_port->base.base.dev;
3716 struct drm_i915_private *dev_priv = dev->dev_private; 3335 struct drm_i915_private *dev_priv = dev->dev_private;
3717 uint8_t rev;
3718 3336
3719 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, 3337 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
3720 sizeof(intel_dp->dpcd)) < 0) 3338 sizeof(intel_dp->dpcd)) < 0)
@@ -3771,6 +3389,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3771 DRM_DEBUG_KMS("PSR2 %s on sink", 3389 DRM_DEBUG_KMS("PSR2 %s on sink",
3772 dev_priv->psr.psr2_support ? "supported" : "not supported"); 3390 dev_priv->psr.psr2_support ? "supported" : "not supported");
3773 } 3391 }
3392
3393 /* Read the eDP Display control capabilities registers */
3394 memset(intel_dp->edp_dpcd, 0, sizeof(intel_dp->edp_dpcd));
3395 if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3396 (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
3397 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
3398 sizeof(intel_dp->edp_dpcd)))
3399 DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
3400 intel_dp->edp_dpcd);
3774 } 3401 }
3775 3402
3776 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n", 3403 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
@@ -3778,10 +3405,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3778 yesno(drm_dp_tps3_supported(intel_dp->dpcd))); 3405 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3779 3406
3780 /* Intermediate frequency support */ 3407 /* Intermediate frequency support */
3781	if (is_edp(intel_dp) &&				3408	if (is_edp(intel_dp) && (intel_dp->edp_dpcd[0] >= 0x03)) { /* eDP v1.4 or higher */
3782 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3783 (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3784 (rev >= 0x03)) { /* eDp v1.4 or higher */
3785 __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; 3409 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3786 int i; 3410 int i;
3787 3411
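For reference, the intel_dp.c hunks above cache the eDP display-control DPCD block in intel_dp->edp_dpcd and gate the intermediate link-rate read on byte 0 (DP_EDP_DPCD_REV >= 0x03, i.e. eDP 1.4). A self-contained sketch of just that gating check, with the capability bit and revision code assumed from the DRM DP helper definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed from the DRM DP helpers: display-control capability bit in
 * DP_EDP_CONFIGURATION_CAP, and the eDP 1.4 revision code. */
#define DP_DPCD_DISPLAY_CONTROL_CAPABLE	(1 << 3)
#define EDP_DPCD_REV_14			0x03

/* Same condition as the hunk above: only read DP_SUPPORTED_LINK_RATES
 * when the sink exposes the display-control block and is eDP 1.4+. */
static bool edp_has_intermediate_rates(uint8_t edp_configuration_cap,
				       uint8_t edp_dpcd_rev)
{
	return (edp_configuration_cap & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	       edp_dpcd_rev >= EDP_DPCD_REV_14;
}

int main(void)
{
	printf("cap set, rev 0x03 -> %d\n",
	       edp_has_intermediate_rates(DP_DPCD_DISPLAY_CONTROL_CAPABLE, 0x03));
	printf("cap set, rev 0x02 -> %d\n",
	       edp_has_intermediate_rates(DP_DPCD_DISPLAY_CONTROL_CAPABLE, 0x02));
	return 0;
}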
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
new file mode 100644
index 000000000000..6532e226db29
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -0,0 +1,172 @@
1/*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include "intel_drv.h"
26
27static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
28{
29 uint8_t reg_val = 0;
30
31 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
32 &reg_val) < 0) {
33 DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
34 DP_EDP_DISPLAY_CONTROL_REGISTER);
35 return;
36 }
37 if (enable)
38 reg_val |= DP_EDP_BACKLIGHT_ENABLE;
39 else
40 reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE);
41
42 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
43 reg_val) != 1) {
44 DRM_DEBUG_KMS("Failed to %s aux backlight\n",
45 enable ? "enable" : "disable");
46 }
47}
48
49/*
 50 * Read the current backlight value from DPCD register(s) based
 51 * on whether 8-bit (MSB) or 16-bit (MSB and LSB) values are supported
52 */
53static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector)
54{
55 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
56 uint8_t read_val[2] = { 0x0 };
57 uint16_t level = 0;
58
59 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
60 &read_val, sizeof(read_val)) < 0) {
61 DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
62 DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
63 return 0;
64 }
65 level = read_val[0];
66 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
67 level = (read_val[0] << 8 | read_val[1]);
68
69 return level;
70}
71
72/*
 73 * Sends the current backlight level over the aux channel, checking whether
 74 * an 8-bit or 16-bit value (MSB and LSB) is in use
75 */
76static void
77intel_dp_aux_set_backlight(struct intel_connector *connector, u32 level)
78{
79 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
80 uint8_t vals[2] = { 0x0 };
81
82 vals[0] = level;
83
84 /* Write the MSB and/or LSB */
85 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) {
86 vals[0] = (level & 0xFF00) >> 8;
87 vals[1] = (level & 0xFF);
88 }
89 if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
90 vals, sizeof(vals)) < 0) {
91 DRM_DEBUG_KMS("Failed to write aux backlight level\n");
92 return;
93 }
94}
95
96static void intel_dp_aux_enable_backlight(struct intel_connector *connector)
97{
98 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
99 uint8_t dpcd_buf = 0;
100
101 set_aux_backlight_enable(intel_dp, true);
102
103 if ((drm_dp_dpcd_readb(&intel_dp->aux,
104 DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) == 1) &&
105 ((dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) ==
106 DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET))
107 drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
108 (dpcd_buf | DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD));
109}
110
111static void intel_dp_aux_disable_backlight(struct intel_connector *connector)
112{
113 set_aux_backlight_enable(enc_to_intel_dp(&connector->encoder->base), false);
114}
115
116static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
117 enum pipe pipe)
118{
119 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
120 struct intel_panel *panel = &connector->panel;
121
122 intel_dp_aux_enable_backlight(connector);
123
124 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
125 panel->backlight.max = 0xFFFF;
126 else
127 panel->backlight.max = 0xFF;
128
129 panel->backlight.min = 0;
130 panel->backlight.level = intel_dp_aux_get_backlight(connector);
131
132 panel->backlight.enabled = panel->backlight.level != 0;
133
134 return 0;
135}
136
137static bool
138intel_dp_aux_display_control_capable(struct intel_connector *connector)
139{
140 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
141
142 /* Check the eDP Display control capabilities registers to determine if
143 * the panel can support backlight control over the aux channel
144 */
145 if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
146 (intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
147 !((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_PIN_ENABLE_CAP) ||
148 (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP))) {
149 DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
150 return true;
151 }
152 return false;
153}
154
155int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
156{
157 struct intel_panel *panel = &intel_connector->panel;
158
159 if (!i915.enable_dpcd_backlight)
160 return -ENODEV;
161
162 if (!intel_dp_aux_display_control_capable(intel_connector))
163 return -ENODEV;
164
165 panel->backlight.setup = intel_dp_aux_setup_backlight;
166 panel->backlight.enable = intel_dp_aux_enable_backlight;
167 panel->backlight.disable = intel_dp_aux_disable_backlight;
168 panel->backlight.set = intel_dp_aux_set_backlight;
169 panel->backlight.get = intel_dp_aux_get_backlight;
170
171 return 0;
172}
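A note on the new intel_dp_aux_backlight.c above: the set/get helpers switch between a single MSB byte and an MSB+LSB pair depending on the DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT capability. A small self-contained sketch of that packing and unpacking, with helper names that are illustrative rather than part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Pack a brightness level into the byte layout written starting at
 * DP_EDP_BACKLIGHT_BRIGHTNESS_MSB: a single byte when only 8-bit control
 * is available, MSB then LSB when the 16-bit byte-count capability is set. */
static void pack_brightness(uint32_t level, int has_16bit, uint8_t vals[2])
{
	vals[0] = level & 0xff;
	vals[1] = 0;
	if (has_16bit) {
		vals[0] = (level >> 8) & 0xff;
		vals[1] = level & 0xff;
	}
}

/* Reverse of the above, mirroring intel_dp_aux_get_backlight(). */
static uint16_t unpack_brightness(const uint8_t read_val[2], int has_16bit)
{
	if (has_16bit)
		return (read_val[0] << 8) | read_val[1];
	return read_val[0];
}

int main(void)
{
	uint8_t vals[2];

	pack_brightness(0xabcd, 1, vals);
	printf("16-bit: msb=0x%02x lsb=0x%02x -> 0x%04x\n",
	       vals[0], vals[1], unpack_brightness(vals, 1));

	pack_brightness(0x7f, 0, vals);
	printf("8-bit:  msb=0x%02x          -> 0x%04x\n",
	       vals[0], unpack_brightness(vals, 0));
	return 0;
}

The setup helper above picks panel->backlight.max (0xFFFF vs. 0xFF) from the same capability bit, so the reported brightness range stays consistent with whichever width the sink accepts.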
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
new file mode 100644
index 000000000000..288da35572b4
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -0,0 +1,470 @@
1/*
2 * Copyright © 2014-2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#include "intel_drv.h"
25
26void chv_set_phy_signal_level(struct intel_encoder *encoder,
27 u32 deemph_reg_value, u32 margin_reg_value,
28 bool uniq_trans_scale)
29{
30 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
31 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
32 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
33 enum dpio_channel ch = vlv_dport_to_channel(dport);
34 enum pipe pipe = intel_crtc->pipe;
35 u32 val;
36 int i;
37
38 mutex_lock(&dev_priv->sb_lock);
39
40 /* Clear calc init */
41 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
42 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
43 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
44 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
45 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
46
47 if (intel_crtc->config->lane_count > 2) {
48 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
49 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
50 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
51 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
52 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
53 }
54
55 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
56 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
57 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
58 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
59
60 if (intel_crtc->config->lane_count > 2) {
61 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
62 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
63 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
64 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
65 }
66
67 /* Program swing deemph */
68 for (i = 0; i < intel_crtc->config->lane_count; i++) {
69 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
70 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
71 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
72 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
73 }
74
75 /* Program swing margin */
76 for (i = 0; i < intel_crtc->config->lane_count; i++) {
77 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
78
79 val &= ~DPIO_SWING_MARGIN000_MASK;
80 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
81
82 /*
83 * Supposedly this value shouldn't matter when unique transition
84 * scale is disabled, but in fact it does matter. Let's just
85 * always program the same value and hope it's OK.
86 */
87 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
88 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
89
90 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
91 }
92
93 /*
94 * The document said it needs to set bit 27 for ch0 and bit 26
95 * for ch1. Might be a typo in the doc.
96 * For now, for this unique transition scale selection, set bit
97 * 27 for ch0 and ch1.
98 */
99 for (i = 0; i < intel_crtc->config->lane_count; i++) {
100 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
101 if (uniq_trans_scale)
102 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
103 else
104 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
105 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
106 }
107
108 /* Start swing calculation */
109 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
110 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
111 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
112
113 if (intel_crtc->config->lane_count > 2) {
114 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
115 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
116 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
117 }
118
119 mutex_unlock(&dev_priv->sb_lock);
120
121}
122
123void chv_data_lane_soft_reset(struct intel_encoder *encoder,
124 bool reset)
125{
126 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
127 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
128 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
129 enum pipe pipe = crtc->pipe;
130 uint32_t val;
131
132 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
133 if (reset)
134 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
135 else
136 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
137 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
138
139 if (crtc->config->lane_count > 2) {
140 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
141 if (reset)
142 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
143 else
144 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
145 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
146 }
147
148 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
149 val |= CHV_PCS_REQ_SOFTRESET_EN;
150 if (reset)
151 val &= ~DPIO_PCS_CLK_SOFT_RESET;
152 else
153 val |= DPIO_PCS_CLK_SOFT_RESET;
154 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
155
156 if (crtc->config->lane_count > 2) {
157 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
158 val |= CHV_PCS_REQ_SOFTRESET_EN;
159 if (reset)
160 val &= ~DPIO_PCS_CLK_SOFT_RESET;
161 else
162 val |= DPIO_PCS_CLK_SOFT_RESET;
163 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
164 }
165}
166
167void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
168{
169 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
170 struct drm_device *dev = encoder->base.dev;
171 struct drm_i915_private *dev_priv = dev->dev_private;
172 struct intel_crtc *intel_crtc =
173 to_intel_crtc(encoder->base.crtc);
174 enum dpio_channel ch = vlv_dport_to_channel(dport);
175 enum pipe pipe = intel_crtc->pipe;
176 unsigned int lane_mask =
177 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
178 u32 val;
179
180 /*
181 * Must trick the second common lane into life.
182 * Otherwise we can't even access the PLL.
183 */
184 if (ch == DPIO_CH0 && pipe == PIPE_B)
185 dport->release_cl2_override =
186 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
187
188 chv_phy_powergate_lanes(encoder, true, lane_mask);
189
190 mutex_lock(&dev_priv->sb_lock);
191
192 /* Assert data lane reset */
193 chv_data_lane_soft_reset(encoder, true);
194
195 /* program left/right clock distribution */
196 if (pipe != PIPE_B) {
197 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
198 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
199 if (ch == DPIO_CH0)
200 val |= CHV_BUFLEFTENA1_FORCE;
201 if (ch == DPIO_CH1)
202 val |= CHV_BUFRIGHTENA1_FORCE;
203 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
204 } else {
205 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
206 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
207 if (ch == DPIO_CH0)
208 val |= CHV_BUFLEFTENA2_FORCE;
209 if (ch == DPIO_CH1)
210 val |= CHV_BUFRIGHTENA2_FORCE;
211 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
212 }
213
214 /* program clock channel usage */
215 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
216 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
217 if (pipe != PIPE_B)
218 val &= ~CHV_PCS_USEDCLKCHANNEL;
219 else
220 val |= CHV_PCS_USEDCLKCHANNEL;
221 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
222
223 if (intel_crtc->config->lane_count > 2) {
224 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
225 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
226 if (pipe != PIPE_B)
227 val &= ~CHV_PCS_USEDCLKCHANNEL;
228 else
229 val |= CHV_PCS_USEDCLKCHANNEL;
230 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
231 }
232
233 /*
 234	 * This is a bit weird since generally CL
235 * matches the pipe, but here we need to
236 * pick the CL based on the port.
237 */
238 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
239 if (pipe != PIPE_B)
240 val &= ~CHV_CMN_USEDCLKCHANNEL;
241 else
242 val |= CHV_CMN_USEDCLKCHANNEL;
243 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
244
245 mutex_unlock(&dev_priv->sb_lock);
246}
247
248void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
249{
250 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
251 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
252 struct drm_device *dev = encoder->base.dev;
253 struct drm_i915_private *dev_priv = dev->dev_private;
254 struct intel_crtc *intel_crtc =
255 to_intel_crtc(encoder->base.crtc);
256 enum dpio_channel ch = vlv_dport_to_channel(dport);
257 int pipe = intel_crtc->pipe;
258 int data, i, stagger;
259 u32 val;
260
261 mutex_lock(&dev_priv->sb_lock);
262
263 /* allow hardware to manage TX FIFO reset source */
264 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
265 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
266 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
267
268 if (intel_crtc->config->lane_count > 2) {
269 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
270 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
271 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
272 }
273
 274	/* Program Tx lane latency optimal setting */
275 for (i = 0; i < intel_crtc->config->lane_count; i++) {
276 /* Set the upar bit */
277 if (intel_crtc->config->lane_count == 1)
278 data = 0x0;
279 else
280 data = (i == 1) ? 0x0 : 0x1;
281 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
282 data << DPIO_UPAR_SHIFT);
283 }
284
285 /* Data lane stagger programming */
286 if (intel_crtc->config->port_clock > 270000)
287 stagger = 0x18;
288 else if (intel_crtc->config->port_clock > 135000)
289 stagger = 0xd;
290 else if (intel_crtc->config->port_clock > 67500)
291 stagger = 0x7;
292 else if (intel_crtc->config->port_clock > 33750)
293 stagger = 0x4;
294 else
295 stagger = 0x2;
296
297 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
298 val |= DPIO_TX2_STAGGER_MASK(0x1f);
299 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
300
301 if (intel_crtc->config->lane_count > 2) {
302 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
303 val |= DPIO_TX2_STAGGER_MASK(0x1f);
304 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
305 }
306
307 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
308 DPIO_LANESTAGGER_STRAP(stagger) |
309 DPIO_LANESTAGGER_STRAP_OVRD |
310 DPIO_TX1_STAGGER_MASK(0x1f) |
311 DPIO_TX1_STAGGER_MULT(6) |
312 DPIO_TX2_STAGGER_MULT(0));
313
314 if (intel_crtc->config->lane_count > 2) {
315 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
316 DPIO_LANESTAGGER_STRAP(stagger) |
317 DPIO_LANESTAGGER_STRAP_OVRD |
318 DPIO_TX1_STAGGER_MASK(0x1f) |
319 DPIO_TX1_STAGGER_MULT(7) |
320 DPIO_TX2_STAGGER_MULT(5));
321 }
322
323 /* Deassert data lane reset */
324 chv_data_lane_soft_reset(encoder, false);
325
326 mutex_unlock(&dev_priv->sb_lock);
327}
328
329void chv_phy_release_cl2_override(struct intel_encoder *encoder)
330{
331 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
332 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
333
334 if (dport->release_cl2_override) {
335 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
336 dport->release_cl2_override = false;
337 }
338}
339
340void chv_phy_post_pll_disable(struct intel_encoder *encoder)
341{
342 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
343 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
344 u32 val;
345
346 mutex_lock(&dev_priv->sb_lock);
347
348 /* disable left/right clock distribution */
349 if (pipe != PIPE_B) {
350 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
351 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
352 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
353 } else {
354 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
355 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
356 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
357 }
358
359 mutex_unlock(&dev_priv->sb_lock);
360
361 /*
362 * Leave the power down bit cleared for at least one
 363	 * lane so that chv_phy_powergate_ch() will power
364 * on something when the channel is otherwise unused.
365 * When the port is off and the override is removed
366 * the lanes power down anyway, so otherwise it doesn't
367 * really matter what the state of power down bits is
368 * after this.
369 */
370 chv_phy_powergate_lanes(encoder, false, 0x0);
371}
372
373void vlv_set_phy_signal_level(struct intel_encoder *encoder,
374 u32 demph_reg_value, u32 preemph_reg_value,
375 u32 uniqtranscale_reg_value, u32 tx3_demph)
376{
377 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
378 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
379 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
380 enum dpio_channel port = vlv_dport_to_channel(dport);
381 int pipe = intel_crtc->pipe;
382
383 mutex_lock(&dev_priv->sb_lock);
384 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
385 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
386 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
387 uniqtranscale_reg_value);
388 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
389
390 if (tx3_demph)
391 vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
392
393 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
394 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
395 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
396 mutex_unlock(&dev_priv->sb_lock);
397}
398
399void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
400{
401 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
402 struct drm_device *dev = encoder->base.dev;
403 struct drm_i915_private *dev_priv = dev->dev_private;
404 struct intel_crtc *intel_crtc =
405 to_intel_crtc(encoder->base.crtc);
406 enum dpio_channel port = vlv_dport_to_channel(dport);
407 int pipe = intel_crtc->pipe;
408
409 /* Program Tx lane resets to default */
410 mutex_lock(&dev_priv->sb_lock);
411 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
412 DPIO_PCS_TX_LANE2_RESET |
413 DPIO_PCS_TX_LANE1_RESET);
414 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
415 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
416 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
417 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
418 DPIO_PCS_CLK_SOFT_RESET);
419
420 /* Fix up inter-pair skew failure */
421 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
422 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
423 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
424 mutex_unlock(&dev_priv->sb_lock);
425}
426
427void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
428{
429 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
430 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
431 struct drm_device *dev = encoder->base.dev;
432 struct drm_i915_private *dev_priv = dev->dev_private;
433 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
434 enum dpio_channel port = vlv_dport_to_channel(dport);
435 int pipe = intel_crtc->pipe;
436 u32 val;
437
438 mutex_lock(&dev_priv->sb_lock);
439
440 /* Enable clock channels for this port */
441 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
442 val = 0;
443 if (pipe)
444 val |= (1<<21);
445 else
446 val &= ~(1<<21);
447 val |= 0x001000c4;
448 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
449
450 /* Program lane clock */
451 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
452 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
453
454 mutex_unlock(&dev_priv->sb_lock);
455}
456
457void vlv_phy_reset_lanes(struct intel_encoder *encoder)
458{
459 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
460 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
461 struct intel_crtc *intel_crtc =
462 to_intel_crtc(encoder->base.crtc);
463 enum dpio_channel port = vlv_dport_to_channel(dport);
464 int pipe = intel_crtc->pipe;
465
466 mutex_lock(&dev_priv->sb_lock);
467 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
468 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
469 mutex_unlock(&dev_priv->sb_lock);
470}
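One detail worth calling out in the new intel_dpio_phy.c: chv_phy_pre_encoder_enable() selects the data-lane stagger value from the port clock with a simple threshold ladder. A standalone sketch of just that selection, with the thresholds copied from the function and a made-up helper name:

#include <stdio.h>

/* Same threshold ladder as the data-lane stagger programming above:
 * faster link rates get a larger stagger value. Port clock is in kHz. */
static int chv_stagger_for_port_clock(int port_clock)
{
	if (port_clock > 270000)
		return 0x18;
	else if (port_clock > 135000)
		return 0xd;
	else if (port_clock > 67500)
		return 0x7;
	else if (port_clock > 33750)
		return 0x4;
	else
		return 0x2;
}

int main(void)
{
	const int clocks[] = { 540000, 270000, 162000, 81000, 25175 };
	unsigned int i;

	for (i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++)
		printf("%6d kHz -> stagger 0x%x\n",
		       clocks[i], chv_stagger_for_port_clock(clocks[i]));
	return 0;
}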
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 3ac705936b04..c283ba4babe8 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1508,7 +1508,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
1508 int clock = crtc_state->port_clock; 1508 int clock = crtc_state->port_clock;
1509 1509
1510 if (encoder->type == INTEL_OUTPUT_HDMI) { 1510 if (encoder->type == INTEL_OUTPUT_HDMI) {
1511 intel_clock_t best_clock; 1511 struct dpll best_clock;
1512 1512
1513 /* Calculate HDMI div */ 1513 /* Calculate HDMI div */
1514 /* 1514 /*
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a28b4aac1e02..97de5e05890a 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -266,7 +266,7 @@ struct intel_connector {
266 struct intel_dp *mst_port; 266 struct intel_dp *mst_port;
267}; 267};
268 268
269typedef struct dpll { 269struct dpll {
270 /* given values */ 270 /* given values */
271 int n; 271 int n;
272 int m1, m2; 272 int m1, m2;
@@ -276,7 +276,7 @@ typedef struct dpll {
276 int vco; 276 int vco;
277 int m; 277 int m;
278 int p; 278 int p;
279} intel_clock_t; 279};
280 280
281struct intel_atomic_state { 281struct intel_atomic_state {
282 struct drm_atomic_state base; 282 struct drm_atomic_state base;
@@ -291,17 +291,29 @@ struct intel_atomic_state {
291 291
292 bool dpll_set, modeset; 292 bool dpll_set, modeset;
293 293
294 /*
295 * Does this transaction change the pipes that are active? This mask
 296	 * tracks which CRTCs have changed their active state at the end of
297 * the transaction (not counting the temporary disable during modesets).
298 * This mask should only be non-zero when intel_state->modeset is true,
299 * but the converse is not necessarily true; simply changing a mode may
 300	 * not flip the final active status of any CRTCs.
301 */
302 unsigned int active_pipe_changes;
303
294 unsigned int active_crtcs; 304 unsigned int active_crtcs;
295 unsigned int min_pixclk[I915_MAX_PIPES]; 305 unsigned int min_pixclk[I915_MAX_PIPES];
296 306
297 struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS]; 307 struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
298 struct intel_wm_config wm_config;
299 308
300 /* 309 /*
301 * Current watermarks can't be trusted during hardware readout, so 310 * Current watermarks can't be trusted during hardware readout, so
302 * don't bother calculating intermediate watermarks. 311 * don't bother calculating intermediate watermarks.
303 */ 312 */
304 bool skip_intermediate_wm; 313 bool skip_intermediate_wm;
314
315 /* Gen9+ only */
316 struct skl_wm_values wm_results;
305}; 317};
306 318
307struct intel_plane_state { 319struct intel_plane_state {
@@ -405,6 +417,48 @@ struct skl_pipe_wm {
405 uint32_t linetime; 417 uint32_t linetime;
406}; 418};
407 419
420struct intel_crtc_wm_state {
421 union {
422 struct {
423 /*
424 * Intermediate watermarks; these can be
425 * programmed immediately since they satisfy
426 * both the current configuration we're
427 * switching away from and the new
428 * configuration we're switching to.
429 */
430 struct intel_pipe_wm intermediate;
431
432 /*
433 * Optimal watermarks, programmed post-vblank
434 * when this state is committed.
435 */
436 struct intel_pipe_wm optimal;
437 } ilk;
438
439 struct {
440 /* gen9+ only needs 1-step wm programming */
441 struct skl_pipe_wm optimal;
442
443 /* cached plane data rate */
444 unsigned plane_data_rate[I915_MAX_PLANES];
445 unsigned plane_y_data_rate[I915_MAX_PLANES];
446
447 /* minimum block allocation */
448 uint16_t minimum_blocks[I915_MAX_PLANES];
449 uint16_t minimum_y_blocks[I915_MAX_PLANES];
450 } skl;
451 };
452
453 /*
454 * Platforms with two-step watermark programming will need to
455 * update watermark programming post-vblank to switch from the
456 * safe intermediate watermarks to the optimal final
457 * watermarks.
458 */
459 bool need_postvbl_update;
460};
461
408struct intel_crtc_state { 462struct intel_crtc_state {
409 struct drm_crtc_state base; 463 struct drm_crtc_state base;
410 464
@@ -558,32 +612,7 @@ struct intel_crtc_state {
558 /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */ 612 /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */
559 bool disable_lp_wm; 613 bool disable_lp_wm;
560 614
561 struct { 615 struct intel_crtc_wm_state wm;
562 /*
563 * Optimal watermarks, programmed post-vblank when this state
564 * is committed.
565 */
566 union {
567 struct intel_pipe_wm ilk;
568 struct skl_pipe_wm skl;
569 } optimal;
570
571 /*
572 * Intermediate watermarks; these can be programmed immediately
573 * since they satisfy both the current configuration we're
574 * switching away from and the new configuration we're switching
575 * to.
576 */
577 struct intel_pipe_wm intermediate;
578
579 /*
580 * Platforms with two-step watermark programming will need to
581 * update watermark programming post-vblank to switch from the
582 * safe intermediate watermarks to the optimal final
583 * watermarks.
584 */
585 bool need_postvbl_update;
586 } wm;
587 616
588 /* Gamma mode programmed on the pipe */ 617 /* Gamma mode programmed on the pipe */
589 uint32_t gamma_mode; 618 uint32_t gamma_mode;
@@ -598,14 +627,6 @@ struct vlv_wm_state {
598 bool cxsr; 627 bool cxsr;
599}; 628};
600 629
601struct intel_mmio_flip {
602 struct work_struct work;
603 struct drm_i915_private *i915;
604 struct drm_i915_gem_request *req;
605 struct intel_crtc *crtc;
606 unsigned int rotation;
607};
608
609struct intel_crtc { 630struct intel_crtc {
610 struct drm_crtc base; 631 struct drm_crtc base;
611 enum pipe pipe; 632 enum pipe pipe;
@@ -620,7 +641,7 @@ struct intel_crtc {
620 unsigned long enabled_power_domains; 641 unsigned long enabled_power_domains;
621 bool lowfreq_avail; 642 bool lowfreq_avail;
622 struct intel_overlay *overlay; 643 struct intel_overlay *overlay;
623 struct intel_unpin_work *unpin_work; 644 struct intel_flip_work *flip_work;
624 645
625 atomic_t unpin_work_count; 646 atomic_t unpin_work_count;
626 647
@@ -815,6 +836,7 @@ struct intel_dp {
815 uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; 836 uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
816 uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; 837 uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
817 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; 838 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
839 uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
818 /* sink rates as reported by DP_SUPPORTED_LINK_RATES */ 840 /* sink rates as reported by DP_SUPPORTED_LINK_RATES */
819 uint8_t num_sink_rates; 841 uint8_t num_sink_rates;
820 int sink_rates[DP_MAX_SUPPORTED_RATES]; 842 int sink_rates[DP_MAX_SUPPORTED_RATES];
@@ -947,22 +969,21 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
947 return dev_priv->plane_to_crtc_mapping[plane]; 969 return dev_priv->plane_to_crtc_mapping[plane];
948} 970}
949 971
950struct intel_unpin_work { 972struct intel_flip_work {
951 struct work_struct work; 973 struct work_struct unpin_work;
974 struct work_struct mmio_work;
975
952 struct drm_crtc *crtc; 976 struct drm_crtc *crtc;
953 struct drm_framebuffer *old_fb; 977 struct drm_framebuffer *old_fb;
954 struct drm_i915_gem_object *pending_flip_obj; 978 struct drm_i915_gem_object *pending_flip_obj;
955 struct drm_pending_vblank_event *event; 979 struct drm_pending_vblank_event *event;
956 atomic_t pending; 980 atomic_t pending;
957#define INTEL_FLIP_INACTIVE 0
958#define INTEL_FLIP_PENDING 1
959#define INTEL_FLIP_COMPLETE 2
960 u32 flip_count; 981 u32 flip_count;
961 u32 gtt_offset; 982 u32 gtt_offset;
962 struct drm_i915_gem_request *flip_queued_req; 983 struct drm_i915_gem_request *flip_queued_req;
963 u32 flip_queued_vblank; 984 u32 flip_queued_vblank;
964 u32 flip_ready_vblank; 985 u32 flip_ready_vblank;
965 bool enable_stall_check; 986 unsigned int rotation;
966}; 987};
967 988
968struct intel_load_detect_pipe { 989struct intel_load_detect_pipe {
@@ -1031,9 +1052,9 @@ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
1031void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); 1052void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
1032void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 1053void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
1033void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 1054void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
1034void gen6_reset_rps_interrupts(struct drm_device *dev); 1055void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
1035void gen6_enable_rps_interrupts(struct drm_device *dev); 1056void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
1036void gen6_disable_rps_interrupts(struct drm_device *dev); 1057void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
1037u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask); 1058u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
1038void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv); 1059void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
1039void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv); 1060void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
@@ -1112,14 +1133,15 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv);
1112void i915_audio_component_cleanup(struct drm_i915_private *dev_priv); 1133void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
1113 1134
1114/* intel_display.c */ 1135/* intel_display.c */
1136void intel_update_rawclk(struct drm_i915_private *dev_priv);
1115int vlv_get_cck_clock(struct drm_i915_private *dev_priv, 1137int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
1116 const char *name, u32 reg, int ref_freq); 1138 const char *name, u32 reg, int ref_freq);
1117extern const struct drm_plane_funcs intel_plane_funcs; 1139extern const struct drm_plane_funcs intel_plane_funcs;
1118void intel_init_display_hooks(struct drm_i915_private *dev_priv); 1140void intel_init_display_hooks(struct drm_i915_private *dev_priv);
1119unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); 1141unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
1120bool intel_has_pending_fb_unpin(struct drm_device *dev); 1142bool intel_has_pending_fb_unpin(struct drm_device *dev);
1121void intel_mark_busy(struct drm_device *dev); 1143void intel_mark_busy(struct drm_i915_private *dev_priv);
1122void intel_mark_idle(struct drm_device *dev); 1144void intel_mark_idle(struct drm_i915_private *dev_priv);
1123void intel_crtc_restore_mode(struct drm_crtc *crtc); 1145void intel_crtc_restore_mode(struct drm_crtc *crtc);
1124int intel_display_suspend(struct drm_device *dev); 1146int intel_display_suspend(struct drm_device *dev);
1125void intel_encoder_destroy(struct drm_encoder *encoder); 1147void intel_encoder_destroy(struct drm_encoder *encoder);
@@ -1151,6 +1173,9 @@ intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
1151 if (crtc->active) 1173 if (crtc->active)
1152 intel_wait_for_vblank(dev, pipe); 1174 intel_wait_for_vblank(dev, pipe);
1153} 1175}
1176
1177u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
1178
1154int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); 1179int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
1155void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 1180void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1156 struct intel_digital_port *dport, 1181 struct intel_digital_port *dport,
@@ -1164,14 +1189,14 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
1164 struct drm_modeset_acquire_ctx *ctx); 1189 struct drm_modeset_acquire_ctx *ctx);
1165int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, 1190int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
1166 unsigned int rotation); 1191 unsigned int rotation);
1192void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
1167struct drm_framebuffer * 1193struct drm_framebuffer *
1168__intel_framebuffer_create(struct drm_device *dev, 1194__intel_framebuffer_create(struct drm_device *dev,
1169 struct drm_mode_fb_cmd2 *mode_cmd, 1195 struct drm_mode_fb_cmd2 *mode_cmd,
1170 struct drm_i915_gem_object *obj); 1196 struct drm_i915_gem_object *obj);
1171void intel_prepare_page_flip(struct drm_device *dev, int plane); 1197void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe);
1172void intel_finish_page_flip(struct drm_device *dev, int pipe); 1198void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe);
1173void intel_finish_page_flip_plane(struct drm_device *dev, int plane); 1199void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
1174void intel_check_page_flip(struct drm_device *dev, int pipe);
1175int intel_prepare_plane_fb(struct drm_plane *plane, 1200int intel_prepare_plane_fb(struct drm_plane *plane,
1176 const struct drm_plane_state *new_state); 1201 const struct drm_plane_state *new_state);
1177void intel_cleanup_plane_fb(struct drm_plane *plane, 1202void intel_cleanup_plane_fb(struct drm_plane *plane,
@@ -1228,8 +1253,8 @@ u32 intel_compute_tile_offset(int *x, int *y,
1228 const struct drm_framebuffer *fb, int plane, 1253 const struct drm_framebuffer *fb, int plane,
1229 unsigned int pitch, 1254 unsigned int pitch,
1230 unsigned int rotation); 1255 unsigned int rotation);
1231void intel_prepare_reset(struct drm_device *dev); 1256void intel_prepare_reset(struct drm_i915_private *dev_priv);
1232void intel_finish_reset(struct drm_device *dev); 1257void intel_finish_reset(struct drm_i915_private *dev_priv);
1233void hsw_enable_pc8(struct drm_i915_private *dev_priv); 1258void hsw_enable_pc8(struct drm_i915_private *dev_priv);
1234void hsw_disable_pc8(struct drm_i915_private *dev_priv); 1259void hsw_disable_pc8(struct drm_i915_private *dev_priv);
1235void broxton_init_cdclk(struct drm_i915_private *dev_priv); 1260void broxton_init_cdclk(struct drm_i915_private *dev_priv);
@@ -1252,8 +1277,8 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
1252void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n); 1277void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
1253int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); 1278int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
1254bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, 1279bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1255 intel_clock_t *best_clock); 1280 struct dpll *best_clock);
1256int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock); 1281int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
1257 1282
1258bool intel_crtc_active(struct drm_crtc *crtc); 1283bool intel_crtc_active(struct drm_crtc *crtc);
1259void hsw_enable_ips(struct intel_crtc *crtc); 1284void hsw_enable_ips(struct intel_crtc *crtc);
@@ -1339,12 +1364,22 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
1339bool 1364bool
1340intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]); 1365intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
1341 1366
1367static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
1368{
1369 return ~((1 << lane_count) - 1) & 0xf;
1370}
1371
1372/* intel_dp_aux_backlight.c */
1373int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
1374
1342/* intel_dp_mst.c */ 1375/* intel_dp_mst.c */
1343int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); 1376int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
1344void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); 1377void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
1345/* intel_dsi.c */ 1378/* intel_dsi.c */
1346void intel_dsi_init(struct drm_device *dev); 1379void intel_dsi_init(struct drm_device *dev);
1347 1380
1381/* intel_dsi_dcs_backlight.c */
1382int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
1348 1383
1349/* intel_dvo.c */ 1384/* intel_dvo.c */
1350void intel_dvo_init(struct drm_device *dev); 1385void intel_dvo_init(struct drm_device *dev);
@@ -1424,13 +1459,13 @@ void intel_attach_aspect_ratio_property(struct drm_connector *connector);
1424 1459
1425 1460
1426/* intel_overlay.c */ 1461/* intel_overlay.c */
1427void intel_setup_overlay(struct drm_device *dev); 1462void intel_setup_overlay(struct drm_i915_private *dev_priv);
1428void intel_cleanup_overlay(struct drm_device *dev); 1463void intel_cleanup_overlay(struct drm_i915_private *dev_priv);
1429int intel_overlay_switch_off(struct intel_overlay *overlay); 1464int intel_overlay_switch_off(struct intel_overlay *overlay);
1430int intel_overlay_put_image(struct drm_device *dev, void *data, 1465int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
1431 struct drm_file *file_priv); 1466 struct drm_file *file_priv);
1432int intel_overlay_attrs(struct drm_device *dev, void *data, 1467int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
1433 struct drm_file *file_priv); 1468 struct drm_file *file_priv);
1434void intel_overlay_reset(struct drm_i915_private *dev_priv); 1469void intel_overlay_reset(struct drm_i915_private *dev_priv);
1435 1470
1436 1471
@@ -1601,21 +1636,20 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
1601void intel_pm_setup(struct drm_device *dev); 1636void intel_pm_setup(struct drm_device *dev);
1602void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 1637void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
1603void intel_gpu_ips_teardown(void); 1638void intel_gpu_ips_teardown(void);
1604void intel_init_gt_powersave(struct drm_device *dev); 1639void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
1605void intel_cleanup_gt_powersave(struct drm_device *dev); 1640void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
1606void intel_enable_gt_powersave(struct drm_device *dev); 1641void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
1607void intel_disable_gt_powersave(struct drm_device *dev); 1642void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
1608void intel_suspend_gt_powersave(struct drm_device *dev); 1643void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
1609void intel_reset_gt_powersave(struct drm_device *dev); 1644void intel_reset_gt_powersave(struct drm_i915_private *dev_priv);
1610void gen6_update_ring_freq(struct drm_device *dev); 1645void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
1611void gen6_rps_busy(struct drm_i915_private *dev_priv); 1646void gen6_rps_busy(struct drm_i915_private *dev_priv);
1612void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); 1647void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
1613void gen6_rps_idle(struct drm_i915_private *dev_priv); 1648void gen6_rps_idle(struct drm_i915_private *dev_priv);
1614void gen6_rps_boost(struct drm_i915_private *dev_priv, 1649void gen6_rps_boost(struct drm_i915_private *dev_priv,
1615 struct intel_rps_client *rps, 1650 struct intel_rps_client *rps,
1616 unsigned long submitted); 1651 unsigned long submitted);
1617void intel_queue_rps_boost_for_request(struct drm_device *dev, 1652void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
1618 struct drm_i915_gem_request *req);
1619void vlv_wm_get_hw_state(struct drm_device *dev); 1653void vlv_wm_get_hw_state(struct drm_device *dev);
1620void ilk_wm_get_hw_state(struct drm_device *dev); 1654void ilk_wm_get_hw_state(struct drm_device *dev);
1621void skl_wm_get_hw_state(struct drm_device *dev); 1655void skl_wm_get_hw_state(struct drm_device *dev);
@@ -1623,7 +1657,11 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
1623 struct skl_ddb_allocation *ddb /* out */); 1657 struct skl_ddb_allocation *ddb /* out */);
1624uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config); 1658uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
1625bool ilk_disable_lp_wm(struct drm_device *dev); 1659bool ilk_disable_lp_wm(struct drm_device *dev);
1626int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6); 1660int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
1661static inline int intel_enable_rc6(void)
1662{
1663 return i915.enable_rc6;
1664}
1627 1665
1628/* intel_sdvo.c */ 1666/* intel_sdvo.c */
1629bool intel_sdvo_init(struct drm_device *dev, 1667bool intel_sdvo_init(struct drm_device *dev,
@@ -1635,7 +1673,7 @@ int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
1635int intel_sprite_set_colorkey(struct drm_device *dev, void *data, 1673int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
1636 struct drm_file *file_priv); 1674 struct drm_file *file_priv);
1637void intel_pipe_update_start(struct intel_crtc *crtc); 1675void intel_pipe_update_start(struct intel_crtc *crtc);
1638void intel_pipe_update_end(struct intel_crtc *crtc); 1676void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work);
1639 1677
1640/* intel_tv.c */ 1678/* intel_tv.c */
1641void intel_tv_init(struct drm_device *dev); 1679void intel_tv_init(struct drm_device *dev);
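Also new in the intel_drv.h hunk above is a small intel_dp_unused_lane_mask() inline returning the 4-bit mask of PHY lanes a given lane count leaves idle; chv_phy_pre_pll_enable() uses it when powergating unused lanes. A quick standalone check of that expression (the helper body is reproduced verbatim; the test harness around it is not from the driver):

#include <stdio.h>

/* Copy of the inline helper added to intel_drv.h above: lanes 0..3 exist,
 * so mask off every lane below lane_count and keep the rest. */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}

int main(void)
{
	int lanes;

	for (lanes = 1; lanes <= 4; lanes++)
		printf("lane_count=%d -> unused mask 0x%x\n",
		       lanes, intel_dp_unused_lane_mask(lanes));
	/* Expected: 1 -> 0xe, 2 -> 0xc, 4 -> 0x0 */
	return 0;
}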
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 366ad6c67ce4..4009618a5b34 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -532,7 +532,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
532 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 532 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
533 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 533 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
534 enum port port; 534 enum port port;
535 u32 tmp;
536 535
537 DRM_DEBUG_KMS("\n"); 536 DRM_DEBUG_KMS("\n");
538 537
@@ -551,11 +550,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
551 550
552 msleep(intel_dsi->panel_on_delay); 551 msleep(intel_dsi->panel_on_delay);
553 552
554 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 553 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
554 u32 val;
555
555 /* Disable DPOunit clock gating, can stall pipe */ 556 /* Disable DPOunit clock gating, can stall pipe */
556 tmp = I915_READ(DSPCLK_GATE_D); 557 val = I915_READ(DSPCLK_GATE_D);
557 tmp |= DPOUNIT_CLOCK_GATE_DISABLE; 558 val |= DPOUNIT_CLOCK_GATE_DISABLE;
558 I915_WRITE(DSPCLK_GATE_D, tmp); 559 I915_WRITE(DSPCLK_GATE_D, val);
559 } 560 }
560 561
561 /* put device in ready state */ 562 /* put device in ready state */
@@ -693,7 +694,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
693 694
694 intel_dsi_clear_device_ready(encoder); 695 intel_dsi_clear_device_ready(encoder);
695 696
696 if (!IS_BROXTON(dev_priv)) { 697 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
697 u32 val; 698 u32 val;
698 699
699 val = I915_READ(DSPCLK_GATE_D); 700 val = I915_READ(DSPCLK_GATE_D);
@@ -1473,10 +1474,42 @@ void intel_dsi_init(struct drm_device *dev)
1473 else 1474 else
1474 intel_encoder->crtc_mask = BIT(PIPE_B); 1475 intel_encoder->crtc_mask = BIT(PIPE_B);
1475 1476
1476 if (dev_priv->vbt.dsi.config->dual_link) 1477 if (dev_priv->vbt.dsi.config->dual_link) {
1477 intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C); 1478 intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
1478 else 1479
1480 switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
1481 case DL_DCS_PORT_A:
1482 intel_dsi->dcs_backlight_ports = BIT(PORT_A);
1483 break;
1484 case DL_DCS_PORT_C:
1485 intel_dsi->dcs_backlight_ports = BIT(PORT_C);
1486 break;
1487 default:
1488 case DL_DCS_PORT_A_AND_C:
1489 intel_dsi->dcs_backlight_ports = BIT(PORT_A) | BIT(PORT_C);
1490 break;
1491 }
1492
1493 switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
1494 case DL_DCS_PORT_A:
1495 intel_dsi->dcs_cabc_ports = BIT(PORT_A);
1496 break;
1497 case DL_DCS_PORT_C:
1498 intel_dsi->dcs_cabc_ports = BIT(PORT_C);
1499 break;
1500 default:
1501 case DL_DCS_PORT_A_AND_C:
1502 intel_dsi->dcs_cabc_ports = BIT(PORT_A) | BIT(PORT_C);
1503 break;
1504 }
1505 } else {
1479 intel_dsi->ports = BIT(port); 1506 intel_dsi->ports = BIT(port);
1507 intel_dsi->dcs_backlight_ports = BIT(port);
1508 intel_dsi->dcs_cabc_ports = BIT(port);
1509 }
1510
1511 if (!dev_priv->vbt.dsi.config->cabc_supported)
1512 intel_dsi->dcs_cabc_ports = 0;
1480 1513
1481 /* Create a DSI host (and a device) for each port. */ 1514 /* Create a DSI host (and a device) for each port. */
1482 for_each_dsi_port(port, intel_dsi->ports) { 1515 for_each_dsi_port(port, intel_dsi->ports) {
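To summarize the dual-link handling added to intel_dsi_init() above: the VBT's dl_dcs_backlight_ports and dl_dcs_cabc_ports fields choose which DSI port(s) receive DCS backlight and CABC commands, defaulting to both ports, and dcs_cabc_ports is cleared entirely when the VBT does not advertise CABC. A self-contained sketch of that mapping — the enum values and BIT() definition here are illustrative assumptions, not the driver's definitions:

#include <stdio.h>

/* Hypothetical stand-ins for the VBT enum and port numbering used in the
 * intel_dsi_init() hunk above; the real values live in the driver headers. */
enum { PORT_A = 0, PORT_C = 2 };
enum { DL_DCS_PORT_A, DL_DCS_PORT_C, DL_DCS_PORT_A_AND_C };
#define BIT(x) (1u << (x))

/* Mirror of the dual-link port selection: the VBT field says which DSI
 * port(s) should receive DCS brightness/CABC commands. */
static unsigned int dcs_ports_from_vbt(int dl_dcs_ports)
{
	switch (dl_dcs_ports) {
	case DL_DCS_PORT_A:
		return BIT(PORT_A);
	case DL_DCS_PORT_C:
		return BIT(PORT_C);
	case DL_DCS_PORT_A_AND_C:
	default:
		return BIT(PORT_A) | BIT(PORT_C);
	}
}

int main(void)
{
	printf("A only:  0x%x\n", dcs_ports_from_vbt(DL_DCS_PORT_A));
	printf("C only:  0x%x\n", dcs_ports_from_vbt(DL_DCS_PORT_C));
	printf("A and C: 0x%x\n", dcs_ports_from_vbt(DL_DCS_PORT_A_AND_C));
	return 0;
}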
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 61a6957fc6c2..5967ea6d6045 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -78,6 +78,10 @@ struct intel_dsi {
78 78
79 u8 escape_clk_div; 79 u8 escape_clk_div;
80 u8 dual_link; 80 u8 dual_link;
81
82 u16 dcs_backlight_ports;
83 u16 dcs_cabc_ports;
84
81 u8 pixel_overlap; 85 u8 pixel_overlap;
82 u32 port_bits; 86 u32 port_bits;
83 u32 bw_timer; 87 u32 bw_timer;
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
new file mode 100644
index 000000000000..f0dc427743f8
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
@@ -0,0 +1,179 @@
1/*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Deepak M <m.deepak at intel.com>
24 */
25
26#include "intel_drv.h"
27#include "intel_dsi.h"
28#include "i915_drv.h"
29#include <video/mipi_display.h>
30#include <drm/drm_mipi_dsi.h>
31
32#define CONTROL_DISPLAY_BCTRL (1 << 5)
33#define CONTROL_DISPLAY_DD (1 << 3)
34#define CONTROL_DISPLAY_BL (1 << 2)
35
36#define POWER_SAVE_OFF (0 << 0)
37#define POWER_SAVE_LOW (1 << 0)
38#define POWER_SAVE_MEDIUM (2 << 0)
39#define POWER_SAVE_HIGH (3 << 0)
40#define POWER_SAVE_OUTDOOR_MODE (4 << 0)
41
42#define PANEL_PWM_MAX_VALUE 0xFF
43
44static u32 dcs_get_backlight(struct intel_connector *connector)
45{
46 struct intel_encoder *encoder = connector->encoder;
47 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
48 struct mipi_dsi_device *dsi_device;
49 u8 data;
50 enum port port;
51
52 /* FIXME: Need to take care of 16 bit brightness level */
53 for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
54 dsi_device = intel_dsi->dsi_hosts[port]->device;
55 mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
56 &data, sizeof(data));
57 break;
58 }
59
60 return data;
61}
62
63static void dcs_set_backlight(struct intel_connector *connector, u32 level)
64{
65 struct intel_encoder *encoder = connector->encoder;
66 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
67 struct mipi_dsi_device *dsi_device;
68 u8 data = level;
69 enum port port;
70
71 /* FIXME: Need to take care of 16 bit brightness level */
72 for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
73 dsi_device = intel_dsi->dsi_hosts[port]->device;
74 mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
75 &data, sizeof(data));
76 }
77}
78
79static void dcs_disable_backlight(struct intel_connector *connector)
80{
81 struct intel_encoder *encoder = connector->encoder;
82 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
83 struct mipi_dsi_device *dsi_device;
84 enum port port;
85
86 dcs_set_backlight(connector, 0);
87
88 for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
89 u8 cabc = POWER_SAVE_OFF;
90
91 dsi_device = intel_dsi->dsi_hosts[port]->device;
92 mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
93 &cabc, sizeof(cabc));
94 }
95
96 for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
97 u8 ctrl = 0;
98
99 dsi_device = intel_dsi->dsi_hosts[port]->device;
100
101 mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
102 &ctrl, sizeof(ctrl));
103
104 ctrl &= ~CONTROL_DISPLAY_BL;
105 ctrl &= ~CONTROL_DISPLAY_DD;
106 ctrl &= ~CONTROL_DISPLAY_BCTRL;
107
108 mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
109 &ctrl, sizeof(ctrl));
110 }
111}
112
113static void dcs_enable_backlight(struct intel_connector *connector)
114{
115 struct intel_encoder *encoder = connector->encoder;
116 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
117 struct intel_panel *panel = &connector->panel;
118 struct mipi_dsi_device *dsi_device;
119 enum port port;
120
121 for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
122 u8 ctrl = 0;
123
124 dsi_device = intel_dsi->dsi_hosts[port]->device;
125
126 mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
127 &ctrl, sizeof(ctrl));
128
129 ctrl |= CONTROL_DISPLAY_BL;
130 ctrl |= CONTROL_DISPLAY_DD;
131 ctrl |= CONTROL_DISPLAY_BCTRL;
132
133 mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
134 &ctrl, sizeof(ctrl));
135 }
136
137 for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
138 u8 cabc = POWER_SAVE_MEDIUM;
139
140 dsi_device = intel_dsi->dsi_hosts[port]->device;
141 mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
142 &cabc, sizeof(cabc));
143 }
144
145 dcs_set_backlight(connector, panel->backlight.level);
146}
147
148static int dcs_setup_backlight(struct intel_connector *connector,
149 enum pipe unused)
150{
151 struct intel_panel *panel = &connector->panel;
152
153 panel->backlight.max = PANEL_PWM_MAX_VALUE;
154 panel->backlight.level = PANEL_PWM_MAX_VALUE;
155
156 return 0;
157}
158
159int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
160{
161 struct drm_device *dev = intel_connector->base.dev;
162 struct drm_i915_private *dev_priv = dev->dev_private;
163 struct intel_encoder *encoder = intel_connector->encoder;
164 struct intel_panel *panel = &intel_connector->panel;
165
166 if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
167 return -ENODEV;
168
169 if (WARN_ON(encoder->type != INTEL_OUTPUT_DSI))
170 return -EINVAL;
171
172 panel->backlight.setup = dcs_setup_backlight;
173 panel->backlight.enable = dcs_enable_backlight;
174 panel->backlight.disable = dcs_disable_backlight;
175 panel->backlight.set = dcs_set_backlight;
176 panel->backlight.get = dcs_get_backlight;
177
178 return 0;
179}
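The get/set helpers in this new file only handle an 8-bit brightness value; the FIXME comments note that the 16-bit case is still open. A hedged sketch of what the 16-bit packing could look like is below. It only illustrates splitting the level into the two DCS parameter bytes and is not code from the patch; the byte order a given panel expects is an assumption.

#include <stdint.h>
#include <stdio.h>

/* Sketch: pack a 16-bit brightness level into two DCS parameter bytes,
 * MSB first.  Whether a panel wants MSB- or LSB-first is panel specific,
 * which is presumably why the driver defers this. */
static void pack_dcs_brightness16(uint16_t level, uint8_t payload[2])
{
	payload[0] = level >> 8;	/* upper 8 bits */
	payload[1] = level & 0xff;	/* lower 8 bits */
}

int main(void)
{
	uint8_t payload[2];

	pack_dcs_brightness16(0x0abc, payload);
	printf("payload: %02x %02x\n", payload[0], payload[1]);
	return 0;
}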
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index e498f1c3221e..f122484bedfc 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -95,6 +95,24 @@ static struct gpio_map vlv_gpio_table[] = {
95 { VLV_GPIO_NC_11_PANEL1_BKLTCTL }, 95 { VLV_GPIO_NC_11_PANEL1_BKLTCTL },
96}; 96};
97 97
98#define CHV_GPIO_IDX_START_N 0
99#define CHV_GPIO_IDX_START_E 73
100#define CHV_GPIO_IDX_START_SW 100
101#define CHV_GPIO_IDX_START_SE 198
102
103#define CHV_VBT_MAX_PINS_PER_FMLY 15
104
105#define CHV_GPIO_PAD_CFG0(f, i) (0x4400 + (f) * 0x400 + (i) * 8)
106#define CHV_GPIO_GPIOEN (1 << 15)
107#define CHV_GPIO_GPIOCFG_GPIO (0 << 8)
108#define CHV_GPIO_GPIOCFG_GPO (1 << 8)
109#define CHV_GPIO_GPIOCFG_GPI (2 << 8)
110#define CHV_GPIO_GPIOCFG_HIZ (3 << 8)
111#define CHV_GPIO_GPIOTXSTATE(state) ((!!(state)) << 1)
112
113#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4)
114#define CHV_GPIO_CFGLOCK (1 << 31)
115
98static inline enum port intel_dsi_seq_port_to_port(u8 port) 116static inline enum port intel_dsi_seq_port_to_port(u8 port)
99{ 117{
100 return port ? PORT_C : PORT_A; 118 return port ? PORT_C : PORT_A;
@@ -203,13 +221,14 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
203 map = &vlv_gpio_table[gpio_index]; 221 map = &vlv_gpio_table[gpio_index];
204 222
205 if (dev_priv->vbt.dsi.seq_version >= 3) { 223 if (dev_priv->vbt.dsi.seq_version >= 3) {
206 DRM_DEBUG_KMS("GPIO element v3 not supported\n"); 224 /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
207 return; 225 port = IOSF_PORT_GPIO_NC;
208 } else { 226 } else {
209 if (gpio_source == 0) { 227 if (gpio_source == 0) {
210 port = IOSF_PORT_GPIO_NC; 228 port = IOSF_PORT_GPIO_NC;
211 } else if (gpio_source == 1) { 229 } else if (gpio_source == 1) {
212 port = IOSF_PORT_GPIO_SC; 230 DRM_DEBUG_KMS("SC gpio not supported\n");
231 return;
213 } else { 232 } else {
214 DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source); 233 DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
215 return; 234 return;
@@ -231,6 +250,56 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
231 mutex_unlock(&dev_priv->sb_lock); 250 mutex_unlock(&dev_priv->sb_lock);
232} 251}
233 252
253static void chv_exec_gpio(struct drm_i915_private *dev_priv,
254 u8 gpio_source, u8 gpio_index, bool value)
255{
256 u16 cfg0, cfg1;
257 u16 family_num;
258 u8 port;
259
260 if (dev_priv->vbt.dsi.seq_version >= 3) {
261 if (gpio_index >= CHV_GPIO_IDX_START_SE) {
262 /* XXX: it's unclear whether 255->57 is part of SE. */
263 gpio_index -= CHV_GPIO_IDX_START_SE;
264 port = CHV_IOSF_PORT_GPIO_SE;
265 } else if (gpio_index >= CHV_GPIO_IDX_START_SW) {
266 gpio_index -= CHV_GPIO_IDX_START_SW;
267 port = CHV_IOSF_PORT_GPIO_SW;
268 } else if (gpio_index >= CHV_GPIO_IDX_START_E) {
269 gpio_index -= CHV_GPIO_IDX_START_E;
270 port = CHV_IOSF_PORT_GPIO_E;
271 } else {
272 port = CHV_IOSF_PORT_GPIO_N;
273 }
274 } else {
275 /* XXX: The spec is unclear about CHV GPIO on seq v2 */
276 if (gpio_source != 0) {
277 DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
278 return;
279 }
280
281 if (gpio_index >= CHV_GPIO_IDX_START_E) {
282 DRM_DEBUG_KMS("invalid gpio index %u for GPIO N\n",
283 gpio_index);
284 return;
285 }
286
287 port = CHV_IOSF_PORT_GPIO_N;
288 }
289
290 family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY;
291 gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY;
292
293 cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index);
294 cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index);
295
296 mutex_lock(&dev_priv->sb_lock);
297 vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
298 vlv_iosf_sb_write(dev_priv, port, cfg0,
299 CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value));
300 mutex_unlock(&dev_priv->sb_lock);
301}
302
234static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) 303static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
235{ 304{
236 struct drm_device *dev = intel_dsi->base.base.dev; 305 struct drm_device *dev = intel_dsi->base.base.dev;
@@ -254,6 +323,8 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
254 323
255 if (IS_VALLEYVIEW(dev_priv)) 324 if (IS_VALLEYVIEW(dev_priv))
256 vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value); 325 vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
326 else if (IS_CHERRYVIEW(dev_priv))
327 chv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
257 else 328 else
258 DRM_DEBUG_KMS("GPIO element not supported on this platform\n"); 329 DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
259 330
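The new chv_exec_gpio() first rebases the VBT GPIO index to the start of its community (N/E/SW/SE), then splits the remainder into a family of up to 15 pads and a pad within that family; the CFG0/CFG1 pad registers sit 8 bytes apart per pad and 0x400 apart per family. A standalone sketch of just that address arithmetic, reusing the constants from the hunk above:

#include <stdio.h>

#define CHV_VBT_MAX_PINS_PER_FMLY	15

#define CHV_GPIO_PAD_CFG0(f, i)		(0x4400 + (f) * 0x400 + (i) * 8)
#define CHV_GPIO_PAD_CFG1(f, i)		(0x4400 + (f) * 0x400 + (i) * 8 + 4)

/* Compute the sideband offsets of the CFG0/CFG1 registers for a GPIO
 * index that has already been rebased to the start of its community. */
static void chv_gpio_pad_regs(unsigned int gpio_index,
			      unsigned int *cfg0, unsigned int *cfg1)
{
	unsigned int family = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY;
	unsigned int pad = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY;

	*cfg0 = CHV_GPIO_PAD_CFG0(family, pad);
	*cfg1 = CHV_GPIO_PAD_CFG1(family, pad);
}

int main(void)
{
	unsigned int cfg0, cfg1;

	/* e.g. pad 23 of a community: family 1, pad 8 */
	chv_gpio_pad_regs(23, &cfg0, &cfg1);
	printf("cfg0=0x%x cfg1=0x%x\n", cfg0, cfg1);
	return 0;
}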
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index d5a7cfec589b..0dea5fbcd8aa 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -740,7 +740,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
740 740
741 /* FIXME: We lack the proper locking here, so only run this on the 741 /* FIXME: We lack the proper locking here, so only run this on the
742 * platforms that need. */ 742 * platforms that need. */
743 if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7) 743 if (IS_GEN(dev_priv, 5, 6))
744 cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj); 744 cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
745 cache->fb.pixel_format = fb->pixel_format; 745 cache->fb.pixel_format = fb->pixel_format;
746 cache->fb.stride = fb->pitches[0]; 746 cache->fb.stride = fb->pitches[0];
@@ -827,7 +827,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
827 bool enable_by_default = IS_HASWELL(dev_priv) || 827 bool enable_by_default = IS_HASWELL(dev_priv) ||
828 IS_BROADWELL(dev_priv); 828 IS_BROADWELL(dev_priv);
829 829
830 if (intel_vgpu_active(dev_priv->dev)) { 830 if (intel_vgpu_active(dev_priv)) {
831 fbc->no_fbc_reason = "VGPU is active"; 831 fbc->no_fbc_reason = "VGPU is active";
832 return false; 832 return false;
833 } 833 }
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index ab8d09a81f14..99e27530e264 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -150,10 +150,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
150 if (size * 2 < ggtt->stolen_usable_size) 150 if (size * 2 < ggtt->stolen_usable_size)
151 obj = i915_gem_object_create_stolen(dev, size); 151 obj = i915_gem_object_create_stolen(dev, size);
152 if (obj == NULL) 152 if (obj == NULL)
153 obj = i915_gem_alloc_object(dev, size); 153 obj = i915_gem_object_create(dev, size);
154 if (!obj) { 154 if (IS_ERR(obj)) {
155 DRM_ERROR("failed to allocate framebuffer\n"); 155 DRM_ERROR("failed to allocate framebuffer\n");
156 ret = -ENOMEM; 156 ret = PTR_ERR(obj);
157 goto out; 157 goto out;
158 } 158 }
159 159
@@ -186,9 +186,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
186 struct i915_ggtt *ggtt = &dev_priv->ggtt; 186 struct i915_ggtt *ggtt = &dev_priv->ggtt;
187 struct fb_info *info; 187 struct fb_info *info;
188 struct drm_framebuffer *fb; 188 struct drm_framebuffer *fb;
189 struct i915_vma *vma;
189 struct drm_i915_gem_object *obj; 190 struct drm_i915_gem_object *obj;
190 int size, ret;
191 bool prealloc = false; 191 bool prealloc = false;
192 void *vaddr;
193 int ret;
192 194
193 if (intel_fb && 195 if (intel_fb &&
194 (sizes->fb_width > intel_fb->base.width || 196 (sizes->fb_width > intel_fb->base.width ||
@@ -214,7 +216,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
214 } 216 }
215 217
216 obj = intel_fb->obj; 218 obj = intel_fb->obj;
217 size = obj->base.size;
218 219
219 mutex_lock(&dev->struct_mutex); 220 mutex_lock(&dev->struct_mutex);
220 221
@@ -244,22 +245,23 @@ static int intelfb_create(struct drm_fb_helper *helper,
244 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; 245 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
245 info->fbops = &intelfb_ops; 246 info->fbops = &intelfb_ops;
246 247
248 vma = i915_gem_obj_to_ggtt(obj);
249
247 /* setup aperture base/size for vesafb takeover */ 250 /* setup aperture base/size for vesafb takeover */
248 info->apertures->ranges[0].base = dev->mode_config.fb_base; 251 info->apertures->ranges[0].base = dev->mode_config.fb_base;
249 info->apertures->ranges[0].size = ggtt->mappable_end; 252 info->apertures->ranges[0].size = ggtt->mappable_end;
250 253
251 info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj); 254 info->fix.smem_start = dev->mode_config.fb_base + vma->node.start;
252 info->fix.smem_len = size; 255 info->fix.smem_len = vma->node.size;
253 256
254 info->screen_base = 257 vaddr = i915_vma_pin_iomap(vma);
255 ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj), 258 if (IS_ERR(vaddr)) {
256 size);
257 if (!info->screen_base) {
258 DRM_ERROR("Failed to remap framebuffer into virtual memory\n"); 259 DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
259 ret = -ENOSPC; 260 ret = PTR_ERR(vaddr);
260 goto out_destroy_fbi; 261 goto out_destroy_fbi;
261 } 262 }
262 info->screen_size = size; 263 info->screen_base = vaddr;
264 info->screen_size = vma->node.size;
263 265
264 /* This driver doesn't need a VT switch to restore the mode on resume */ 266 /* This driver doesn't need a VT switch to restore the mode on resume */
265 info->skip_vt_switch = true; 267 info->skip_vt_switch = true;
@@ -287,7 +289,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
287out_destroy_fbi: 289out_destroy_fbi:
288 drm_fb_helper_release_fbi(helper); 290 drm_fb_helper_release_fbi(helper);
289out_unpin: 291out_unpin:
290 i915_gem_object_ggtt_unpin(obj); 292 intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
291out_unlock: 293out_unlock:
292 mutex_unlock(&dev->struct_mutex); 294 mutex_unlock(&dev->struct_mutex);
293 return ret; 295 return ret;
@@ -551,6 +553,11 @@ static void intel_fbdev_destroy(struct drm_device *dev,
551 553
552 if (ifbdev->fb) { 554 if (ifbdev->fb) {
553 drm_framebuffer_unregister_private(&ifbdev->fb->base); 555 drm_framebuffer_unregister_private(&ifbdev->fb->base);
556
557 mutex_lock(&dev->struct_mutex);
558 intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
559 mutex_unlock(&dev->struct_mutex);
560
554 drm_framebuffer_remove(&ifbdev->fb->base); 561 drm_framebuffer_remove(&ifbdev->fb->base);
555 } 562 }
556} 563}
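In the fbdev hunks above the framebuffer is now mapped through i915_vma_pin_iomap() instead of a manual ioremap_wc(), and error handling switches from NULL checks to the IS_ERR()/PTR_ERR() error-pointer convention. A minimal userspace sketch of that pattern in isolation; the helpers and the fake mapping function are stand-ins, not the driver's real types.

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Userspace stand-ins for the kernel's error-pointer helpers. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

/* Pretend mapping helper: fails with -ENOSPC, like a full aperture. */
static void *fake_pin_iomap(int fail)
{
	static char backing[64];

	return fail ? ERR_PTR(-ENOSPC) : backing;
}

int main(void)
{
	void *vaddr = fake_pin_iomap(1);

	/* Mirrors the intelfb_create() flow: propagate the encoded errno
	 * rather than assuming a NULL return on failure. */
	if (IS_ERR(vaddr)) {
		printf("mapping failed: %ld\n", PTR_ERR(vaddr));
		return (int)-PTR_ERR(vaddr);
	}

	printf("mapped at %p\n", vaddr);
	return 0;
}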
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 876e5da44c4e..34405de72dfa 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -59,9 +59,12 @@
59 * 59 *
60 */ 60 */
61 61
62#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6.bin" 62#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6_1.bin"
63MODULE_FIRMWARE(I915_SKL_GUC_UCODE); 63MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
64 64
65#define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin"
66MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
67
65/* User-friendly representation of an enum */ 68/* User-friendly representation of an enum */
66const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status) 69const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
67{ 70{
@@ -281,6 +284,17 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
281 return ret; 284 return ret;
282} 285}
283 286
287static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
288{
289 u32 wopcm_size = GUC_WOPCM_TOP;
290
291 /* On BXT, the top of WOPCM is reserved for RC6 context */
292 if (IS_BROXTON(dev_priv))
293 wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
294
295 return wopcm_size;
296}
297
284/* 298/*
285 * Load the GuC firmware blob into the MinuteIA. 299 * Load the GuC firmware blob into the MinuteIA.
286 */ 300 */
@@ -308,7 +322,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
308 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 322 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
309 323
310 /* init WOPCM */ 324 /* init WOPCM */
311 I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE); 325 I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv));
312 I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE); 326 I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
313 327
314 /* Enable MIA caching. GuC clock gating is disabled. */ 328 /* Enable MIA caching. GuC clock gating is disabled. */
@@ -552,9 +566,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
552 566
553 /* Header and uCode will be loaded to WOPCM. Size of the two. */ 567 /* Header and uCode will be loaded to WOPCM. Size of the two. */
554 size = guc_fw->header_size + guc_fw->ucode_size; 568 size = guc_fw->header_size + guc_fw->ucode_size;
555 569 if (size > guc_wopcm_size(dev->dev_private)) {
556 /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
557 if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) {
558 DRM_ERROR("Firmware is too large to fit in WOPCM\n"); 570 DRM_ERROR("Firmware is too large to fit in WOPCM\n");
559 goto fail; 571 goto fail;
560 } 572 }
@@ -640,6 +652,10 @@ void intel_guc_ucode_init(struct drm_device *dev)
640 fw_path = I915_SKL_GUC_UCODE; 652 fw_path = I915_SKL_GUC_UCODE;
641 guc_fw->guc_fw_major_wanted = 6; 653 guc_fw->guc_fw_major_wanted = 6;
642 guc_fw->guc_fw_minor_wanted = 1; 654 guc_fw->guc_fw_minor_wanted = 1;
655 } else if (IS_BROXTON(dev)) {
656 fw_path = I915_BXT_GUC_UCODE;
657 guc_fw->guc_fw_major_wanted = 8;
658 guc_fw->guc_fw_minor_wanted = 7;
643 } else { 659 } else {
644 i915.enable_guc_submission = false; 660 i915.enable_guc_submission = false;
645 fw_path = ""; /* unknown device */ 661 fw_path = ""; /* unknown device */
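The firmware size check now reuses guc_wopcm_size() instead of open-coding the reservation, so Broxton's RC6 context carve-out at the top of WOPCM is accounted for in one place. A standalone sketch of the computation follows; the GUC_WOPCM_TOP and BXT_GUC_WOPCM_RC6_RESERVED values are assumed placeholders for illustration, since the diff does not show the driver's register definitions.

#include <stdio.h>
#include <stdbool.h>

/* Assumed sizes purely for illustration. */
#define GUC_WOPCM_TOP			(0x80 << 12)	/* assumed 512K */
#define BXT_GUC_WOPCM_RC6_RESERVED	(0x10 << 12)	/* assumed 64K */

/* Usable WOPCM: on Broxton the top of WOPCM is reserved for the RC6
 * context, so less of it is available for the GuC firmware image. */
static unsigned int guc_wopcm_size(bool is_broxton)
{
	unsigned int size = GUC_WOPCM_TOP;

	if (is_broxton)
		size -= BXT_GUC_WOPCM_RC6_RESERVED;

	return size;
}

int main(void)
{
	unsigned int fw_size = 150 * 1024;	/* header + ucode, example */

	if (fw_size > guc_wopcm_size(true))
		printf("Firmware is too large to fit in WOPCM\n");
	else
		printf("Firmware fits (limit %u bytes)\n", guc_wopcm_size(true));
	return 0;
}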
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2c3bd9c2573e..6b52c6accf6a 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1678,35 +1678,12 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
1678 struct intel_crtc *intel_crtc = 1678 struct intel_crtc *intel_crtc =
1679 to_intel_crtc(encoder->base.crtc); 1679 to_intel_crtc(encoder->base.crtc);
1680 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 1680 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
1681 enum dpio_channel port = vlv_dport_to_channel(dport);
1682 int pipe = intel_crtc->pipe;
1683 u32 val;
1684 1681
1685 /* Enable clock channels for this port */ 1682 vlv_phy_pre_encoder_enable(encoder);
1686 mutex_lock(&dev_priv->sb_lock);
1687 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
1688 val = 0;
1689 if (pipe)
1690 val |= (1<<21);
1691 else
1692 val &= ~(1<<21);
1693 val |= 0x001000c4;
1694 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
1695 1683
1696 /* HDMI 1.0V-2dB */ 1684 /* HDMI 1.0V-2dB */
1697 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0); 1685 vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
1698 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f); 1686 0x2b247878);
1699 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a);
1700 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040);
1701 vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878);
1702 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
1703 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
1704 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
1705
1706 /* Program lane clock */
1707 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
1708 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
1709 mutex_unlock(&dev_priv->sb_lock);
1710 1687
1711 intel_hdmi->set_infoframes(&encoder->base, 1688 intel_hdmi->set_infoframes(&encoder->base,
1712 intel_crtc->config->has_hdmi_sink, 1689 intel_crtc->config->has_hdmi_sink,
@@ -1719,207 +1696,27 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
1719 1696
1720static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) 1697static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1721{ 1698{
1722 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1723 struct drm_device *dev = encoder->base.dev;
1724 struct drm_i915_private *dev_priv = dev->dev_private;
1725 struct intel_crtc *intel_crtc =
1726 to_intel_crtc(encoder->base.crtc);
1727 enum dpio_channel port = vlv_dport_to_channel(dport);
1728 int pipe = intel_crtc->pipe;
1729
1730 intel_hdmi_prepare(encoder); 1699 intel_hdmi_prepare(encoder);
1731 1700
1732 /* Program Tx lane resets to default */ 1701 vlv_phy_pre_pll_enable(encoder);
1733 mutex_lock(&dev_priv->sb_lock);
1734 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
1735 DPIO_PCS_TX_LANE2_RESET |
1736 DPIO_PCS_TX_LANE1_RESET);
1737 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
1738 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1739 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1740 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1741 DPIO_PCS_CLK_SOFT_RESET);
1742
1743 /* Fix up inter-pair skew failure */
1744 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
1745 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
1746 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
1747
1748 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
1749 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
1750 mutex_unlock(&dev_priv->sb_lock);
1751}
1752
1753static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
1754 bool reset)
1755{
1756 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1757 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1758 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1759 enum pipe pipe = crtc->pipe;
1760 uint32_t val;
1761
1762 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
1763 if (reset)
1764 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1765 else
1766 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
1767 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
1768
1769 if (crtc->config->lane_count > 2) {
1770 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
1771 if (reset)
1772 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1773 else
1774 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
1775 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
1776 }
1777
1778 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
1779 val |= CHV_PCS_REQ_SOFTRESET_EN;
1780 if (reset)
1781 val &= ~DPIO_PCS_CLK_SOFT_RESET;
1782 else
1783 val |= DPIO_PCS_CLK_SOFT_RESET;
1784 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
1785
1786 if (crtc->config->lane_count > 2) {
1787 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
1788 val |= CHV_PCS_REQ_SOFTRESET_EN;
1789 if (reset)
1790 val &= ~DPIO_PCS_CLK_SOFT_RESET;
1791 else
1792 val |= DPIO_PCS_CLK_SOFT_RESET;
1793 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
1794 }
1795} 1702}
1796 1703
1797static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder) 1704static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1798{ 1705{
1799 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1800 struct drm_device *dev = encoder->base.dev;
1801 struct drm_i915_private *dev_priv = dev->dev_private;
1802 struct intel_crtc *intel_crtc =
1803 to_intel_crtc(encoder->base.crtc);
1804 enum dpio_channel ch = vlv_dport_to_channel(dport);
1805 enum pipe pipe = intel_crtc->pipe;
1806 u32 val;
1807
1808 intel_hdmi_prepare(encoder); 1706 intel_hdmi_prepare(encoder);
1809 1707
1810 /* 1708 chv_phy_pre_pll_enable(encoder);
1811 * Must trick the second common lane into life.
1812 * Otherwise we can't even access the PLL.
1813 */
1814 if (ch == DPIO_CH0 && pipe == PIPE_B)
1815 dport->release_cl2_override =
1816 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
1817
1818 chv_phy_powergate_lanes(encoder, true, 0x0);
1819
1820 mutex_lock(&dev_priv->sb_lock);
1821
1822 /* Assert data lane reset */
1823 chv_data_lane_soft_reset(encoder, true);
1824
1825 /* program left/right clock distribution */
1826 if (pipe != PIPE_B) {
1827 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1828 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1829 if (ch == DPIO_CH0)
1830 val |= CHV_BUFLEFTENA1_FORCE;
1831 if (ch == DPIO_CH1)
1832 val |= CHV_BUFRIGHTENA1_FORCE;
1833 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1834 } else {
1835 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1836 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1837 if (ch == DPIO_CH0)
1838 val |= CHV_BUFLEFTENA2_FORCE;
1839 if (ch == DPIO_CH1)
1840 val |= CHV_BUFRIGHTENA2_FORCE;
1841 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1842 }
1843
1844 /* program clock channel usage */
1845 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
1846 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
1847 if (pipe != PIPE_B)
1848 val &= ~CHV_PCS_USEDCLKCHANNEL;
1849 else
1850 val |= CHV_PCS_USEDCLKCHANNEL;
1851 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
1852
1853 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
1854 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
1855 if (pipe != PIPE_B)
1856 val &= ~CHV_PCS_USEDCLKCHANNEL;
1857 else
1858 val |= CHV_PCS_USEDCLKCHANNEL;
1859 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
1860
1861 /*
1862 * This is a bit weird since generally CL
1863 * matches the pipe, but here we need to
1864 * pick the CL based on the port.
1865 */
1866 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
1867 if (pipe != PIPE_B)
1868 val &= ~CHV_CMN_USEDCLKCHANNEL;
1869 else
1870 val |= CHV_CMN_USEDCLKCHANNEL;
1871 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
1872
1873 mutex_unlock(&dev_priv->sb_lock);
1874} 1709}
1875 1710
1876static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder) 1711static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
1877{ 1712{
1878 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1713 chv_phy_post_pll_disable(encoder);
1879 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
1880 u32 val;
1881
1882 mutex_lock(&dev_priv->sb_lock);
1883
1884 /* disable left/right clock distribution */
1885 if (pipe != PIPE_B) {
1886 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1887 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1888 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1889 } else {
1890 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1891 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1892 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1893 }
1894
1895 mutex_unlock(&dev_priv->sb_lock);
1896
1897 /*
1898 * Leave the power down bit cleared for at least one
1899 * lane so that chv_powergate_phy_ch() will power
1900 * on something when the channel is otherwise unused.
1901 * When the port is off and the override is removed
1902 * the lanes power down anyway, so otherwise it doesn't
1903 * really matter what the state of power down bits is
1904 * after this.
1905 */
1906 chv_phy_powergate_lanes(encoder, false, 0x0);
1907} 1714}
1908 1715
1909static void vlv_hdmi_post_disable(struct intel_encoder *encoder) 1716static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
1910{ 1717{
1911 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1912 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1913 struct intel_crtc *intel_crtc =
1914 to_intel_crtc(encoder->base.crtc);
1915 enum dpio_channel port = vlv_dport_to_channel(dport);
1916 int pipe = intel_crtc->pipe;
1917
1918 /* Reset lanes to avoid HDMI flicker (VLV w/a) */ 1718 /* Reset lanes to avoid HDMI flicker (VLV w/a) */
1919 mutex_lock(&dev_priv->sb_lock); 1719 vlv_phy_reset_lanes(encoder);
1920 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
1921 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
1922 mutex_unlock(&dev_priv->sb_lock);
1923} 1720}
1924 1721
1925static void chv_hdmi_post_disable(struct intel_encoder *encoder) 1722static void chv_hdmi_post_disable(struct intel_encoder *encoder)
@@ -1944,138 +1741,12 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1944 struct intel_crtc *intel_crtc = 1741 struct intel_crtc *intel_crtc =
1945 to_intel_crtc(encoder->base.crtc); 1742 to_intel_crtc(encoder->base.crtc);
1946 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 1743 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
1947 enum dpio_channel ch = vlv_dport_to_channel(dport);
1948 int pipe = intel_crtc->pipe;
1949 int data, i, stagger;
1950 u32 val;
1951 1744
1952 mutex_lock(&dev_priv->sb_lock); 1745 chv_phy_pre_encoder_enable(encoder);
1953
1954 /* allow hardware to manage TX FIFO reset source */
1955 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
1956 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
1957 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
1958
1959 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
1960 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
1961 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
1962
1963 /* Program Tx latency optimal setting */
1964 for (i = 0; i < 4; i++) {
1965 /* Set the upar bit */
1966 data = (i == 1) ? 0x0 : 0x1;
1967 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
1968 data << DPIO_UPAR_SHIFT);
1969 }
1970
1971 /* Data lane stagger programming */
1972 if (intel_crtc->config->port_clock > 270000)
1973 stagger = 0x18;
1974 else if (intel_crtc->config->port_clock > 135000)
1975 stagger = 0xd;
1976 else if (intel_crtc->config->port_clock > 67500)
1977 stagger = 0x7;
1978 else if (intel_crtc->config->port_clock > 33750)
1979 stagger = 0x4;
1980 else
1981 stagger = 0x2;
1982
1983 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
1984 val |= DPIO_TX2_STAGGER_MASK(0x1f);
1985 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
1986
1987 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
1988 val |= DPIO_TX2_STAGGER_MASK(0x1f);
1989 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
1990
1991 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
1992 DPIO_LANESTAGGER_STRAP(stagger) |
1993 DPIO_LANESTAGGER_STRAP_OVRD |
1994 DPIO_TX1_STAGGER_MASK(0x1f) |
1995 DPIO_TX1_STAGGER_MULT(6) |
1996 DPIO_TX2_STAGGER_MULT(0));
1997
1998 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
1999 DPIO_LANESTAGGER_STRAP(stagger) |
2000 DPIO_LANESTAGGER_STRAP_OVRD |
2001 DPIO_TX1_STAGGER_MASK(0x1f) |
2002 DPIO_TX1_STAGGER_MULT(7) |
2003 DPIO_TX2_STAGGER_MULT(5));
2004
2005 /* Deassert data lane reset */
2006 chv_data_lane_soft_reset(encoder, false);
2007
2008 /* Clear calc init */
2009 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
2010 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2011 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
2012 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
2013 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
2014
2015 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
2016 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2017 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
2018 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
2019 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
2020
2021 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
2022 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
2023 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
2024 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
2025
2026 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
2027 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
2028 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
2029 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
2030 1746
2031 /* FIXME: Program the support xxx V-dB */ 1747 /* FIXME: Program the support xxx V-dB */
2032 /* Use 800mV-0dB */ 1748 /* Use 800mV-0dB */
2033 for (i = 0; i < 4; i++) { 1749 chv_set_phy_signal_level(encoder, 128, 102, false);
2034 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
2035 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
2036 val |= 128 << DPIO_SWING_DEEMPH9P5_SHIFT;
2037 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
2038 }
2039
2040 for (i = 0; i < 4; i++) {
2041 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
2042
2043 val &= ~DPIO_SWING_MARGIN000_MASK;
2044 val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
2045
2046 /*
2047 * Supposedly this value shouldn't matter when unique transition
2048 * scale is disabled, but in fact it does matter. Let's just
2049 * always program the same value and hope it's OK.
2050 */
2051 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
2052 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
2053
2054 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
2055 }
2056
2057 /*
2058 * The document said it needs to set bit 27 for ch0 and bit 26
2059 * for ch1. Might be a typo in the doc.
2060 * For now, for this unique transition scale selection, set bit
2061 * 27 for ch0 and ch1.
2062 */
2063 for (i = 0; i < 4; i++) {
2064 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
2065 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
2066 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
2067 }
2068
2069 /* Start swing calculation */
2070 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
2071 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
2072 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
2073
2074 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
2075 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
2076 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
2077
2078 mutex_unlock(&dev_priv->sb_lock);
2079 1750
2080 intel_hdmi->set_infoframes(&encoder->base, 1751 intel_hdmi->set_infoframes(&encoder->base,
2081 intel_crtc->config->has_hdmi_sink, 1752 intel_crtc->config->has_hdmi_sink,
@@ -2086,10 +1757,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
2086 vlv_wait_port_ready(dev_priv, dport, 0x0); 1757 vlv_wait_port_ready(dev_priv, dport, 0x0);
2087 1758
2088 /* Second common lane will stay alive on its own now */ 1759 /* Second common lane will stay alive on its own now */
2089 if (dport->release_cl2_override) { 1760 chv_phy_release_cl2_override(encoder);
2090 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2091 dport->release_cl2_override = false;
2092 }
2093} 1761}
2094 1762
2095static void intel_hdmi_destroy(struct drm_connector *connector) 1763static void intel_hdmi_destroy(struct drm_connector *connector)
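The HDMI hunks above replace the open-coded VLV/CHV DPIO programming (TX FIFO reset source, lane stagger, swing and de-emphasis) with shared vlv_phy_*/chv_phy_* helpers, leaving only the HDMI-specific signal levels at the call sites. One piece of that moved logic is the data lane stagger value, which scales with port clock; the sketch below copies the thresholds from the removed chv_hdmi_pre_enable() code.

#include <stdio.h>

/* Data lane stagger value as a function of port clock (kHz), matching
 * the thresholds in the removed chv_hdmi_pre_enable() code. */
static unsigned int chv_data_lane_stagger(int port_clock)
{
	if (port_clock > 270000)
		return 0x18;
	else if (port_clock > 135000)
		return 0xd;
	else if (port_clock > 67500)
		return 0x7;
	else if (port_clock > 33750)
		return 0x4;
	else
		return 0x2;
}

int main(void)
{
	const int clocks[] = { 25175, 74250, 148500, 297000 };

	for (unsigned int i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++)
		printf("%6d kHz -> stagger 0x%x\n",
		       clocks[i], chv_data_lane_stagger(clocks[i]));
	return 0;
}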
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index bee673005d48..38eeca7a6e72 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -220,7 +220,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
220 } 220 }
221 } 221 }
222 if (dev_priv->display.hpd_irq_setup) 222 if (dev_priv->display.hpd_irq_setup)
223 dev_priv->display.hpd_irq_setup(dev); 223 dev_priv->display.hpd_irq_setup(dev_priv);
224 spin_unlock_irq(&dev_priv->irq_lock); 224 spin_unlock_irq(&dev_priv->irq_lock);
225 225
226 intel_runtime_pm_put(dev_priv); 226 intel_runtime_pm_put(dev_priv);
@@ -346,7 +346,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
346 346
347/** 347/**
348 * intel_hpd_irq_handler - main hotplug irq handler 348 * intel_hpd_irq_handler - main hotplug irq handler
349 * @dev: drm device 349 * @dev_priv: drm_i915_private
350 * @pin_mask: a mask of hpd pins that have triggered the irq 350 * @pin_mask: a mask of hpd pins that have triggered the irq
351 * @long_mask: a mask of hpd pins that may be long hpd pulses 351 * @long_mask: a mask of hpd pins that may be long hpd pulses
352 * 352 *
@@ -360,10 +360,9 @@ static void i915_hotplug_work_func(struct work_struct *work)
360 * Here, we do hotplug irq storm detection and mitigation, and pass further 360 * Here, we do hotplug irq storm detection and mitigation, and pass further
361 * processing to appropriate bottom halves. 361 * processing to appropriate bottom halves.
362 */ 362 */
363void intel_hpd_irq_handler(struct drm_device *dev, 363void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
364 u32 pin_mask, u32 long_mask) 364 u32 pin_mask, u32 long_mask)
365{ 365{
366 struct drm_i915_private *dev_priv = dev->dev_private;
367 int i; 366 int i;
368 enum port port; 367 enum port port;
369 bool storm_detected = false; 368 bool storm_detected = false;
@@ -407,7 +406,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
407 * hotplug bits itself. So only WARN about unexpected 406 * hotplug bits itself. So only WARN about unexpected
408 * interrupts on saner platforms. 407 * interrupts on saner platforms.
409 */ 408 */
410 WARN_ONCE(!HAS_GMCH_DISPLAY(dev), 409 WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
411 "Received HPD interrupt on pin %d although disabled\n", i); 410 "Received HPD interrupt on pin %d although disabled\n", i);
412 continue; 411 continue;
413 } 412 }
@@ -427,7 +426,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
427 } 426 }
428 427
429 if (storm_detected) 428 if (storm_detected)
430 dev_priv->display.hpd_irq_setup(dev); 429 dev_priv->display.hpd_irq_setup(dev_priv);
431 spin_unlock(&dev_priv->irq_lock); 430 spin_unlock(&dev_priv->irq_lock);
432 431
433 /* 432 /*
@@ -485,7 +484,7 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
485 */ 484 */
486 spin_lock_irq(&dev_priv->irq_lock); 485 spin_lock_irq(&dev_priv->irq_lock);
487 if (dev_priv->display.hpd_irq_setup) 486 if (dev_priv->display.hpd_irq_setup)
488 dev_priv->display.hpd_irq_setup(dev); 487 dev_priv->display.hpd_irq_setup(dev_priv);
489 spin_unlock_irq(&dev_priv->irq_lock); 488 spin_unlock_irq(&dev_priv->irq_lock);
490} 489}
491 490
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 42eac37de047..db10c961e0f4 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -224,9 +224,15 @@ enum {
224 FAULT_AND_CONTINUE /* Unsupported */ 224 FAULT_AND_CONTINUE /* Unsupported */
225}; 225};
226#define GEN8_CTX_ID_SHIFT 32 226#define GEN8_CTX_ID_SHIFT 32
227#define GEN8_CTX_ID_WIDTH 21
227#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 228#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
228#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26 229#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
229 230
231/* Typical size of the average request (2 pipecontrols and a MI_BB) */
232#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
233
234static int execlists_context_deferred_alloc(struct intel_context *ctx,
235 struct intel_engine_cs *engine);
230static int intel_lr_context_pin(struct intel_context *ctx, 236static int intel_lr_context_pin(struct intel_context *ctx,
231 struct intel_engine_cs *engine); 237 struct intel_engine_cs *engine);
232 238
@@ -240,23 +246,22 @@ static int intel_lr_context_pin(struct intel_context *ctx,
240 * 246 *
241 * Return: 1 if Execlists is supported and has to be enabled. 247 * Return: 1 if Execlists is supported and has to be enabled.
242 */ 248 */
243int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists) 249int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
244{ 250{
245 WARN_ON(i915.enable_ppgtt == -1);
246
247 /* On platforms with execlist available, vGPU will only 251 /* On platforms with execlist available, vGPU will only
248 * support execlist mode, no ring buffer mode. 252 * support execlist mode, no ring buffer mode.
249 */ 253 */
250 if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev)) 254 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
251 return 1; 255 return 1;
252 256
253 if (INTEL_INFO(dev)->gen >= 9) 257 if (INTEL_GEN(dev_priv) >= 9)
254 return 1; 258 return 1;
255 259
256 if (enable_execlists == 0) 260 if (enable_execlists == 0)
257 return 0; 261 return 0;
258 262
259 if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) && 263 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
264 USES_PPGTT(dev_priv) &&
260 i915.use_mmio_flip >= 0) 265 i915.use_mmio_flip >= 0)
261 return 1; 266 return 1;
262 267
@@ -266,19 +271,19 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
266static void 271static void
267logical_ring_init_platform_invariants(struct intel_engine_cs *engine) 272logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
268{ 273{
269 struct drm_device *dev = engine->dev; 274 struct drm_i915_private *dev_priv = engine->i915;
270 275
271 if (IS_GEN8(dev) || IS_GEN9(dev)) 276 if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
272 engine->idle_lite_restore_wa = ~0; 277 engine->idle_lite_restore_wa = ~0;
273 278
274 engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 279 engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
275 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) && 280 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
276 (engine->id == VCS || engine->id == VCS2); 281 (engine->id == VCS || engine->id == VCS2);
277 282
278 engine->ctx_desc_template = GEN8_CTX_VALID; 283 engine->ctx_desc_template = GEN8_CTX_VALID;
279 engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) << 284 engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
280 GEN8_CTX_ADDRESSING_MODE_SHIFT; 285 GEN8_CTX_ADDRESSING_MODE_SHIFT;
281 if (IS_GEN8(dev)) 286 if (IS_GEN8(dev_priv))
282 engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT; 287 engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
283 engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE; 288 engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
284 289
@@ -305,23 +310,24 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
305 * which remains valid until the context is unpinned. 310 * which remains valid until the context is unpinned.
306 * 311 *
307 * This is what a descriptor looks like, from LSB to MSB: 312 * This is what a descriptor looks like, from LSB to MSB:
308 * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template) 313 * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
309 * bits 12-31: LRCA, GTT address of (the HWSP of) this context 314 * bits 12-31: LRCA, GTT address of (the HWSP of) this context
310 * bits 32-51: ctx ID, a globally unique tag (the LRCA again!) 315 * bits 32-52: ctx ID, a globally unique tag
311 * bits 52-63: reserved, may encode the engine ID (for GuC) 316 * bits 53-54: mbz, reserved for use by hardware
317 * bits 55-63: group ID, currently unused and set to 0
312 */ 318 */
313static void 319static void
314intel_lr_context_descriptor_update(struct intel_context *ctx, 320intel_lr_context_descriptor_update(struct intel_context *ctx,
315 struct intel_engine_cs *engine) 321 struct intel_engine_cs *engine)
316{ 322{
317 uint64_t lrca, desc; 323 u64 desc;
318 324
319 lrca = ctx->engine[engine->id].lrc_vma->node.start + 325 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
320 LRC_PPHWSP_PN * PAGE_SIZE;
321 326
322 desc = engine->ctx_desc_template; /* bits 0-11 */ 327 desc = engine->ctx_desc_template; /* bits 0-11 */
323 desc |= lrca; /* bits 12-31 */ 328 desc |= ctx->engine[engine->id].lrc_vma->node.start + /* bits 12-31 */
324 desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */ 329 LRC_PPHWSP_PN * PAGE_SIZE;
330 desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
325 331
326 ctx->engine[engine->id].lrc_desc = desc; 332 ctx->engine[engine->id].lrc_desc = desc;
327} 333}
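With this change the upper half of the context descriptor carries the software-assigned ctx->hw_id instead of recycling the LRCA, so the ID stays stable even if the context image moves in the GGTT. A worked sketch of the bit packing follows; the template flag value, the GGTT address, and the hw_id in main() are example inputs I made up, not values from the driver.

#include <stdint.h>
#include <stdio.h>

#define GEN8_CTX_ID_SHIFT	32
#define GEN8_CTX_ID_WIDTH	21

/* Compose a context descriptor: bits 0-11 are the cached template
 * flags, bits 12-31 the GGTT address of the context image (page
 * aligned), bits 32-52 the globally unique hw context ID. */
static uint64_t lrc_descriptor(uint32_t template_flags,
			       uint64_t lrc_ggtt_addr, uint32_t hw_id)
{
	uint64_t desc;

	desc = template_flags;				/* bits 0-11  */
	desc |= lrc_ggtt_addr;				/* bits 12-31 */
	desc |= (uint64_t)hw_id << GEN8_CTX_ID_SHIFT;	/* bits 32-52 */

	return desc;
}

int main(void)
{
	/* Example inputs: valid bit set, image at GGTT 0x12345000, hw_id 42. */
	uint64_t desc = lrc_descriptor(0x1, 0x12345000ull, 42);

	printf("descriptor = 0x%016llx\n", (unsigned long long)desc);
	printf("ctx id     = %llu\n",
	       (unsigned long long)((desc >> GEN8_CTX_ID_SHIFT) &
				    ((1u << GEN8_CTX_ID_WIDTH) - 1)));
	return 0;
}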
@@ -332,35 +338,12 @@ uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
332 return ctx->engine[engine->id].lrc_desc; 338 return ctx->engine[engine->id].lrc_desc;
333} 339}
334 340
335/**
336 * intel_execlists_ctx_id() - get the Execlists Context ID
337 * @ctx: Context to get the ID for
338 * @ring: Engine to get the ID for
339 *
340 * Do not confuse with ctx->id! Unfortunately we have a name overload
341 * here: the old context ID we pass to userspace as a handler so that
342 * they can refer to a context, and the new context ID we pass to the
343 * ELSP so that the GPU can inform us of the context status via
344 * interrupts.
345 *
346 * The context ID is a portion of the context descriptor, so we can
347 * just extract the required part from the cached descriptor.
348 *
349 * Return: 20-bits globally unique context ID.
350 */
351u32 intel_execlists_ctx_id(struct intel_context *ctx,
352 struct intel_engine_cs *engine)
353{
354 return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT;
355}
356
357static void execlists_elsp_write(struct drm_i915_gem_request *rq0, 341static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
358 struct drm_i915_gem_request *rq1) 342 struct drm_i915_gem_request *rq1)
359{ 343{
360 344
361 struct intel_engine_cs *engine = rq0->engine; 345 struct intel_engine_cs *engine = rq0->engine;
362 struct drm_device *dev = engine->dev; 346 struct drm_i915_private *dev_priv = rq0->i915;
363 struct drm_i915_private *dev_priv = dev->dev_private;
364 uint64_t desc[2]; 347 uint64_t desc[2];
365 348
366 if (rq1) { 349 if (rq1) {
@@ -442,7 +425,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
442 * If irqs are not active generate a warning as batches that finish 425 * If irqs are not active generate a warning as batches that finish
443 * without the irqs may get lost and a GPU Hang may occur. 426 * without the irqs may get lost and a GPU Hang may occur.
444 */ 427 */
445 WARN_ON(!intel_irqs_enabled(engine->dev->dev_private)); 428 WARN_ON(!intel_irqs_enabled(engine->i915));
446 429
447 /* Try to read in pairs */ 430 /* Try to read in pairs */
448 list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue, 431 list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
@@ -453,8 +436,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
453 /* Same ctx: ignore first request, as second request 436 /* Same ctx: ignore first request, as second request
454 * will update tail past first request's workload */ 437 * will update tail past first request's workload */
455 cursor->elsp_submitted = req0->elsp_submitted; 438 cursor->elsp_submitted = req0->elsp_submitted;
456 list_move_tail(&req0->execlist_link, 439 list_del(&req0->execlist_link);
457 &engine->execlist_retired_req_list); 440 i915_gem_request_unreference(req0);
458 req0 = cursor; 441 req0 = cursor;
459 } else { 442 } else {
460 req1 = cursor; 443 req1 = cursor;
@@ -486,7 +469,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
486} 469}
487 470
488static unsigned int 471static unsigned int
489execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id) 472execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
490{ 473{
491 struct drm_i915_gem_request *head_req; 474 struct drm_i915_gem_request *head_req;
492 475
@@ -496,19 +479,16 @@ execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
496 struct drm_i915_gem_request, 479 struct drm_i915_gem_request,
497 execlist_link); 480 execlist_link);
498 481
499 if (!head_req) 482 if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
500 return 0; 483 return 0;
501
502 if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id))
503 return 0;
504 484
505 WARN(head_req->elsp_submitted == 0, "Never submitted head request\n"); 485 WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
506 486
507 if (--head_req->elsp_submitted > 0) 487 if (--head_req->elsp_submitted > 0)
508 return 0; 488 return 0;
509 489
510 list_move_tail(&head_req->execlist_link, 490 list_del(&head_req->execlist_link);
511 &engine->execlist_retired_req_list); 491 i915_gem_request_unreference(head_req);
512 492
513 return 1; 493 return 1;
514} 494}
@@ -517,7 +497,7 @@ static u32
517get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer, 497get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
518 u32 *context_id) 498 u32 *context_id)
519{ 499{
520 struct drm_i915_private *dev_priv = engine->dev->dev_private; 500 struct drm_i915_private *dev_priv = engine->i915;
521 u32 status; 501 u32 status;
522 502
523 read_pointer %= GEN8_CSB_ENTRIES; 503 read_pointer %= GEN8_CSB_ENTRIES;
@@ -543,7 +523,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
543static void intel_lrc_irq_handler(unsigned long data) 523static void intel_lrc_irq_handler(unsigned long data)
544{ 524{
545 struct intel_engine_cs *engine = (struct intel_engine_cs *)data; 525 struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
546 struct drm_i915_private *dev_priv = engine->dev->dev_private; 526 struct drm_i915_private *dev_priv = engine->i915;
547 u32 status_pointer; 527 u32 status_pointer;
548 unsigned int read_pointer, write_pointer; 528 unsigned int read_pointer, write_pointer;
549 u32 csb[GEN8_CSB_ENTRIES][2]; 529 u32 csb[GEN8_CSB_ENTRIES][2];
@@ -612,11 +592,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
612 struct drm_i915_gem_request *cursor; 592 struct drm_i915_gem_request *cursor;
613 int num_elements = 0; 593 int num_elements = 0;
614 594
615 if (request->ctx != request->i915->kernel_context)
616 intel_lr_context_pin(request->ctx, engine);
617
618 i915_gem_request_reference(request);
619
620 spin_lock_bh(&engine->execlist_lock); 595 spin_lock_bh(&engine->execlist_lock);
621 596
622 list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) 597 list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
@@ -633,12 +608,14 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
633 if (request->ctx == tail_req->ctx) { 608 if (request->ctx == tail_req->ctx) {
634 WARN(tail_req->elsp_submitted != 0, 609 WARN(tail_req->elsp_submitted != 0,
635 "More than 2 already-submitted reqs queued\n"); 610 "More than 2 already-submitted reqs queued\n");
636 list_move_tail(&tail_req->execlist_link, 611 list_del(&tail_req->execlist_link);
637 &engine->execlist_retired_req_list); 612 i915_gem_request_unreference(tail_req);
638 } 613 }
639 } 614 }
640 615
616 i915_gem_request_reference(request);
641 list_add_tail(&request->execlist_link, &engine->execlist_queue); 617 list_add_tail(&request->execlist_link, &engine->execlist_queue);
618 request->ctx_hw_id = request->ctx->hw_id;
642 if (num_elements == 0) 619 if (num_elements == 0)
643 execlists_context_unqueue(engine); 620 execlists_context_unqueue(engine);
644 621
@@ -698,9 +675,22 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
698 675
699int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request) 676int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
700{ 677{
701 int ret = 0; 678 struct intel_engine_cs *engine = request->engine;
679 int ret;
702 680
703 request->ringbuf = request->ctx->engine[request->engine->id].ringbuf; 681 /* Flush enough space to reduce the likelihood of waiting after
682 * we start building the request - in which case we will just
683 * have to repeat work.
684 */
685 request->reserved_space += EXECLISTS_REQUEST_SIZE;
686
687 if (request->ctx->engine[engine->id].state == NULL) {
688 ret = execlists_context_deferred_alloc(request->ctx, engine);
689 if (ret)
690 return ret;
691 }
692
693 request->ringbuf = request->ctx->engine[engine->id].ringbuf;
704 694
705 if (i915.enable_guc_submission) { 695 if (i915.enable_guc_submission) {
706 /* 696 /*
@@ -715,9 +705,34 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
715 return ret; 705 return ret;
716 } 706 }
717 707
718 if (request->ctx != request->i915->kernel_context) 708 ret = intel_lr_context_pin(request->ctx, engine);
719 ret = intel_lr_context_pin(request->ctx, request->engine); 709 if (ret)
710 return ret;
711
712 ret = intel_ring_begin(request, 0);
713 if (ret)
714 goto err_unpin;
715
716 if (!request->ctx->engine[engine->id].initialised) {
717 ret = engine->init_context(request);
718 if (ret)
719 goto err_unpin;
720
721 request->ctx->engine[engine->id].initialised = true;
722 }
723
724 /* Note that after this point, we have committed to using
725 * this request as it is being used to both track the
726 * state of engine initialisation and liveness of the
727 * golden renderstate above. Think twice before you try
728 * to cancel/unwind this request now.
729 */
730
731 request->reserved_space -= EXECLISTS_REQUEST_SIZE;
732 return 0;
720 733
734err_unpin:
735 intel_lr_context_unpin(request->ctx, engine);
721 return ret; 736 return ret;
722} 737}
723 738
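intel_logical_ring_alloc_request_extras() now reserves EXECLISTS_REQUEST_SIZE up front, performs the work that may itself emit commands (deferred context allocation, pinning, golden render state init), and only hands the reservation back once the request is committed. A small sketch of that reserve/consume pattern in isolation; the request structure here is a simplified stand-in, not the driver's type.

#include <stdio.h>

#define EXECLISTS_REQUEST_SIZE 64	/* bytes, typical request size */

struct fake_request {
	int reserved_space;
};

/* Reserve room for the final breadcrumb commands before building the
 * request body, so emitting the body cannot consume the space the
 * submission itself will need. */
static int request_alloc(struct fake_request *req)
{
	req->reserved_space += EXECLISTS_REQUEST_SIZE;

	/* ... ring_begin(), init_context(), etc. would run here; on
	 * failure the reservation is abandoned along with the request. */

	/* Committed: hand the reserved bytes back to the normal budget. */
	req->reserved_space -= EXECLISTS_REQUEST_SIZE;
	return 0;
}

int main(void)
{
	struct fake_request req = { 0 };

	if (request_alloc(&req) == 0)
		printf("request built, reserved_space=%d\n",
		       req.reserved_space);
	return 0;
}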
@@ -753,16 +768,14 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
753 if (intel_engine_stopped(engine)) 768 if (intel_engine_stopped(engine))
754 return 0; 769 return 0;
755 770
756 if (engine->last_context != request->ctx) { 771 /* We keep the previous context alive until we retire the following
757 if (engine->last_context) 772 * request. This ensures that any the context object is still pinned
758 intel_lr_context_unpin(engine->last_context, engine); 773 * for any residual writes the HW makes into it on the context switch
759 if (request->ctx != request->i915->kernel_context) { 774 * into the next object following the breadcrumb. Otherwise, we may
760 intel_lr_context_pin(request->ctx, engine); 775 * retire the context too early.
761 engine->last_context = request->ctx; 776 */
762 } else { 777 request->previous_context = engine->last_context;
763 engine->last_context = NULL; 778 engine->last_context = request->ctx;
764 }
765 }
766 779
767 if (dev_priv->guc.execbuf_client) 780 if (dev_priv->guc.execbuf_client)
768 i915_guc_submit(dev_priv->guc.execbuf_client, request); 781 i915_guc_submit(dev_priv->guc.execbuf_client, request);
@@ -772,21 +785,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
772 return 0; 785 return 0;
773} 786}
774 787
775int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
776{
777 /*
778 * The first call merely notes the reserve request and is common for
779 * all back ends. The subsequent localised _begin() call actually
780 * ensures that the reservation is available. Without the begin, if
781 * the request creator immediately submitted the request without
782 * adding any commands to it then there might not actually be
783 * sufficient room for the submission commands.
784 */
785 intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
786
787 return intel_ring_begin(request, 0);
788}
789
790/** 788/**
791 * execlists_submission() - submit a batchbuffer for execution, Execlists style 789 * execlists_submission() - submit a batchbuffer for execution, Execlists style
792 * @dev: DRM device. 790 * @dev: DRM device.
@@ -881,28 +879,18 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
881 return 0; 879 return 0;
882} 880}
883 881
884void intel_execlists_retire_requests(struct intel_engine_cs *engine) 882void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
885{ 883{
886 struct drm_i915_gem_request *req, *tmp; 884 struct drm_i915_gem_request *req, *tmp;
887 struct list_head retired_list; 885 LIST_HEAD(cancel_list);
888 886
889 WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); 887 WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex));
890 if (list_empty(&engine->execlist_retired_req_list))
891 return;
892 888
893 INIT_LIST_HEAD(&retired_list);
894 spin_lock_bh(&engine->execlist_lock); 889 spin_lock_bh(&engine->execlist_lock);
895 list_replace_init(&engine->execlist_retired_req_list, &retired_list); 890 list_replace_init(&engine->execlist_queue, &cancel_list);
896 spin_unlock_bh(&engine->execlist_lock); 891 spin_unlock_bh(&engine->execlist_lock);
897 892
898 list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) { 893 list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
899 struct intel_context *ctx = req->ctx;
900 struct drm_i915_gem_object *ctx_obj =
901 ctx->engine[engine->id].state;
902
903 if (ctx_obj && (ctx != req->i915->kernel_context))
904 intel_lr_context_unpin(ctx, engine);
905
906 list_del(&req->execlist_link); 894 list_del(&req->execlist_link);
907 i915_gem_request_unreference(req); 895 i915_gem_request_unreference(req);
908 } 896 }
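
The cancel path above uses the usual list-stealing idiom: hold the spinlock only long enough for list_replace_init() to move the whole execlist_queue onto a local LIST_HEAD, then drop each stolen entry without the lock held. A sketch of the idiom, assuming a kernel build environment with <linux/list.h> and <linux/spinlock.h> (struct item and item_put() are placeholders, not driver symbols):

/* Illustrative fragment; item_put() stands in for whatever drops the reference. */
struct item {
	struct list_head link;
	/* ... */
};

static void cancel_all(struct list_head *queue, spinlock_t *lock)
{
	struct item *it, *tmp;
	LIST_HEAD(cancel_list);

	/* Move every queued entry onto a private list while locked... */
	spin_lock_bh(lock);
	list_replace_init(queue, &cancel_list);
	spin_unlock_bh(lock);

	/* ...then dispose of the entries without holding the softirq-shared lock. */
	list_for_each_entry_safe(it, tmp, &cancel_list, link) {
		list_del(&it->link);
		item_put(it);
	}
}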
@@ -910,7 +898,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine)
910 898
911void intel_logical_ring_stop(struct intel_engine_cs *engine) 899void intel_logical_ring_stop(struct intel_engine_cs *engine)
912{ 900{
913 struct drm_i915_private *dev_priv = engine->dev->dev_private; 901 struct drm_i915_private *dev_priv = engine->i915;
914 int ret; 902 int ret;
915 903
916 if (!intel_engine_initialized(engine)) 904 if (!intel_engine_initialized(engine))
@@ -946,23 +934,26 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
946 return 0; 934 return 0;
947} 935}
948 936
949static int intel_lr_context_do_pin(struct intel_context *ctx, 937static int intel_lr_context_pin(struct intel_context *ctx,
950 struct intel_engine_cs *engine) 938 struct intel_engine_cs *engine)
951{ 939{
952 struct drm_device *dev = engine->dev; 940 struct drm_i915_private *dev_priv = ctx->i915;
953 struct drm_i915_private *dev_priv = dev->dev_private; 941 struct drm_i915_gem_object *ctx_obj;
954 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; 942 struct intel_ringbuffer *ringbuf;
955 struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
956 void *vaddr; 943 void *vaddr;
957 u32 *lrc_reg_state; 944 u32 *lrc_reg_state;
958 int ret; 945 int ret;
959 946
960 WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); 947 lockdep_assert_held(&ctx->i915->dev->struct_mutex);
961 948
949 if (ctx->engine[engine->id].pin_count++)
950 return 0;
951
952 ctx_obj = ctx->engine[engine->id].state;
962 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 953 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
963 PIN_OFFSET_BIAS | GUC_WOPCM_TOP); 954 PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
964 if (ret) 955 if (ret)
965 return ret; 956 goto err;
966 957
967 vaddr = i915_gem_object_pin_map(ctx_obj); 958 vaddr = i915_gem_object_pin_map(ctx_obj);
968 if (IS_ERR(vaddr)) { 959 if (IS_ERR(vaddr)) {
@@ -972,10 +963,12 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
972 963
973 lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; 964 lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
974 965
975 ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf); 966 ringbuf = ctx->engine[engine->id].ringbuf;
967 ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
976 if (ret) 968 if (ret)
977 goto unpin_map; 969 goto unpin_map;
978 970
971 i915_gem_context_reference(ctx);
979 ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj); 972 ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
980 intel_lr_context_descriptor_update(ctx, engine); 973 intel_lr_context_descriptor_update(ctx, engine);
981 lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start; 974 lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
@@ -986,51 +979,39 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
986 if (i915.enable_guc_submission) 979 if (i915.enable_guc_submission)
987 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); 980 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
988 981
989 return ret; 982 return 0;
990 983
991unpin_map: 984unpin_map:
992 i915_gem_object_unpin_map(ctx_obj); 985 i915_gem_object_unpin_map(ctx_obj);
993unpin_ctx_obj: 986unpin_ctx_obj:
994 i915_gem_object_ggtt_unpin(ctx_obj); 987 i915_gem_object_ggtt_unpin(ctx_obj);
995 988err:
989 ctx->engine[engine->id].pin_count = 0;
996 return ret; 990 return ret;
997} 991}
998 992
999static int intel_lr_context_pin(struct intel_context *ctx, 993void intel_lr_context_unpin(struct intel_context *ctx,
1000 struct intel_engine_cs *engine) 994 struct intel_engine_cs *engine)
1001{ 995{
1002 int ret = 0; 996 struct drm_i915_gem_object *ctx_obj;
1003 997
1004 if (ctx->engine[engine->id].pin_count++ == 0) { 998 lockdep_assert_held(&ctx->i915->dev->struct_mutex);
1005 ret = intel_lr_context_do_pin(ctx, engine); 999 GEM_BUG_ON(ctx->engine[engine->id].pin_count == 0);
1006 if (ret)
1007 goto reset_pin_count;
1008 1000
1009 i915_gem_context_reference(ctx); 1001 if (--ctx->engine[engine->id].pin_count)
1010 } 1002 return;
1011 return ret;
1012 1003
1013reset_pin_count: 1004 intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
1014 ctx->engine[engine->id].pin_count = 0;
1015 return ret;
1016}
1017 1005
1018void intel_lr_context_unpin(struct intel_context *ctx, 1006 ctx_obj = ctx->engine[engine->id].state;
1019 struct intel_engine_cs *engine) 1007 i915_gem_object_unpin_map(ctx_obj);
1020{ 1008 i915_gem_object_ggtt_unpin(ctx_obj);
1021 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
1022 1009
1023 WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex)); 1010 ctx->engine[engine->id].lrc_vma = NULL;
1024 if (--ctx->engine[engine->id].pin_count == 0) { 1011 ctx->engine[engine->id].lrc_desc = 0;
1025 i915_gem_object_unpin_map(ctx_obj); 1012 ctx->engine[engine->id].lrc_reg_state = NULL;
1026 intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
1027 i915_gem_object_ggtt_unpin(ctx_obj);
1028 ctx->engine[engine->id].lrc_vma = NULL;
1029 ctx->engine[engine->id].lrc_desc = 0;
1030 ctx->engine[engine->id].lrc_reg_state = NULL;
1031 1013
1032 i915_gem_context_unreference(ctx); 1014 i915_gem_context_unreference(ctx);
1033 }
1034} 1015}
1035 1016
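
intel_lr_context_pin()/unpin() now carry the pin_count themselves: every caller pins unconditionally, only the 0-to-1 transition does the GGTT pin, map and context reference, and only the final unpin tears it all down, with lockdep_assert_held() documenting that struct_mutex protects the counter. A minimal userspace model of that refcounted pin (error handling and the failure-path reset of pin_count are omitted here):

#include <assert.h>
#include <stdio.h>

struct lrc_state {
	int pin_count;
	int hw_pinned;   /* stands in for the GGTT pin + kmap of the context image */
};

static int  do_pin_hw(struct lrc_state *s)   { s->hw_pinned = 1; return 0; }
static void do_unpin_hw(struct lrc_state *s) { s->hw_pinned = 0; }

static int lrc_pin(struct lrc_state *s)
{
	if (s->pin_count++)          /* already pinned: just take another reference */
		return 0;
	return do_pin_hw(s);         /* first user does the real work */
}

static void lrc_unpin(struct lrc_state *s)
{
	assert(s->pin_count > 0);    /* GEM_BUG_ON() in the driver */
	if (--s->pin_count)
		return;
	do_unpin_hw(s);              /* last user tears everything down */
}

int main(void)
{
	struct lrc_state s = { 0 };

	lrc_pin(&s);
	lrc_pin(&s);
	lrc_unpin(&s);
	printf("pin_count=%d hw_pinned=%d\n", s.pin_count, s.hw_pinned);
	lrc_unpin(&s);
	printf("pin_count=%d hw_pinned=%d\n", s.pin_count, s.hw_pinned);
	return 0;
}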
1036static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) 1017static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
@@ -1038,9 +1019,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
1038 int ret, i; 1019 int ret, i;
1039 struct intel_engine_cs *engine = req->engine; 1020 struct intel_engine_cs *engine = req->engine;
1040 struct intel_ringbuffer *ringbuf = req->ringbuf; 1021 struct intel_ringbuffer *ringbuf = req->ringbuf;
1041 struct drm_device *dev = engine->dev; 1022 struct i915_workarounds *w = &req->i915->workarounds;
1042 struct drm_i915_private *dev_priv = dev->dev_private;
1043 struct i915_workarounds *w = &dev_priv->workarounds;
1044 1023
1045 if (w->count == 0) 1024 if (w->count == 0)
1046 return 0; 1025 return 0;
@@ -1111,7 +1090,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
1111 * this batch updates GEN8_L3SQCREG4 with default value we need to 1090 * this batch updates GEN8_L3SQCREG4 with default value we need to
1112 * set this bit here to retain the WA during flush. 1091 * set this bit here to retain the WA during flush.
1113 */ 1092 */
1114 if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0)) 1093 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0))
1115 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; 1094 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
1116 1095
1117 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | 1096 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1200,7 +1179,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
1200 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); 1179 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1201 1180
1202 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ 1181 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
1203 if (IS_BROADWELL(engine->dev)) { 1182 if (IS_BROADWELL(engine->i915)) {
1204 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index); 1183 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1205 if (rc < 0) 1184 if (rc < 0)
1206 return rc; 1185 return rc;
@@ -1272,12 +1251,11 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
1272 uint32_t *offset) 1251 uint32_t *offset)
1273{ 1252{
1274 int ret; 1253 int ret;
1275 struct drm_device *dev = engine->dev;
1276 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1254 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1277 1255
1278 /* WaDisableCtxRestoreArbitration:skl,bxt */ 1256 /* WaDisableCtxRestoreArbitration:skl,bxt */
1279 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || 1257 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1280 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 1258 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
1281 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); 1259 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1282 1260
1283 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */ 1261 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1298,12 +1276,11 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1298 uint32_t *const batch, 1276 uint32_t *const batch,
1299 uint32_t *offset) 1277 uint32_t *offset)
1300{ 1278{
1301 struct drm_device *dev = engine->dev;
1302 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1279 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1303 1280
1304 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ 1281 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
1305 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 1282 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
1306 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 1283 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
1307 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); 1284 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1308 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0); 1285 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
1309 wa_ctx_emit(batch, index, 1286 wa_ctx_emit(batch, index,
@@ -1312,7 +1289,7 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1312 } 1289 }
1313 1290
1314 /* WaClearTdlStateAckDirtyBits:bxt */ 1291 /* WaClearTdlStateAckDirtyBits:bxt */
1315 if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) { 1292 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
1316 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4)); 1293 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
1317 1294
1318 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK); 1295 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
@@ -1331,8 +1308,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1331 } 1308 }
1332 1309
1333 /* WaDisableCtxRestoreArbitration:skl,bxt */ 1310 /* WaDisableCtxRestoreArbitration:skl,bxt */
1334 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || 1311 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1335 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 1312 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
1336 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE); 1313 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1337 1314
1338 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END); 1315 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1344,11 +1321,13 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
1344{ 1321{
1345 int ret; 1322 int ret;
1346 1323
1347 engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev, 1324 engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev,
1348 PAGE_ALIGN(size)); 1325 PAGE_ALIGN(size));
1349 if (!engine->wa_ctx.obj) { 1326 if (IS_ERR(engine->wa_ctx.obj)) {
1350 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n"); 1327 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
1351 return -ENOMEM; 1328 ret = PTR_ERR(engine->wa_ctx.obj);
1329 engine->wa_ctx.obj = NULL;
1330 return ret;
1352 } 1331 }
1353 1332
1354 ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0); 1333 ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
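
The workaround-context allocation above also switches to the ERR_PTR convention: i915_gem_object_create() reports failure as an encoded errno rather than NULL, so the caller tests IS_ERR() and propagates PTR_ERR() instead of assuming -ENOMEM. A generic sketch of that pattern, assuming <linux/err.h> and <linux/types.h> (foo_create() and the surrounding types are placeholders):

struct foo;
struct foo *foo_create(size_t size);   /* returns ERR_PTR(-errno) on failure */

struct bar {
	struct foo *obj;
};

static int bar_setup(struct bar *bar, size_t size)
{
	struct foo *obj = foo_create(size);

	if (IS_ERR(obj))
		return PTR_ERR(obj);   /* hand the real errno back, e.g. -ENOMEM */

	bar->obj = obj;
	return 0;
}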
@@ -1382,9 +1361,9 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1382 WARN_ON(engine->id != RCS); 1361 WARN_ON(engine->id != RCS);
1383 1362
1384 /* update this when WA for higher Gen are added */ 1363 /* update this when WA for higher Gen are added */
1385 if (INTEL_INFO(engine->dev)->gen > 9) { 1364 if (INTEL_GEN(engine->i915) > 9) {
1386 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n", 1365 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
1387 INTEL_INFO(engine->dev)->gen); 1366 INTEL_GEN(engine->i915));
1388 return 0; 1367 return 0;
1389 } 1368 }
1390 1369
@@ -1404,7 +1383,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1404 batch = kmap_atomic(page); 1383 batch = kmap_atomic(page);
1405 offset = 0; 1384 offset = 0;
1406 1385
1407 if (INTEL_INFO(engine->dev)->gen == 8) { 1386 if (IS_GEN8(engine->i915)) {
1408 ret = gen8_init_indirectctx_bb(engine, 1387 ret = gen8_init_indirectctx_bb(engine,
1409 &wa_ctx->indirect_ctx, 1388 &wa_ctx->indirect_ctx,
1410 batch, 1389 batch,
@@ -1418,7 +1397,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1418 &offset); 1397 &offset);
1419 if (ret) 1398 if (ret)
1420 goto out; 1399 goto out;
1421 } else if (INTEL_INFO(engine->dev)->gen == 9) { 1400 } else if (IS_GEN9(engine->i915)) {
1422 ret = gen9_init_indirectctx_bb(engine, 1401 ret = gen9_init_indirectctx_bb(engine,
1423 &wa_ctx->indirect_ctx, 1402 &wa_ctx->indirect_ctx,
1424 batch, 1403 batch,
@@ -1444,7 +1423,7 @@ out:
1444 1423
1445static void lrc_init_hws(struct intel_engine_cs *engine) 1424static void lrc_init_hws(struct intel_engine_cs *engine)
1446{ 1425{
1447 struct drm_i915_private *dev_priv = engine->dev->dev_private; 1426 struct drm_i915_private *dev_priv = engine->i915;
1448 1427
1449 I915_WRITE(RING_HWS_PGA(engine->mmio_base), 1428 I915_WRITE(RING_HWS_PGA(engine->mmio_base),
1450 (u32)engine->status_page.gfx_addr); 1429 (u32)engine->status_page.gfx_addr);
@@ -1453,8 +1432,7 @@ static void lrc_init_hws(struct intel_engine_cs *engine)
1453 1432
1454static int gen8_init_common_ring(struct intel_engine_cs *engine) 1433static int gen8_init_common_ring(struct intel_engine_cs *engine)
1455{ 1434{
1456 struct drm_device *dev = engine->dev; 1435 struct drm_i915_private *dev_priv = engine->i915;
1457 struct drm_i915_private *dev_priv = dev->dev_private;
1458 unsigned int next_context_status_buffer_hw; 1436 unsigned int next_context_status_buffer_hw;
1459 1437
1460 lrc_init_hws(engine); 1438 lrc_init_hws(engine);
@@ -1501,8 +1479,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
1501 1479
1502static int gen8_init_render_ring(struct intel_engine_cs *engine) 1480static int gen8_init_render_ring(struct intel_engine_cs *engine)
1503{ 1481{
1504 struct drm_device *dev = engine->dev; 1482 struct drm_i915_private *dev_priv = engine->i915;
1505 struct drm_i915_private *dev_priv = dev->dev_private;
1506 int ret; 1483 int ret;
1507 1484
1508 ret = gen8_init_common_ring(engine); 1485 ret = gen8_init_common_ring(engine);
@@ -1579,7 +1556,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1579 if (req->ctx->ppgtt && 1556 if (req->ctx->ppgtt &&
1580 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) { 1557 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
1581 if (!USES_FULL_48BIT_PPGTT(req->i915) && 1558 if (!USES_FULL_48BIT_PPGTT(req->i915) &&
1582 !intel_vgpu_active(req->i915->dev)) { 1559 !intel_vgpu_active(req->i915)) {
1583 ret = intel_logical_ring_emit_pdps(req); 1560 ret = intel_logical_ring_emit_pdps(req);
1584 if (ret) 1561 if (ret)
1585 return ret; 1562 return ret;
@@ -1607,8 +1584,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1607 1584
1608static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine) 1585static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
1609{ 1586{
1610 struct drm_device *dev = engine->dev; 1587 struct drm_i915_private *dev_priv = engine->i915;
1611 struct drm_i915_private *dev_priv = dev->dev_private;
1612 unsigned long flags; 1588 unsigned long flags;
1613 1589
1614 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 1590 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1627,8 +1603,7 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
1627 1603
1628static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine) 1604static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
1629{ 1605{
1630 struct drm_device *dev = engine->dev; 1606 struct drm_i915_private *dev_priv = engine->i915;
1631 struct drm_i915_private *dev_priv = dev->dev_private;
1632 unsigned long flags; 1607 unsigned long flags;
1633 1608
1634 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1609 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1645,8 +1620,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
1645{ 1620{
1646 struct intel_ringbuffer *ringbuf = request->ringbuf; 1621 struct intel_ringbuffer *ringbuf = request->ringbuf;
1647 struct intel_engine_cs *engine = ringbuf->engine; 1622 struct intel_engine_cs *engine = ringbuf->engine;
1648 struct drm_device *dev = engine->dev; 1623 struct drm_i915_private *dev_priv = request->i915;
1649 struct drm_i915_private *dev_priv = dev->dev_private;
1650 uint32_t cmd; 1624 uint32_t cmd;
1651 int ret; 1625 int ret;
1652 1626
@@ -1714,7 +1688,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1714 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL 1688 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
1715 * pipe control. 1689 * pipe control.
1716 */ 1690 */
1717 if (IS_GEN9(engine->dev)) 1691 if (IS_GEN9(request->i915))
1718 vf_flush_wa = true; 1692 vf_flush_wa = true;
1719 } 1693 }
1720 1694
@@ -1782,11 +1756,6 @@ static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1782 */ 1756 */
1783#define WA_TAIL_DWORDS 2 1757#define WA_TAIL_DWORDS 2
1784 1758
1785static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
1786{
1787 return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
1788}
1789
1790static int gen8_emit_request(struct drm_i915_gem_request *request) 1759static int gen8_emit_request(struct drm_i915_gem_request *request)
1791{ 1760{
1792 struct intel_ringbuffer *ringbuf = request->ringbuf; 1761 struct intel_ringbuffer *ringbuf = request->ringbuf;
@@ -1802,7 +1771,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
1802 intel_logical_ring_emit(ringbuf, 1771 intel_logical_ring_emit(ringbuf,
1803 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); 1772 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1804 intel_logical_ring_emit(ringbuf, 1773 intel_logical_ring_emit(ringbuf,
1805 hws_seqno_address(request->engine) | 1774 intel_hws_seqno_address(request->engine) |
1806 MI_FLUSH_DW_USE_GTT); 1775 MI_FLUSH_DW_USE_GTT);
1807 intel_logical_ring_emit(ringbuf, 0); 1776 intel_logical_ring_emit(ringbuf, 0);
1808 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); 1777 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
@@ -1832,7 +1801,8 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1832 (PIPE_CONTROL_GLOBAL_GTT_IVB | 1801 (PIPE_CONTROL_GLOBAL_GTT_IVB |
1833 PIPE_CONTROL_CS_STALL | 1802 PIPE_CONTROL_CS_STALL |
1834 PIPE_CONTROL_QW_WRITE)); 1803 PIPE_CONTROL_QW_WRITE));
1835 intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine)); 1804 intel_logical_ring_emit(ringbuf,
1805 intel_hws_seqno_address(request->engine));
1836 intel_logical_ring_emit(ringbuf, 0); 1806 intel_logical_ring_emit(ringbuf, 0);
1837 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); 1807 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
1838 /* We're thrashing one dword of HWS. */ 1808 /* We're thrashing one dword of HWS. */
@@ -1911,7 +1881,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
1911 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state))) 1881 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
1912 tasklet_kill(&engine->irq_tasklet); 1882 tasklet_kill(&engine->irq_tasklet);
1913 1883
1914 dev_priv = engine->dev->dev_private; 1884 dev_priv = engine->i915;
1915 1885
1916 if (engine->buffer) { 1886 if (engine->buffer) {
1917 intel_logical_ring_stop(engine); 1887 intel_logical_ring_stop(engine);
@@ -1928,18 +1898,18 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
1928 i915_gem_object_unpin_map(engine->status_page.obj); 1898 i915_gem_object_unpin_map(engine->status_page.obj);
1929 engine->status_page.obj = NULL; 1899 engine->status_page.obj = NULL;
1930 } 1900 }
1901 intel_lr_context_unpin(dev_priv->kernel_context, engine);
1931 1902
1932 engine->idle_lite_restore_wa = 0; 1903 engine->idle_lite_restore_wa = 0;
1933 engine->disable_lite_restore_wa = false; 1904 engine->disable_lite_restore_wa = false;
1934 engine->ctx_desc_template = 0; 1905 engine->ctx_desc_template = 0;
1935 1906
1936 lrc_destroy_wa_ctx_obj(engine); 1907 lrc_destroy_wa_ctx_obj(engine);
1937 engine->dev = NULL; 1908 engine->i915 = NULL;
1938} 1909}
1939 1910
1940static void 1911static void
1941logical_ring_default_vfuncs(struct drm_device *dev, 1912logical_ring_default_vfuncs(struct intel_engine_cs *engine)
1942 struct intel_engine_cs *engine)
1943{ 1913{
1944 /* Default vfuncs which can be overriden by each engine. */ 1914 /* Default vfuncs which can be overriden by each engine. */
1945 engine->init_hw = gen8_init_common_ring; 1915 engine->init_hw = gen8_init_common_ring;
@@ -1950,7 +1920,7 @@ logical_ring_default_vfuncs(struct drm_device *dev,
1950 engine->emit_bb_start = gen8_emit_bb_start; 1920 engine->emit_bb_start = gen8_emit_bb_start;
1951 engine->get_seqno = gen8_get_seqno; 1921 engine->get_seqno = gen8_get_seqno;
1952 engine->set_seqno = gen8_set_seqno; 1922 engine->set_seqno = gen8_set_seqno;
1953 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 1923 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
1954 engine->irq_seqno_barrier = bxt_a_seqno_barrier; 1924 engine->irq_seqno_barrier = bxt_a_seqno_barrier;
1955 engine->set_seqno = bxt_a_set_seqno; 1925 engine->set_seqno = bxt_a_set_seqno;
1956 } 1926 }
@@ -1961,6 +1931,7 @@ logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
1961{ 1931{
1962 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; 1932 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
1963 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; 1933 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
1934 init_waitqueue_head(&engine->irq_queue);
1964} 1935}
1965 1936
1966static int 1937static int
@@ -1981,32 +1952,68 @@ lrc_setup_hws(struct intel_engine_cs *engine,
1981 return 0; 1952 return 0;
1982} 1953}
1983 1954
1984static int 1955static const struct logical_ring_info {
1985logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine) 1956 const char *name;
1957 unsigned exec_id;
1958 unsigned guc_id;
1959 u32 mmio_base;
1960 unsigned irq_shift;
1961} logical_rings[] = {
1962 [RCS] = {
1963 .name = "render ring",
1964 .exec_id = I915_EXEC_RENDER,
1965 .guc_id = GUC_RENDER_ENGINE,
1966 .mmio_base = RENDER_RING_BASE,
1967 .irq_shift = GEN8_RCS_IRQ_SHIFT,
1968 },
1969 [BCS] = {
1970 .name = "blitter ring",
1971 .exec_id = I915_EXEC_BLT,
1972 .guc_id = GUC_BLITTER_ENGINE,
1973 .mmio_base = BLT_RING_BASE,
1974 .irq_shift = GEN8_BCS_IRQ_SHIFT,
1975 },
1976 [VCS] = {
1977 .name = "bsd ring",
1978 .exec_id = I915_EXEC_BSD,
1979 .guc_id = GUC_VIDEO_ENGINE,
1980 .mmio_base = GEN6_BSD_RING_BASE,
1981 .irq_shift = GEN8_VCS1_IRQ_SHIFT,
1982 },
1983 [VCS2] = {
1984 .name = "bsd2 ring",
1985 .exec_id = I915_EXEC_BSD,
1986 .guc_id = GUC_VIDEO_ENGINE2,
1987 .mmio_base = GEN8_BSD2_RING_BASE,
1988 .irq_shift = GEN8_VCS2_IRQ_SHIFT,
1989 },
1990 [VECS] = {
1991 .name = "video enhancement ring",
1992 .exec_id = I915_EXEC_VEBOX,
1993 .guc_id = GUC_VIDEOENHANCE_ENGINE,
1994 .mmio_base = VEBOX_RING_BASE,
1995 .irq_shift = GEN8_VECS_IRQ_SHIFT,
1996 },
1997};
1998
1999static struct intel_engine_cs *
2000logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
1986{ 2001{
2002 const struct logical_ring_info *info = &logical_rings[id];
1987 struct drm_i915_private *dev_priv = to_i915(dev); 2003 struct drm_i915_private *dev_priv = to_i915(dev);
1988 struct intel_context *dctx = dev_priv->kernel_context; 2004 struct intel_engine_cs *engine = &dev_priv->engine[id];
1989 enum forcewake_domains fw_domains; 2005 enum forcewake_domains fw_domains;
1990 int ret;
1991 2006
1992 /* Intentionally left blank. */ 2007 engine->id = id;
1993 engine->buffer = NULL; 2008 engine->name = info->name;
2009 engine->exec_id = info->exec_id;
2010 engine->guc_id = info->guc_id;
2011 engine->mmio_base = info->mmio_base;
1994 2012
1995 engine->dev = dev; 2013 engine->i915 = dev_priv;
1996 INIT_LIST_HEAD(&engine->active_list);
1997 INIT_LIST_HEAD(&engine->request_list);
1998 i915_gem_batch_pool_init(dev, &engine->batch_pool);
1999 init_waitqueue_head(&engine->irq_queue);
2000 2014
2001 INIT_LIST_HEAD(&engine->buffers); 2015 /* Intentionally left blank. */
2002 INIT_LIST_HEAD(&engine->execlist_queue); 2016 engine->buffer = NULL;
2003 INIT_LIST_HEAD(&engine->execlist_retired_req_list);
2004 spin_lock_init(&engine->execlist_lock);
2005
2006 tasklet_init(&engine->irq_tasklet,
2007 intel_lrc_irq_handler, (unsigned long)engine);
2008
2009 logical_ring_init_platform_invariants(engine);
2010 2017
2011 fw_domains = intel_uncore_forcewake_for_reg(dev_priv, 2018 fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
2012 RING_ELSP(engine), 2019 RING_ELSP(engine),
@@ -2022,20 +2029,44 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
2022 2029
2023 engine->fw_domains = fw_domains; 2030 engine->fw_domains = fw_domains;
2024 2031
2032 INIT_LIST_HEAD(&engine->active_list);
2033 INIT_LIST_HEAD(&engine->request_list);
2034 INIT_LIST_HEAD(&engine->buffers);
2035 INIT_LIST_HEAD(&engine->execlist_queue);
2036 spin_lock_init(&engine->execlist_lock);
2037
2038 tasklet_init(&engine->irq_tasklet,
2039 intel_lrc_irq_handler, (unsigned long)engine);
2040
2041 logical_ring_init_platform_invariants(engine);
2042 logical_ring_default_vfuncs(engine);
2043 logical_ring_default_irqs(engine, info->irq_shift);
2044
2045 intel_engine_init_hangcheck(engine);
2046 i915_gem_batch_pool_init(dev, &engine->batch_pool);
2047
2048 return engine;
2049}
2050
2051static int
2052logical_ring_init(struct intel_engine_cs *engine)
2053{
2054 struct intel_context *dctx = engine->i915->kernel_context;
2055 int ret;
2056
2025 ret = i915_cmd_parser_init_ring(engine); 2057 ret = i915_cmd_parser_init_ring(engine);
2026 if (ret) 2058 if (ret)
2027 goto error; 2059 goto error;
2028 2060
2029 ret = intel_lr_context_deferred_alloc(dctx, engine); 2061 ret = execlists_context_deferred_alloc(dctx, engine);
2030 if (ret) 2062 if (ret)
2031 goto error; 2063 goto error;
2032 2064
2033 /* As this is the default context, always pin it */ 2065 /* As this is the default context, always pin it */
2034 ret = intel_lr_context_do_pin(dctx, engine); 2066 ret = intel_lr_context_pin(dctx, engine);
2035 if (ret) { 2067 if (ret) {
2036 DRM_ERROR( 2068 DRM_ERROR("Failed to pin context for %s: %d\n",
2037 "Failed to pin and map ringbuffer %s: %d\n", 2069 engine->name, ret);
2038 engine->name, ret);
2039 goto error; 2070 goto error;
2040 } 2071 }
2041 2072
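
logical_ring_setup() above replaces the per-engine init boilerplate with a static, designated-initializer table indexed by engine id; each engine's name, exec/GuC ids, mmio base and irq shift become data rather than code. A small standalone sketch of the same table-driven setup (the ids, register bases and shifts here are made up):

#include <stdio.h>

enum ring_id { RING_RENDER, RING_BLIT, RING_COUNT };

struct ring_info {                /* static description of one engine */
	const char *name;
	unsigned int mmio_base;
	unsigned int irq_shift;
};

static const struct ring_info ring_table[RING_COUNT] = {
	[RING_RENDER] = { .name = "render", .mmio_base = 0x2000,  .irq_shift = 0 },
	[RING_BLIT]   = { .name = "blit",   .mmio_base = 0x22000, .irq_shift = 16 },
};

struct ring {                     /* runtime state, filled from the table */
	enum ring_id id;
	const char *name;
	unsigned int mmio_base;
	unsigned int irq_mask;
};

static void ring_setup(struct ring *r, enum ring_id id)
{
	const struct ring_info *info = &ring_table[id];

	r->id = id;
	r->name = info->name;
	r->mmio_base = info->mmio_base;
	r->irq_mask = 1u << info->irq_shift;
}

int main(void)
{
	struct ring r;

	ring_setup(&r, RING_BLIT);
	printf("%s @%#x irq=%#x\n", r.name, r.mmio_base, r.irq_mask);
	return 0;
}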
@@ -2055,22 +2086,12 @@ error:
2055 2086
2056static int logical_render_ring_init(struct drm_device *dev) 2087static int logical_render_ring_init(struct drm_device *dev)
2057{ 2088{
2058 struct drm_i915_private *dev_priv = dev->dev_private; 2089 struct intel_engine_cs *engine = logical_ring_setup(dev, RCS);
2059 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
2060 int ret; 2090 int ret;
2061 2091
2062 engine->name = "render ring";
2063 engine->id = RCS;
2064 engine->exec_id = I915_EXEC_RENDER;
2065 engine->guc_id = GUC_RENDER_ENGINE;
2066 engine->mmio_base = RENDER_RING_BASE;
2067
2068 logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT);
2069 if (HAS_L3_DPF(dev)) 2092 if (HAS_L3_DPF(dev))
2070 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2093 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2071 2094
2072 logical_ring_default_vfuncs(dev, engine);
2073
2074 /* Override some for render ring. */ 2095 /* Override some for render ring. */
2075 if (INTEL_INFO(dev)->gen >= 9) 2096 if (INTEL_INFO(dev)->gen >= 9)
2076 engine->init_hw = gen9_init_render_ring; 2097 engine->init_hw = gen9_init_render_ring;
@@ -2081,8 +2102,6 @@ static int logical_render_ring_init(struct drm_device *dev)
2081 engine->emit_flush = gen8_emit_flush_render; 2102 engine->emit_flush = gen8_emit_flush_render;
2082 engine->emit_request = gen8_emit_request_render; 2103 engine->emit_request = gen8_emit_request_render;
2083 2104
2084 engine->dev = dev;
2085
2086 ret = intel_init_pipe_control(engine); 2105 ret = intel_init_pipe_control(engine);
2087 if (ret) 2106 if (ret)
2088 return ret; 2107 return ret;
@@ -2098,7 +2117,7 @@ static int logical_render_ring_init(struct drm_device *dev)
2098 ret); 2117 ret);
2099 } 2118 }
2100 2119
2101 ret = logical_ring_init(dev, engine); 2120 ret = logical_ring_init(engine);
2102 if (ret) { 2121 if (ret) {
2103 lrc_destroy_wa_ctx_obj(engine); 2122 lrc_destroy_wa_ctx_obj(engine);
2104 } 2123 }
@@ -2108,70 +2127,30 @@ static int logical_render_ring_init(struct drm_device *dev)
2108 2127
2109static int logical_bsd_ring_init(struct drm_device *dev) 2128static int logical_bsd_ring_init(struct drm_device *dev)
2110{ 2129{
2111 struct drm_i915_private *dev_priv = dev->dev_private; 2130 struct intel_engine_cs *engine = logical_ring_setup(dev, VCS);
2112 struct intel_engine_cs *engine = &dev_priv->engine[VCS];
2113
2114 engine->name = "bsd ring";
2115 engine->id = VCS;
2116 engine->exec_id = I915_EXEC_BSD;
2117 engine->guc_id = GUC_VIDEO_ENGINE;
2118 engine->mmio_base = GEN6_BSD_RING_BASE;
2119
2120 logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT);
2121 logical_ring_default_vfuncs(dev, engine);
2122 2131
2123 return logical_ring_init(dev, engine); 2132 return logical_ring_init(engine);
2124} 2133}
2125 2134
2126static int logical_bsd2_ring_init(struct drm_device *dev) 2135static int logical_bsd2_ring_init(struct drm_device *dev)
2127{ 2136{
2128 struct drm_i915_private *dev_priv = dev->dev_private; 2137 struct intel_engine_cs *engine = logical_ring_setup(dev, VCS2);
2129 struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
2130
2131 engine->name = "bsd2 ring";
2132 engine->id = VCS2;
2133 engine->exec_id = I915_EXEC_BSD;
2134 engine->guc_id = GUC_VIDEO_ENGINE2;
2135 engine->mmio_base = GEN8_BSD2_RING_BASE;
2136
2137 logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT);
2138 logical_ring_default_vfuncs(dev, engine);
2139 2138
2140 return logical_ring_init(dev, engine); 2139 return logical_ring_init(engine);
2141} 2140}
2142 2141
2143static int logical_blt_ring_init(struct drm_device *dev) 2142static int logical_blt_ring_init(struct drm_device *dev)
2144{ 2143{
2145 struct drm_i915_private *dev_priv = dev->dev_private; 2144 struct intel_engine_cs *engine = logical_ring_setup(dev, BCS);
2146 struct intel_engine_cs *engine = &dev_priv->engine[BCS];
2147
2148 engine->name = "blitter ring";
2149 engine->id = BCS;
2150 engine->exec_id = I915_EXEC_BLT;
2151 engine->guc_id = GUC_BLITTER_ENGINE;
2152 engine->mmio_base = BLT_RING_BASE;
2153
2154 logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT);
2155 logical_ring_default_vfuncs(dev, engine);
2156 2145
2157 return logical_ring_init(dev, engine); 2146 return logical_ring_init(engine);
2158} 2147}
2159 2148
2160static int logical_vebox_ring_init(struct drm_device *dev) 2149static int logical_vebox_ring_init(struct drm_device *dev)
2161{ 2150{
2162 struct drm_i915_private *dev_priv = dev->dev_private; 2151 struct intel_engine_cs *engine = logical_ring_setup(dev, VECS);
2163 struct intel_engine_cs *engine = &dev_priv->engine[VECS];
2164
2165 engine->name = "video enhancement ring";
2166 engine->id = VECS;
2167 engine->exec_id = I915_EXEC_VEBOX;
2168 engine->guc_id = GUC_VIDEOENHANCE_ENGINE;
2169 engine->mmio_base = VEBOX_RING_BASE;
2170 2152
2171 logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT); 2153 return logical_ring_init(engine);
2172 logical_ring_default_vfuncs(dev, engine);
2173
2174 return logical_ring_init(dev, engine);
2175} 2154}
2176 2155
2177/** 2156/**
@@ -2232,7 +2211,7 @@ cleanup_render_ring:
2232} 2211}
2233 2212
2234static u32 2213static u32
2235make_rpcs(struct drm_device *dev) 2214make_rpcs(struct drm_i915_private *dev_priv)
2236{ 2215{
2237 u32 rpcs = 0; 2216 u32 rpcs = 0;
2238 2217
@@ -2240,7 +2219,7 @@ make_rpcs(struct drm_device *dev)
2240 * No explicit RPCS request is needed to ensure full 2219 * No explicit RPCS request is needed to ensure full
2241 * slice/subslice/EU enablement prior to Gen9. 2220 * slice/subslice/EU enablement prior to Gen9.
2242 */ 2221 */
2243 if (INTEL_INFO(dev)->gen < 9) 2222 if (INTEL_GEN(dev_priv) < 9)
2244 return 0; 2223 return 0;
2245 2224
2246 /* 2225 /*
@@ -2249,24 +2228,24 @@ make_rpcs(struct drm_device *dev)
2249 * must make an explicit request through RPCS for full 2228 * must make an explicit request through RPCS for full
2250 * enablement. 2229 * enablement.
2251 */ 2230 */
2252 if (INTEL_INFO(dev)->has_slice_pg) { 2231 if (INTEL_INFO(dev_priv)->has_slice_pg) {
2253 rpcs |= GEN8_RPCS_S_CNT_ENABLE; 2232 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
2254 rpcs |= INTEL_INFO(dev)->slice_total << 2233 rpcs |= INTEL_INFO(dev_priv)->slice_total <<
2255 GEN8_RPCS_S_CNT_SHIFT; 2234 GEN8_RPCS_S_CNT_SHIFT;
2256 rpcs |= GEN8_RPCS_ENABLE; 2235 rpcs |= GEN8_RPCS_ENABLE;
2257 } 2236 }
2258 2237
2259 if (INTEL_INFO(dev)->has_subslice_pg) { 2238 if (INTEL_INFO(dev_priv)->has_subslice_pg) {
2260 rpcs |= GEN8_RPCS_SS_CNT_ENABLE; 2239 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
2261 rpcs |= INTEL_INFO(dev)->subslice_per_slice << 2240 rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
2262 GEN8_RPCS_SS_CNT_SHIFT; 2241 GEN8_RPCS_SS_CNT_SHIFT;
2263 rpcs |= GEN8_RPCS_ENABLE; 2242 rpcs |= GEN8_RPCS_ENABLE;
2264 } 2243 }
2265 2244
2266 if (INTEL_INFO(dev)->has_eu_pg) { 2245 if (INTEL_INFO(dev_priv)->has_eu_pg) {
2267 rpcs |= INTEL_INFO(dev)->eu_per_subslice << 2246 rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
2268 GEN8_RPCS_EU_MIN_SHIFT; 2247 GEN8_RPCS_EU_MIN_SHIFT;
2269 rpcs |= INTEL_INFO(dev)->eu_per_subslice << 2248 rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
2270 GEN8_RPCS_EU_MAX_SHIFT; 2249 GEN8_RPCS_EU_MAX_SHIFT;
2271 rpcs |= GEN8_RPCS_ENABLE; 2250 rpcs |= GEN8_RPCS_ENABLE;
2272 } 2251 }
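
make_rpcs() now reads the slice/subslice/EU counts straight from the device-info structure and packs them into shifted fields plus enable bits. The sketch below mimics that packing with invented shift values; the real field layout is the GEN8_RPCS_* set of definitions in the driver, so treat this purely as an illustration of the shape of the computation:

#include <stdio.h>

/* Illustrative field layout only -- not the hardware's register format. */
#define RPCS_ENABLE        (1u << 31)
#define RPCS_S_CNT_SHIFT   15
#define RPCS_SS_CNT_SHIFT  8
#define RPCS_EU_MIN_SHIFT  0
#define RPCS_EU_MAX_SHIFT  4

static unsigned int make_rpcs(int slices, int subslices, int eu_per_subslice)
{
	unsigned int rpcs = 0;

	if (slices) {
		rpcs |= slices << RPCS_S_CNT_SHIFT;
		rpcs |= RPCS_ENABLE;
	}
	if (subslices) {
		rpcs |= subslices << RPCS_SS_CNT_SHIFT;
		rpcs |= RPCS_ENABLE;
	}
	if (eu_per_subslice) {
		rpcs |= eu_per_subslice << RPCS_EU_MIN_SHIFT;
		rpcs |= eu_per_subslice << RPCS_EU_MAX_SHIFT;
		rpcs |= RPCS_ENABLE;
	}
	return rpcs;
}

int main(void)
{
	/* e.g. a hypothetical 3-slice, 4-subslice, 8-EU part */
	printf("RPCS = %#x\n", make_rpcs(3, 4, 8));
	return 0;
}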
@@ -2278,9 +2257,9 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
2278{ 2257{
2279 u32 indirect_ctx_offset; 2258 u32 indirect_ctx_offset;
2280 2259
2281 switch (INTEL_INFO(engine->dev)->gen) { 2260 switch (INTEL_GEN(engine->i915)) {
2282 default: 2261 default:
2283 MISSING_CASE(INTEL_INFO(engine->dev)->gen); 2262 MISSING_CASE(INTEL_GEN(engine->i915));
2284 /* fall through */ 2263 /* fall through */
2285 case 9: 2264 case 9:
2286 indirect_ctx_offset = 2265 indirect_ctx_offset =
@@ -2301,8 +2280,7 @@ populate_lr_context(struct intel_context *ctx,
2301 struct intel_engine_cs *engine, 2280 struct intel_engine_cs *engine,
2302 struct intel_ringbuffer *ringbuf) 2281 struct intel_ringbuffer *ringbuf)
2303{ 2282{
2304 struct drm_device *dev = engine->dev; 2283 struct drm_i915_private *dev_priv = ctx->i915;
2305 struct drm_i915_private *dev_priv = dev->dev_private;
2306 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2284 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2307 void *vaddr; 2285 void *vaddr;
2308 u32 *reg_state; 2286 u32 *reg_state;
@@ -2340,7 +2318,7 @@ populate_lr_context(struct intel_context *ctx,
2340 RING_CONTEXT_CONTROL(engine), 2318 RING_CONTEXT_CONTROL(engine),
2341 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | 2319 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
2342 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | 2320 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
2343 (HAS_RESOURCE_STREAMER(dev) ? 2321 (HAS_RESOURCE_STREAMER(dev_priv) ?
2344 CTX_CTRL_RS_CTX_ENABLE : 0))); 2322 CTX_CTRL_RS_CTX_ENABLE : 0)));
2345 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base), 2323 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
2346 0); 2324 0);
@@ -2429,7 +2407,7 @@ populate_lr_context(struct intel_context *ctx,
2429 if (engine->id == RCS) { 2407 if (engine->id == RCS) {
2430 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); 2408 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
2431 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 2409 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
2432 make_rpcs(dev)); 2410 make_rpcs(dev_priv));
2433 } 2411 }
2434 2412
2435 i915_gem_object_unpin_map(ctx_obj); 2413 i915_gem_object_unpin_map(ctx_obj);
@@ -2456,12 +2434,6 @@ void intel_lr_context_free(struct intel_context *ctx)
2456 if (!ctx_obj) 2434 if (!ctx_obj)
2457 continue; 2435 continue;
2458 2436
2459 if (ctx == ctx->i915->kernel_context) {
2460 intel_unpin_ringbuffer_obj(ringbuf);
2461 i915_gem_object_ggtt_unpin(ctx_obj);
2462 i915_gem_object_unpin_map(ctx_obj);
2463 }
2464
2465 WARN_ON(ctx->engine[i].pin_count); 2437 WARN_ON(ctx->engine[i].pin_count);
2466 intel_ringbuffer_free(ringbuf); 2438 intel_ringbuffer_free(ringbuf);
2467 drm_gem_object_unreference(&ctx_obj->base); 2439 drm_gem_object_unreference(&ctx_obj->base);
@@ -2486,11 +2458,11 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
2486{ 2458{
2487 int ret = 0; 2459 int ret = 0;
2488 2460
2489 WARN_ON(INTEL_INFO(engine->dev)->gen < 8); 2461 WARN_ON(INTEL_GEN(engine->i915) < 8);
2490 2462
2491 switch (engine->id) { 2463 switch (engine->id) {
2492 case RCS: 2464 case RCS:
2493 if (INTEL_INFO(engine->dev)->gen >= 9) 2465 if (INTEL_GEN(engine->i915) >= 9)
2494 ret = GEN9_LR_CONTEXT_RENDER_SIZE; 2466 ret = GEN9_LR_CONTEXT_RENDER_SIZE;
2495 else 2467 else
2496 ret = GEN8_LR_CONTEXT_RENDER_SIZE; 2468 ret = GEN8_LR_CONTEXT_RENDER_SIZE;
@@ -2507,9 +2479,9 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
2507} 2479}
2508 2480
2509/** 2481/**
2510 * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context 2482 * execlists_context_deferred_alloc() - create the LRC specific bits of a context
2511 * @ctx: LR context to create. 2483 * @ctx: LR context to create.
2512 * @ring: engine to be used with the context. 2484 * @engine: engine to be used with the context.
2513 * 2485 *
2514 * This function can be called more than once, with different engines, if we plan 2486 * This function can be called more than once, with different engines, if we plan
2515 * to use the context with them. The context backing objects and the ringbuffers 2487 * to use the context with them. The context backing objects and the ringbuffers
@@ -2519,11 +2491,9 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
2519 * 2491 *
2520 * Return: non-zero on error. 2492 * Return: non-zero on error.
2521 */ 2493 */
2522 2494static int execlists_context_deferred_alloc(struct intel_context *ctx,
2523int intel_lr_context_deferred_alloc(struct intel_context *ctx, 2495 struct intel_engine_cs *engine)
2524 struct intel_engine_cs *engine)
2525{ 2496{
2526 struct drm_device *dev = engine->dev;
2527 struct drm_i915_gem_object *ctx_obj; 2497 struct drm_i915_gem_object *ctx_obj;
2528 uint32_t context_size; 2498 uint32_t context_size;
2529 struct intel_ringbuffer *ringbuf; 2499 struct intel_ringbuffer *ringbuf;
@@ -2537,10 +2507,10 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
2537 /* One extra page as the sharing data between driver and GuC */ 2507 /* One extra page as the sharing data between driver and GuC */
2538 context_size += PAGE_SIZE * LRC_PPHWSP_PN; 2508 context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2539 2509
2540 ctx_obj = i915_gem_alloc_object(dev, context_size); 2510 ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size);
2541 if (!ctx_obj) { 2511 if (IS_ERR(ctx_obj)) {
2542 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); 2512 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
2543 return -ENOMEM; 2513 return PTR_ERR(ctx_obj);
2544 } 2514 }
2545 2515
2546 ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE); 2516 ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
@@ -2557,25 +2527,8 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
2557 2527
2558 ctx->engine[engine->id].ringbuf = ringbuf; 2528 ctx->engine[engine->id].ringbuf = ringbuf;
2559 ctx->engine[engine->id].state = ctx_obj; 2529 ctx->engine[engine->id].state = ctx_obj;
2530 ctx->engine[engine->id].initialised = engine->init_context == NULL;
2560 2531
2561 if (ctx != ctx->i915->kernel_context && engine->init_context) {
2562 struct drm_i915_gem_request *req;
2563
2564 req = i915_gem_request_alloc(engine, ctx);
2565 if (IS_ERR(req)) {
2566 ret = PTR_ERR(req);
2567 DRM_ERROR("ring create req: %d\n", ret);
2568 goto error_ringbuf;
2569 }
2570
2571 ret = engine->init_context(req);
2572 i915_add_request_no_flush(req);
2573 if (ret) {
2574 DRM_ERROR("ring init context: %d\n",
2575 ret);
2576 goto error_ringbuf;
2577 }
2578 }
2579 return 0; 2532 return 0;
2580 2533
2581error_ringbuf: 2534error_ringbuf:
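
execlists_context_deferred_alloc() no longer creates and submits a throwaway request just to run engine->init_context(); it records initialised = (engine->init_context == NULL), and the first real request against the context performs the init, as seen in the alloc_request_extras hunk near the top of this file. A compact model of that deferred-initialisation flag (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct engine { int (*init_context)(void); };
struct ctx_engine_state { bool initialised; };

static int run_golden_state(void) { puts("emit golden render state"); return 0; }

/* At allocation time, only record whether any init is needed at all. */
static void context_alloc(struct ctx_engine_state *ce, const struct engine *e)
{
	ce->initialised = (e->init_context == NULL);
}

/* The first request built against this context performs the deferred init. */
static int request_init(struct ctx_engine_state *ce, const struct engine *e)
{
	int ret;

	if (ce->initialised)
		return 0;

	ret = e->init_context();
	if (ret)
		return ret;

	ce->initialised = true;
	return 0;
}

int main(void)
{
	struct engine rcs = { .init_context = run_golden_state };
	struct ctx_engine_state ce;

	context_alloc(&ce, &rcs);
	request_init(&ce, &rcs);   /* runs init_context exactly once */
	request_init(&ce, &rcs);   /* no-op on subsequent requests */
	return 0;
}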
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 60a7385bc531..1afba0331dc6 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -101,8 +101,6 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
101 101
102void intel_lr_context_free(struct intel_context *ctx); 102void intel_lr_context_free(struct intel_context *ctx);
103uint32_t intel_lr_context_size(struct intel_engine_cs *engine); 103uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
104int intel_lr_context_deferred_alloc(struct intel_context *ctx,
105 struct intel_engine_cs *engine);
106void intel_lr_context_unpin(struct intel_context *ctx, 104void intel_lr_context_unpin(struct intel_context *ctx,
107 struct intel_engine_cs *engine); 105 struct intel_engine_cs *engine);
108 106
@@ -113,16 +111,14 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
113uint64_t intel_lr_context_descriptor(struct intel_context *ctx, 111uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
114 struct intel_engine_cs *engine); 112 struct intel_engine_cs *engine);
115 113
116u32 intel_execlists_ctx_id(struct intel_context *ctx,
117 struct intel_engine_cs *engine);
118
119/* Execlists */ 114/* Execlists */
120int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); 115int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
116 int enable_execlists);
121struct i915_execbuffer_params; 117struct i915_execbuffer_params;
122int intel_execlists_submission(struct i915_execbuffer_params *params, 118int intel_execlists_submission(struct i915_execbuffer_params *params,
123 struct drm_i915_gem_execbuffer2 *args, 119 struct drm_i915_gem_execbuffer2 *args,
124 struct list_head *vmas); 120 struct list_head *vmas);
125 121
126void intel_execlists_retire_requests(struct intel_engine_cs *engine); 122void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
127 123
128#endif /* _INTEL_LRC_H_ */ 124#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index bc53c0dd34d0..d65fd945607a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -190,7 +190,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
190 /* Set the dithering flag on LVDS as needed, note that there is no 190 /* Set the dithering flag on LVDS as needed, note that there is no
191 * special lvds dither control bit on pch-split platforms, dithering is 191 * special lvds dither control bit on pch-split platforms, dithering is
192 * only controlled through the PIPECONF reg. */ 192 * only controlled through the PIPECONF reg. */
193 if (INTEL_INFO(dev)->gen == 4) { 193 if (IS_GEN4(dev_priv)) {
194 /* Bspec wording suggests that LVDS port dithering only exists 194 /* Bspec wording suggests that LVDS port dithering only exists
195 * for 18bpp panels. */ 195 * for 18bpp panels. */
196 if (crtc->config->dither && crtc->config->pipe_bpp == 18) 196 if (crtc->config->dither && crtc->config->pipe_bpp == 18)
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 6ba4bf7f2a89..b765c75f3fcd 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -189,7 +189,7 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
189 */ 189 */
190int intel_mocs_init_engine(struct intel_engine_cs *engine) 190int intel_mocs_init_engine(struct intel_engine_cs *engine)
191{ 191{
192 struct drm_i915_private *dev_priv = to_i915(engine->dev); 192 struct drm_i915_private *dev_priv = engine->i915;
193 struct drm_i915_mocs_table table; 193 struct drm_i915_mocs_table table;
194 unsigned int index; 194 unsigned int index;
195 195
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 99e26034ae8d..8347fd8af8e4 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -574,10 +574,8 @@ static void asle_work(struct work_struct *work)
574 asle->aslc = aslc_stat; 574 asle->aslc = aslc_stat;
575} 575}
576 576
577void intel_opregion_asle_intr(struct drm_device *dev) 577void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
578{ 578{
579 struct drm_i915_private *dev_priv = dev->dev_private;
580
581 if (dev_priv->opregion.asle) 579 if (dev_priv->opregion.asle)
582 schedule_work(&dev_priv->opregion.asle_work); 580 schedule_work(&dev_priv->opregion.asle_work);
583} 581}
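
This opregion change is one instance of the conversion running through the whole series: functions take struct drm_i915_private directly instead of struct drm_device, so callees stop re-deriving the private pointer via dev->dev_private or to_i915(). A toy before/after comparison with stand-in types (not the real DRM structures):

#include <stdio.h>

/* Illustrative stand-ins for the device and driver-private structures. */
struct drv_private { int users; };
struct drv_device  { void *dev_private; };

/* Before: the callee has to fish the driver private back out of the device. */
static void grab_old(struct drv_device *dev)
{
	struct drv_private *priv = dev->dev_private;

	priv->users++;
}

/* After: callers pass the driver private directly; no cast, no extra deref. */
static void grab_new(struct drv_private *priv)
{
	priv->users++;
}

int main(void)
{
	struct drv_private priv = { 0 };
	struct drv_device dev = { .dev_private = &priv };

	grab_old(&dev);
	grab_new(&priv);
	printf("users=%d\n", priv.users);
	return 0;
}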
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index bd38e49f7334..eb93f90bb74d 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -168,7 +168,7 @@ struct overlay_registers {
168}; 168};
169 169
170struct intel_overlay { 170struct intel_overlay {
171 struct drm_device *dev; 171 struct drm_i915_private *i915;
172 struct intel_crtc *crtc; 172 struct intel_crtc *crtc;
173 struct drm_i915_gem_object *vid_bo; 173 struct drm_i915_gem_object *vid_bo;
174 struct drm_i915_gem_object *old_vid_bo; 174 struct drm_i915_gem_object *old_vid_bo;
@@ -190,15 +190,15 @@ struct intel_overlay {
190static struct overlay_registers __iomem * 190static struct overlay_registers __iomem *
191intel_overlay_map_regs(struct intel_overlay *overlay) 191intel_overlay_map_regs(struct intel_overlay *overlay)
192{ 192{
193 struct drm_i915_private *dev_priv = to_i915(overlay->dev); 193 struct drm_i915_private *dev_priv = overlay->i915;
194 struct i915_ggtt *ggtt = &dev_priv->ggtt;
195 struct overlay_registers __iomem *regs; 194 struct overlay_registers __iomem *regs;
196 195
197 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 196 if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
198 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; 197 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
199 else 198 else
200 regs = io_mapping_map_wc(ggtt->mappable, 199 regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
201 i915_gem_obj_ggtt_offset(overlay->reg_bo)); 200 overlay->flip_addr,
201 PAGE_SIZE);
202 202
203 return regs; 203 return regs;
204} 204}
@@ -206,7 +206,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
206static void intel_overlay_unmap_regs(struct intel_overlay *overlay, 206static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
207 struct overlay_registers __iomem *regs) 207 struct overlay_registers __iomem *regs)
208{ 208{
209 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 209 if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
210 io_mapping_unmap(regs); 210 io_mapping_unmap(regs);
211} 211}
212 212
@@ -232,14 +232,13 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
232/* overlay needs to be disable in OCMD reg */ 232/* overlay needs to be disable in OCMD reg */
233static int intel_overlay_on(struct intel_overlay *overlay) 233static int intel_overlay_on(struct intel_overlay *overlay)
234{ 234{
235 struct drm_device *dev = overlay->dev; 235 struct drm_i915_private *dev_priv = overlay->i915;
236 struct drm_i915_private *dev_priv = dev->dev_private;
237 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 236 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
238 struct drm_i915_gem_request *req; 237 struct drm_i915_gem_request *req;
239 int ret; 238 int ret;
240 239
241 WARN_ON(overlay->active); 240 WARN_ON(overlay->active);
242 WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); 241 WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
243 242
244 req = i915_gem_request_alloc(engine, NULL); 243 req = i915_gem_request_alloc(engine, NULL);
245 if (IS_ERR(req)) 244 if (IS_ERR(req))
@@ -266,8 +265,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
266static int intel_overlay_continue(struct intel_overlay *overlay, 265static int intel_overlay_continue(struct intel_overlay *overlay,
267 bool load_polyphase_filter) 266 bool load_polyphase_filter)
268{ 267{
269 struct drm_device *dev = overlay->dev; 268 struct drm_i915_private *dev_priv = overlay->i915;
270 struct drm_i915_private *dev_priv = dev->dev_private;
271 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 269 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
272 struct drm_i915_gem_request *req; 270 struct drm_i915_gem_request *req;
273 u32 flip_addr = overlay->flip_addr; 271 u32 flip_addr = overlay->flip_addr;
@@ -335,8 +333,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
335/* overlay needs to be disabled in OCMD reg */ 333/* overlay needs to be disabled in OCMD reg */
336static int intel_overlay_off(struct intel_overlay *overlay) 334static int intel_overlay_off(struct intel_overlay *overlay)
337{ 335{
338 struct drm_device *dev = overlay->dev; 336 struct drm_i915_private *dev_priv = overlay->i915;
339 struct drm_i915_private *dev_priv = dev->dev_private;
340 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 337 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
341 struct drm_i915_gem_request *req; 338 struct drm_i915_gem_request *req;
342 u32 flip_addr = overlay->flip_addr; 339 u32 flip_addr = overlay->flip_addr;
@@ -365,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
365 intel_ring_emit(engine, flip_addr); 362 intel_ring_emit(engine, flip_addr);
366 intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 363 intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
367 /* turn overlay off */ 364 /* turn overlay off */
368 if (IS_I830(dev)) { 365 if (IS_I830(dev_priv)) {
369 /* Workaround: Don't disable the overlay fully, since otherwise 366 /* Workaround: Don't disable the overlay fully, since otherwise
370 * it dies on the next OVERLAY_ON cmd. */ 367 * it dies on the next OVERLAY_ON cmd. */
371 intel_ring_emit(engine, MI_NOOP); 368 intel_ring_emit(engine, MI_NOOP);
@@ -408,12 +405,11 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
408 */ 405 */
409static int intel_overlay_release_old_vid(struct intel_overlay *overlay) 406static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
410{ 407{
411 struct drm_device *dev = overlay->dev; 408 struct drm_i915_private *dev_priv = overlay->i915;
412 struct drm_i915_private *dev_priv = dev->dev_private;
413 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 409 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
414 int ret; 410 int ret;
415 411
416 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 412 lockdep_assert_held(&dev_priv->dev->struct_mutex);
417 413
418 /* Only wait if there is actually an old frame to release to 414 /* Only wait if there is actually an old frame to release to
419 * guarantee forward progress. 415 * guarantee forward progress.
@@ -537,10 +533,10 @@ static int uv_vsubsampling(u32 format)
537 } 533 }
538} 534}
539 535
540static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) 536static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
541{ 537{
542 u32 mask, shift, ret; 538 u32 mask, shift, ret;
543 if (IS_GEN2(dev)) { 539 if (IS_GEN2(dev_priv)) {
544 mask = 0x1f; 540 mask = 0x1f;
545 shift = 5; 541 shift = 5;
546 } else { 542 } else {
@@ -548,7 +544,7 @@ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
548 shift = 6; 544 shift = 6;
549 } 545 }
550 ret = ((offset + width + mask) >> shift) - (offset >> shift); 546 ret = ((offset + width + mask) >> shift) - (offset >> shift);
551 if (!IS_GEN2(dev)) 547 if (!IS_GEN2(dev_priv))
552 ret <<= 1; 548 ret <<= 1;
553 ret -= 1; 549 ret -= 1;
554 return ret << 2; 550 return ret << 2;
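
calc_swidthsw() now keys off dev_priv only to pick the GEN2 constants; the arithmetic itself converts a start offset and width into the SWIDTHSW field in cache-line granules. A worked example of the non-GEN2 branch (mask 0x3f, shift 6), with arbitrary inputs:

#include <stdio.h>

/* Same arithmetic as the diff above, non-GEN2 case only. */
static unsigned int calc_swidthsw(unsigned int offset, unsigned int width)
{
	const unsigned int mask = 0x3f, shift = 6;
	unsigned int ret;

	ret = ((offset + width + mask) >> shift) - (offset >> shift);
	ret <<= 1;
	ret -= 1;
	return ret << 2;
}

int main(void)
{
	/* offset 64, width 720: (847 >> 6) - (64 >> 6) = 13 - 1 = 12 granules,
	 * then (12 * 2 - 1) << 2 = 92. */
	printf("SWIDTHSW = %u\n", calc_swidthsw(64, 720));
	return 0;
}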
@@ -741,12 +737,12 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
741 int ret, tmp_width; 737 int ret, tmp_width;
742 struct overlay_registers __iomem *regs; 738 struct overlay_registers __iomem *regs;
743 bool scale_changed = false; 739 bool scale_changed = false;
744 struct drm_device *dev = overlay->dev; 740 struct drm_i915_private *dev_priv = overlay->i915;
745 u32 swidth, swidthsw, sheight, ostride; 741 u32 swidth, swidthsw, sheight, ostride;
746 enum pipe pipe = overlay->crtc->pipe; 742 enum pipe pipe = overlay->crtc->pipe;
747 743
748 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 744 lockdep_assert_held(&dev_priv->dev->struct_mutex);
749 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 745 WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
750 746
751 ret = intel_overlay_release_old_vid(overlay); 747 ret = intel_overlay_release_old_vid(overlay);
752 if (ret != 0) 748 if (ret != 0)
@@ -769,7 +765,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
769 goto out_unpin; 765 goto out_unpin;
770 } 766 }
771 oconfig = OCONF_CC_OUT_8BIT; 767 oconfig = OCONF_CC_OUT_8BIT;
772 if (IS_GEN4(overlay->dev)) 768 if (IS_GEN4(dev_priv))
773 oconfig |= OCONF_CSC_MODE_BT709; 769 oconfig |= OCONF_CSC_MODE_BT709;
774 oconfig |= pipe == 0 ? 770 oconfig |= pipe == 0 ?
775 OCONF_PIPE_A : OCONF_PIPE_B; 771 OCONF_PIPE_A : OCONF_PIPE_B;
@@ -796,7 +792,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
796 tmp_width = params->src_w; 792 tmp_width = params->src_w;
797 793
798 swidth = params->src_w; 794 swidth = params->src_w;
799 swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); 795 swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
800 sheight = params->src_h; 796 sheight = params->src_h;
801 iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y); 797 iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
802 ostride = params->stride_Y; 798 ostride = params->stride_Y;
@@ -806,9 +802,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
806 int uv_vscale = uv_vsubsampling(params->format); 802 int uv_vscale = uv_vsubsampling(params->format);
807 u32 tmp_U, tmp_V; 803 u32 tmp_U, tmp_V;
808 swidth |= (params->src_w/uv_hscale) << 16; 804 swidth |= (params->src_w/uv_hscale) << 16;
809 tmp_U = calc_swidthsw(overlay->dev, params->offset_U, 805 tmp_U = calc_swidthsw(dev_priv, params->offset_U,
810 params->src_w/uv_hscale); 806 params->src_w/uv_hscale);
811 tmp_V = calc_swidthsw(overlay->dev, params->offset_V, 807 tmp_V = calc_swidthsw(dev_priv, params->offset_V,
812 params->src_w/uv_hscale); 808 params->src_w/uv_hscale);
813 swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; 809 swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
814 sheight |= (params->src_h/uv_vscale) << 16; 810 sheight |= (params->src_h/uv_vscale) << 16;
@@ -840,8 +836,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
840 overlay->old_vid_bo = overlay->vid_bo; 836 overlay->old_vid_bo = overlay->vid_bo;
841 overlay->vid_bo = new_bo; 837 overlay->vid_bo = new_bo;
842 838
843 intel_frontbuffer_flip(dev, 839 intel_frontbuffer_flip(dev_priv->dev, INTEL_FRONTBUFFER_OVERLAY(pipe));
844 INTEL_FRONTBUFFER_OVERLAY(pipe));
845 840
846 return 0; 841 return 0;
847 842
@@ -852,12 +847,12 @@ out_unpin:
852 847
853int intel_overlay_switch_off(struct intel_overlay *overlay) 848int intel_overlay_switch_off(struct intel_overlay *overlay)
854{ 849{
850 struct drm_i915_private *dev_priv = overlay->i915;
855 struct overlay_registers __iomem *regs; 851 struct overlay_registers __iomem *regs;
856 struct drm_device *dev = overlay->dev;
857 int ret; 852 int ret;
858 853
859 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 854 lockdep_assert_held(&dev_priv->dev->struct_mutex);
860 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 855 WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
861 856
862 ret = intel_overlay_recover_from_interrupt(overlay); 857 ret = intel_overlay_recover_from_interrupt(overlay);
863 if (ret != 0) 858 if (ret != 0)
@@ -897,15 +892,14 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
897 892
898static void update_pfit_vscale_ratio(struct intel_overlay *overlay) 893static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
899{ 894{
900 struct drm_device *dev = overlay->dev; 895 struct drm_i915_private *dev_priv = overlay->i915;
901 struct drm_i915_private *dev_priv = dev->dev_private;
902 u32 pfit_control = I915_READ(PFIT_CONTROL); 896 u32 pfit_control = I915_READ(PFIT_CONTROL);
903 u32 ratio; 897 u32 ratio;
904 898
905 /* XXX: This is not the same logic as in the xorg driver, but more in 899 /* XXX: This is not the same logic as in the xorg driver, but more in
906 * line with the intel documentation for the i965 900 * line with the intel documentation for the i965
907 */ 901 */
908 if (INTEL_INFO(dev)->gen >= 4) { 902 if (INTEL_GEN(dev_priv) >= 4) {
909 /* on i965 use the PGM reg to read out the autoscaler values */ 903 /* on i965 use the PGM reg to read out the autoscaler values */
910 ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; 904 ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
911 } else { 905 } else {
@@ -948,7 +942,7 @@ static int check_overlay_scaling(struct put_image_params *rec)
948 return 0; 942 return 0;
949} 943}
950 944
951static int check_overlay_src(struct drm_device *dev, 945static int check_overlay_src(struct drm_i915_private *dev_priv,
952 struct drm_intel_overlay_put_image *rec, 946 struct drm_intel_overlay_put_image *rec,
953 struct drm_i915_gem_object *new_bo) 947 struct drm_i915_gem_object *new_bo)
954{ 948{
@@ -959,7 +953,7 @@ static int check_overlay_src(struct drm_device *dev,
959 u32 tmp; 953 u32 tmp;
960 954
961 /* check src dimensions */ 955 /* check src dimensions */
962 if (IS_845G(dev) || IS_I830(dev)) { 956 if (IS_845G(dev_priv) || IS_I830(dev_priv)) {
963 if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY || 957 if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
964 rec->src_width > IMAGE_MAX_WIDTH_LEGACY) 958 rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
965 return -EINVAL; 959 return -EINVAL;
@@ -1011,14 +1005,14 @@ static int check_overlay_src(struct drm_device *dev,
1011 return -EINVAL; 1005 return -EINVAL;
1012 1006
1013 /* stride checking */ 1007 /* stride checking */
1014 if (IS_I830(dev) || IS_845G(dev)) 1008 if (IS_I830(dev_priv) || IS_845G(dev_priv))
1015 stride_mask = 255; 1009 stride_mask = 255;
1016 else 1010 else
1017 stride_mask = 63; 1011 stride_mask = 63;
1018 1012
1019 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) 1013 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
1020 return -EINVAL; 1014 return -EINVAL;
1021 if (IS_GEN4(dev) && rec->stride_Y < 512) 1015 if (IS_GEN4(dev_priv) && rec->stride_Y < 512)
1022 return -EINVAL; 1016 return -EINVAL;
1023 1017
1024 tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? 1018 tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
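
Most of check_overlay_src() reduces to alignment masks like the ones above: old chips (i830/845G) want 256-byte-aligned strides, later ones 64-byte, and gen4 additionally refuses Y strides below 512 bytes. A self-contained sketch of just those stride rules, with the chip generations reduced to booleans; illustrative only:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified model of the stride checks in check_overlay_src(). */
static int check_strides(bool old_chip, bool gen4,
                         uint32_t stride_y, uint32_t stride_uv)
{
        uint32_t stride_mask = old_chip ? 255 : 63;

        if ((stride_y & stride_mask) || (stride_uv & stride_mask))
                return -EINVAL;          /* stride not sufficiently aligned */
        if (gen4 && stride_y < 512)
                return -EINVAL;          /* gen4 also wants >= 512 byte Y stride */
        return 0;
}

int main(void)
{
        printf("gen4, 256-byte Y stride:  %d\n", check_strides(false, true, 256, 128));
        printf("gen4, 1024-byte Y stride: %d\n", check_strides(false, true, 1024, 512));
        return 0;
}
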
@@ -1063,13 +1057,13 @@ static int check_overlay_src(struct drm_device *dev,
1063 * Return the pipe currently connected to the panel fitter, 1057 * Return the pipe currently connected to the panel fitter,
1064 * or -1 if the panel fitter is not present or not in use 1058 * or -1 if the panel fitter is not present or not in use
1065 */ 1059 */
1066static int intel_panel_fitter_pipe(struct drm_device *dev) 1060static int intel_panel_fitter_pipe(struct drm_i915_private *dev_priv)
1067{ 1061{
1068 struct drm_i915_private *dev_priv = dev->dev_private;
1069 u32 pfit_control; 1062 u32 pfit_control;
1070 1063
1071 /* i830 doesn't have a panel fitter */ 1064 /* i830 doesn't have a panel fitter */
1072 if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) 1065 if (INTEL_GEN(dev_priv) <= 3 &&
1066 (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
1073 return -1; 1067 return -1;
1074 1068
1075 pfit_control = I915_READ(PFIT_CONTROL); 1069 pfit_control = I915_READ(PFIT_CONTROL);
@@ -1079,15 +1073,15 @@ static int intel_panel_fitter_pipe(struct drm_device *dev)
1079 return -1; 1073 return -1;
1080 1074
1081 /* 965 can place panel fitter on either pipe */ 1075 /* 965 can place panel fitter on either pipe */
1082 if (IS_GEN4(dev)) 1076 if (IS_GEN4(dev_priv))
1083 return (pfit_control >> 29) & 0x3; 1077 return (pfit_control >> 29) & 0x3;
1084 1078
1085 /* older chips can only use pipe 1 */ 1079 /* older chips can only use pipe 1 */
1086 return 1; 1080 return 1;
1087} 1081}
1088 1082
1089int intel_overlay_put_image(struct drm_device *dev, void *data, 1083int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
1090 struct drm_file *file_priv) 1084 struct drm_file *file_priv)
1091{ 1085{
1092 struct drm_intel_overlay_put_image *put_image_rec = data; 1086 struct drm_intel_overlay_put_image *put_image_rec = data;
1093 struct drm_i915_private *dev_priv = dev->dev_private; 1087 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1162,7 +1156,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1162 1156
1163 /* line too wide, i.e. one-line-mode */ 1157 /* line too wide, i.e. one-line-mode */
1164 if (mode->hdisplay > 1024 && 1158 if (mode->hdisplay > 1024 &&
1165 intel_panel_fitter_pipe(dev) == crtc->pipe) { 1159 intel_panel_fitter_pipe(dev_priv) == crtc->pipe) {
1166 overlay->pfit_active = true; 1160 overlay->pfit_active = true;
1167 update_pfit_vscale_ratio(overlay); 1161 update_pfit_vscale_ratio(overlay);
1168 } else 1162 } else
@@ -1196,7 +1190,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1196 goto out_unlock; 1190 goto out_unlock;
1197 } 1191 }
1198 1192
1199 ret = check_overlay_src(dev, put_image_rec, new_bo); 1193 ret = check_overlay_src(dev_priv, put_image_rec, new_bo);
1200 if (ret != 0) 1194 if (ret != 0)
1201 goto out_unlock; 1195 goto out_unlock;
1202 params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK; 1196 params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
@@ -1284,8 +1278,8 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs)
1284 return 0; 1278 return 0;
1285} 1279}
1286 1280
1287int intel_overlay_attrs(struct drm_device *dev, void *data, 1281int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
1288 struct drm_file *file_priv) 1282 struct drm_file *file_priv)
1289{ 1283{
1290 struct drm_intel_overlay_attrs *attrs = data; 1284 struct drm_intel_overlay_attrs *attrs = data;
1291 struct drm_i915_private *dev_priv = dev->dev_private; 1285 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1309,7 +1303,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1309 attrs->contrast = overlay->contrast; 1303 attrs->contrast = overlay->contrast;
1310 attrs->saturation = overlay->saturation; 1304 attrs->saturation = overlay->saturation;
1311 1305
1312 if (!IS_GEN2(dev)) { 1306 if (!IS_GEN2(dev_priv)) {
1313 attrs->gamma0 = I915_READ(OGAMC0); 1307 attrs->gamma0 = I915_READ(OGAMC0);
1314 attrs->gamma1 = I915_READ(OGAMC1); 1308 attrs->gamma1 = I915_READ(OGAMC1);
1315 attrs->gamma2 = I915_READ(OGAMC2); 1309 attrs->gamma2 = I915_READ(OGAMC2);
@@ -1341,7 +1335,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1341 intel_overlay_unmap_regs(overlay, regs); 1335 intel_overlay_unmap_regs(overlay, regs);
1342 1336
1343 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { 1337 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
1344 if (IS_GEN2(dev)) 1338 if (IS_GEN2(dev_priv))
1345 goto out_unlock; 1339 goto out_unlock;
1346 1340
1347 if (overlay->active) { 1341 if (overlay->active) {
@@ -1371,37 +1365,36 @@ out_unlock:
1371 return ret; 1365 return ret;
1372} 1366}
1373 1367
1374void intel_setup_overlay(struct drm_device *dev) 1368void intel_setup_overlay(struct drm_i915_private *dev_priv)
1375{ 1369{
1376 struct drm_i915_private *dev_priv = dev->dev_private;
1377 struct intel_overlay *overlay; 1370 struct intel_overlay *overlay;
1378 struct drm_i915_gem_object *reg_bo; 1371 struct drm_i915_gem_object *reg_bo;
1379 struct overlay_registers __iomem *regs; 1372 struct overlay_registers __iomem *regs;
1380 int ret; 1373 int ret;
1381 1374
1382 if (!HAS_OVERLAY(dev)) 1375 if (!HAS_OVERLAY(dev_priv))
1383 return; 1376 return;
1384 1377
1385 overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); 1378 overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
1386 if (!overlay) 1379 if (!overlay)
1387 return; 1380 return;
1388 1381
1389 mutex_lock(&dev->struct_mutex); 1382 mutex_lock(&dev_priv->dev->struct_mutex);
1390 if (WARN_ON(dev_priv->overlay)) 1383 if (WARN_ON(dev_priv->overlay))
1391 goto out_free; 1384 goto out_free;
1392 1385
1393 overlay->dev = dev; 1386 overlay->i915 = dev_priv;
1394 1387
1395 reg_bo = NULL; 1388 reg_bo = NULL;
1396 if (!OVERLAY_NEEDS_PHYSICAL(dev)) 1389 if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
1397 reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE); 1390 reg_bo = i915_gem_object_create_stolen(dev_priv->dev, PAGE_SIZE);
1398 if (reg_bo == NULL)
1399 reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
1400 if (reg_bo == NULL) 1391 if (reg_bo == NULL)
1392 reg_bo = i915_gem_object_create(dev_priv->dev, PAGE_SIZE);
1393 if (IS_ERR(reg_bo))
1401 goto out_free; 1394 goto out_free;
1402 overlay->reg_bo = reg_bo; 1395 overlay->reg_bo = reg_bo;
1403 1396
1404 if (OVERLAY_NEEDS_PHYSICAL(dev)) { 1397 if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
1405 ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE); 1398 ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
1406 if (ret) { 1399 if (ret) {
1407 DRM_ERROR("failed to attach phys overlay regs\n"); 1400 DRM_ERROR("failed to attach phys overlay regs\n");
@@ -1441,25 +1434,23 @@ void intel_setup_overlay(struct drm_device *dev)
1441 intel_overlay_unmap_regs(overlay, regs); 1434 intel_overlay_unmap_regs(overlay, regs);
1442 1435
1443 dev_priv->overlay = overlay; 1436 dev_priv->overlay = overlay;
1444 mutex_unlock(&dev->struct_mutex); 1437 mutex_unlock(&dev_priv->dev->struct_mutex);
1445 DRM_INFO("initialized overlay support\n"); 1438 DRM_INFO("initialized overlay support\n");
1446 return; 1439 return;
1447 1440
1448out_unpin_bo: 1441out_unpin_bo:
1449 if (!OVERLAY_NEEDS_PHYSICAL(dev)) 1442 if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
1450 i915_gem_object_ggtt_unpin(reg_bo); 1443 i915_gem_object_ggtt_unpin(reg_bo);
1451out_free_bo: 1444out_free_bo:
1452 drm_gem_object_unreference(&reg_bo->base); 1445 drm_gem_object_unreference(&reg_bo->base);
1453out_free: 1446out_free:
1454 mutex_unlock(&dev->struct_mutex); 1447 mutex_unlock(&dev_priv->dev->struct_mutex);
1455 kfree(overlay); 1448 kfree(overlay);
1456 return; 1449 return;
1457} 1450}
1458 1451
1459void intel_cleanup_overlay(struct drm_device *dev) 1452void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
1460{ 1453{
1461 struct drm_i915_private *dev_priv = dev->dev_private;
1462
1463 if (!dev_priv->overlay) 1454 if (!dev_priv->overlay)
1464 return; 1455 return;
1465 1456
@@ -1482,18 +1473,17 @@ struct intel_overlay_error_state {
1482static struct overlay_registers __iomem * 1473static struct overlay_registers __iomem *
1483intel_overlay_map_regs_atomic(struct intel_overlay *overlay) 1474intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1484{ 1475{
1485 struct drm_i915_private *dev_priv = to_i915(overlay->dev); 1476 struct drm_i915_private *dev_priv = overlay->i915;
1486 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1487 struct overlay_registers __iomem *regs; 1477 struct overlay_registers __iomem *regs;
1488 1478
1489 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1479 if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
1490 /* Cast to make sparse happy, but it's wc memory anyway, so 1480 /* Cast to make sparse happy, but it's wc memory anyway, so
1491 * equivalent to the wc io mapping on X86. */ 1481 * equivalent to the wc io mapping on X86. */
1492 regs = (struct overlay_registers __iomem *) 1482 regs = (struct overlay_registers __iomem *)
1493 overlay->reg_bo->phys_handle->vaddr; 1483 overlay->reg_bo->phys_handle->vaddr;
1494 else 1484 else
1495 regs = io_mapping_map_atomic_wc(ggtt->mappable, 1485 regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
1496 i915_gem_obj_ggtt_offset(overlay->reg_bo)); 1486 overlay->flip_addr);
1497 1487
1498 return regs; 1488 return regs;
1499} 1489}
@@ -1501,15 +1491,13 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1501static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, 1491static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
1502 struct overlay_registers __iomem *regs) 1492 struct overlay_registers __iomem *regs)
1503{ 1493{
1504 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1494 if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
1505 io_mapping_unmap_atomic(regs); 1495 io_mapping_unmap_atomic(regs);
1506} 1496}
1507 1497
1508
1509struct intel_overlay_error_state * 1498struct intel_overlay_error_state *
1510intel_overlay_capture_error_state(struct drm_device *dev) 1499intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
1511{ 1500{
1512 struct drm_i915_private *dev_priv = dev->dev_private;
1513 struct intel_overlay *overlay = dev_priv->overlay; 1501 struct intel_overlay *overlay = dev_priv->overlay;
1514 struct intel_overlay_error_state *error; 1502 struct intel_overlay_error_state *error;
1515 struct overlay_registers __iomem *regs; 1503 struct overlay_registers __iomem *regs;
@@ -1523,10 +1511,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
1523 1511
1524 error->dovsta = I915_READ(DOVSTA); 1512 error->dovsta = I915_READ(DOVSTA);
1525 error->isr = I915_READ(ISR); 1513 error->isr = I915_READ(ISR);
1526 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1514 error->base = overlay->flip_addr;
1527 error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
1528 else
1529 error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
1530 1515
1531 regs = intel_overlay_map_regs_atomic(overlay); 1516 regs = intel_overlay_map_regs_atomic(overlay);
1532 if (!regs) 1517 if (!regs)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 8357d571553a..f0b1602c3258 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1724,6 +1724,14 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
1724 container_of(panel, struct intel_connector, panel); 1724 container_of(panel, struct intel_connector, panel);
1725 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1725 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1726 1726
1727 if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
1728 intel_dp_aux_init_backlight_funcs(connector) == 0)
1729 return;
1730
1731 if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
1732 intel_dsi_dcs_init_backlight_funcs(connector) == 0)
1733 return;
1734
1727 if (IS_BROXTON(dev_priv)) { 1735 if (IS_BROXTON(dev_priv)) {
1728 panel->backlight.setup = bxt_setup_backlight; 1736 panel->backlight.setup = bxt_setup_backlight;
1729 panel->backlight.enable = bxt_enable_backlight; 1737 panel->backlight.enable = bxt_enable_backlight;
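
The new hunk turns backlight setup into a priority chain: eDP connectors first try the DPCD/AUX backlight, DSI connectors the DCS one, and only if neither claims the connector do the platform hooks get installed. A rough sketch of that pattern, with hypothetical helper names standing in for the real init functions; these names are assumptions, not the driver API:

#include <stdio.h>

/* Hypothetical stand-ins: return 0 when the specialised backend claims
 * the connector, non-zero to fall through to the next option. */
static int try_dp_aux_backlight(int is_edp)  { return is_edp ? 0 : -1; }
static int try_dsi_dcs_backlight(int is_dsi) { return is_dsi ? 0 : -1; }

static const char *pick_backlight_backend(int is_edp, int is_dsi)
{
        if (is_edp && try_dp_aux_backlight(is_edp) == 0)
                return "DPCD/AUX backlight";
        if (is_dsi && try_dsi_dcs_backlight(is_dsi) == 0)
                return "DSI DCS backlight";
        return "platform backlight (bxt/lpt/pch/...)";
}

int main(void)
{
        printf("eDP panel  -> %s\n", pick_backlight_backend(1, 0));
        printf("LVDS panel -> %s\n", pick_backlight_backend(0, 0));
        return 0;
}
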
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a7ef45da0a9e..29bdd79d9039 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -58,6 +58,10 @@ static void bxt_init_clock_gating(struct drm_device *dev)
58{ 58{
59 struct drm_i915_private *dev_priv = dev->dev_private; 59 struct drm_i915_private *dev_priv = dev->dev_private;
60 60
61 /* See Bspec note for PSR2_CTL bit 31, Wa#828:bxt */
62 I915_WRITE(CHICKEN_PAR1_1,
63 I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
64
61 /* WaDisableSDEUnitClockGating:bxt */ 65 /* WaDisableSDEUnitClockGating:bxt */
62 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 66 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
63 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 67 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
@@ -2012,10 +2016,10 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2012} 2016}
2013 2017
2014static uint32_t 2018static uint32_t
2015hsw_compute_linetime_wm(struct drm_device *dev, 2019hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
2016 struct intel_crtc_state *cstate)
2017{ 2020{
2018 struct drm_i915_private *dev_priv = dev->dev_private; 2021 const struct intel_atomic_state *intel_state =
2022 to_intel_atomic_state(cstate->base.state);
2019 const struct drm_display_mode *adjusted_mode = 2023 const struct drm_display_mode *adjusted_mode =
2020 &cstate->base.adjusted_mode; 2024 &cstate->base.adjusted_mode;
2021 u32 linetime, ips_linetime; 2025 u32 linetime, ips_linetime;
@@ -2024,7 +2028,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
2024 return 0; 2028 return 0;
2025 if (WARN_ON(adjusted_mode->crtc_clock == 0)) 2029 if (WARN_ON(adjusted_mode->crtc_clock == 0))
2026 return 0; 2030 return 0;
2027 if (WARN_ON(dev_priv->cdclk_freq == 0)) 2031 if (WARN_ON(intel_state->cdclk == 0))
2028 return 0; 2032 return 0;
2029 2033
2030 /* The WM are computed with base on how long it takes to fill a single 2034 /* The WM are computed with base on how long it takes to fill a single
@@ -2033,7 +2037,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
2033 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 2037 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2034 adjusted_mode->crtc_clock); 2038 adjusted_mode->crtc_clock);
2035 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 2039 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2036 dev_priv->cdclk_freq); 2040 intel_state->cdclk);
2037 2041
2038 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | 2042 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2039 PIPE_WM_LINETIME_TIME(linetime); 2043 PIPE_WM_LINETIME_TIME(linetime);
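
Both linetime values are simply "time to scan one line" expressed in 1/8 microsecond units, computed against the pixel clock and, for the IPS variant, against the CDCLK that now comes from the atomic state instead of dev_priv. A standalone model of the arithmetic, with DIV_ROUND_CLOSEST open-coded and example clocks in kHz; the numbers are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Round-to-nearest division for positive values, as DIV_ROUND_CLOSEST does. */
static uint32_t div_round_closest(uint32_t n, uint32_t d)
{
        return (n + d / 2) / d;
}

int main(void)
{
        uint32_t htotal    = 2200;      /* pixels per scanline incl. blanking */
        uint32_t pixel_clk = 148500;    /* kHz */
        uint32_t cdclk     = 337500;    /* kHz, taken from the atomic state */

        /* line period in 1/8 us units: htotal / clock, scaled by 1000 * 8 */
        uint32_t linetime     = div_round_closest(htotal * 1000 * 8, pixel_clk);
        uint32_t ips_linetime = div_round_closest(htotal * 1000 * 8, cdclk);

        printf("linetime=%u ips_linetime=%u (1/8 us units)\n",
               linetime, ips_linetime);
        return 0;
}
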
@@ -2146,14 +2150,14 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
2146static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5]) 2150static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2147{ 2151{
2148 /* ILK sprite LP0 latency is 1300 ns */ 2152 /* ILK sprite LP0 latency is 1300 ns */
2149 if (INTEL_INFO(dev)->gen == 5) 2153 if (IS_GEN5(dev))
2150 wm[0] = 13; 2154 wm[0] = 13;
2151} 2155}
2152 2156
2153static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5]) 2157static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2154{ 2158{
2155 /* ILK cursor LP0 latency is 1300 ns */ 2159 /* ILK cursor LP0 latency is 1300 ns */
2156 if (INTEL_INFO(dev)->gen == 5) 2160 if (IS_GEN5(dev))
2157 wm[0] = 13; 2161 wm[0] = 13;
2158 2162
2159 /* WaDoubleCursorLP3Latency:ivb */ 2163 /* WaDoubleCursorLP3Latency:ivb */
@@ -2309,7 +2313,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
2309 int level, max_level = ilk_wm_max_level(dev), usable_level; 2313 int level, max_level = ilk_wm_max_level(dev), usable_level;
2310 struct ilk_wm_maximums max; 2314 struct ilk_wm_maximums max;
2311 2315
2312 pipe_wm = &cstate->wm.optimal.ilk; 2316 pipe_wm = &cstate->wm.ilk.optimal;
2313 2317
2314 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 2318 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2315 struct intel_plane_state *ps; 2319 struct intel_plane_state *ps;
@@ -2352,7 +2356,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
2352 pipe_wm->wm[0] = pipe_wm->raw_wm[0]; 2356 pipe_wm->wm[0] = pipe_wm->raw_wm[0];
2353 2357
2354 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2358 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2355 pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate); 2359 pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
2356 2360
2357 if (!ilk_validate_pipe_wm(dev, pipe_wm)) 2361 if (!ilk_validate_pipe_wm(dev, pipe_wm))
2358 return -EINVAL; 2362 return -EINVAL;
@@ -2391,7 +2395,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
2391 struct intel_crtc *intel_crtc, 2395 struct intel_crtc *intel_crtc,
2392 struct intel_crtc_state *newstate) 2396 struct intel_crtc_state *newstate)
2393{ 2397{
2394 struct intel_pipe_wm *a = &newstate->wm.intermediate; 2398 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
2395 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk; 2399 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
2396 int level, max_level = ilk_wm_max_level(dev); 2400 int level, max_level = ilk_wm_max_level(dev);
2397 2401
@@ -2400,7 +2404,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
2400 * currently active watermarks to get values that are safe both before 2404 * currently active watermarks to get values that are safe both before
2401 * and after the vblank. 2405 * and after the vblank.
2402 */ 2406 */
2403 *a = newstate->wm.optimal.ilk; 2407 *a = newstate->wm.ilk.optimal;
2404 a->pipe_enabled |= b->pipe_enabled; 2408 a->pipe_enabled |= b->pipe_enabled;
2405 a->sprites_enabled |= b->sprites_enabled; 2409 a->sprites_enabled |= b->sprites_enabled;
2406 a->sprites_scaled |= b->sprites_scaled; 2410 a->sprites_scaled |= b->sprites_scaled;
@@ -2429,7 +2433,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
2429 * If our intermediate WM are identical to the final WM, then we can 2433 * If our intermediate WM are identical to the final WM, then we can
2430 * omit the post-vblank programming; only update if it's different. 2434 * omit the post-vblank programming; only update if it's different.
2431 */ 2435 */
2432 if (memcmp(a, &newstate->wm.optimal.ilk, sizeof(*a)) == 0) 2436 if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
2433 newstate->wm.need_postvbl_update = false; 2437 newstate->wm.need_postvbl_update = false;
2434 2438
2435 return 0; 2439 return 0;
@@ -2849,20 +2853,29 @@ skl_wm_plane_id(const struct intel_plane *plane)
2849static void 2853static void
2850skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, 2854skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2851 const struct intel_crtc_state *cstate, 2855 const struct intel_crtc_state *cstate,
2852 const struct intel_wm_config *config, 2856 struct skl_ddb_entry *alloc, /* out */
2853 struct skl_ddb_entry *alloc /* out */) 2857 int *num_active /* out */)
2854{ 2858{
2859 struct drm_atomic_state *state = cstate->base.state;
2860 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
2861 struct drm_i915_private *dev_priv = to_i915(dev);
2855 struct drm_crtc *for_crtc = cstate->base.crtc; 2862 struct drm_crtc *for_crtc = cstate->base.crtc;
2856 struct drm_crtc *crtc;
2857 unsigned int pipe_size, ddb_size; 2863 unsigned int pipe_size, ddb_size;
2858 int nth_active_pipe; 2864 int nth_active_pipe;
2865 int pipe = to_intel_crtc(for_crtc)->pipe;
2859 2866
2860 if (!cstate->base.active) { 2867 if (WARN_ON(!state) || !cstate->base.active) {
2861 alloc->start = 0; 2868 alloc->start = 0;
2862 alloc->end = 0; 2869 alloc->end = 0;
2870 *num_active = hweight32(dev_priv->active_crtcs);
2863 return; 2871 return;
2864 } 2872 }
2865 2873
2874 if (intel_state->active_pipe_changes)
2875 *num_active = hweight32(intel_state->active_crtcs);
2876 else
2877 *num_active = hweight32(dev_priv->active_crtcs);
2878
2866 if (IS_BROXTON(dev)) 2879 if (IS_BROXTON(dev))
2867 ddb_size = BXT_DDB_SIZE; 2880 ddb_size = BXT_DDB_SIZE;
2868 else 2881 else
@@ -2870,25 +2883,29 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2870 2883
2871 ddb_size -= 4; /* 4 blocks for bypass path allocation */ 2884 ddb_size -= 4; /* 4 blocks for bypass path allocation */
2872 2885
2873 nth_active_pipe = 0; 2886 /*
2874 for_each_crtc(dev, crtc) { 2887 * If the state doesn't change the active CRTC's, then there's
2875 if (!to_intel_crtc(crtc)->active) 2888 * no need to recalculate; the existing pipe allocation limits
2876 continue; 2889 * should remain unchanged. Note that we're safe from racing
2877 2890 * commits since any racing commit that changes the active CRTC
2878 if (crtc == for_crtc) 2891 * list would need to grab _all_ crtc locks, including the one
2879 break; 2892 * we currently hold.
2880 2893 */
2881 nth_active_pipe++; 2894 if (!intel_state->active_pipe_changes) {
2895 *alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
2896 return;
2882 } 2897 }
2883 2898
2884 pipe_size = ddb_size / config->num_pipes_active; 2899 nth_active_pipe = hweight32(intel_state->active_crtcs &
2885 alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active; 2900 (drm_crtc_mask(for_crtc) - 1));
2901 pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
2902 alloc->start = nth_active_pipe * ddb_size / *num_active;
2886 alloc->end = alloc->start + pipe_size; 2903 alloc->end = alloc->start + pipe_size;
2887} 2904}
2888 2905
2889static unsigned int skl_cursor_allocation(const struct intel_wm_config *config) 2906static unsigned int skl_cursor_allocation(int num_active)
2890{ 2907{
2891 if (config->num_pipes_active == 1) 2908 if (num_active == 1)
2892 return 32; 2909 return 32;
2893 2910
2894 return 8; 2911 return 8;
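
With the active-CRTC bitmask carried in the atomic state, a pipe's DDB slice is found by counting how many active pipes precede it (a popcount over the lower mask bits) and dividing the DDB evenly between the active pipes; the cursor then gets 32 blocks when only one pipe is active, 8 otherwise. A simplified standalone model of that split, ignoring the per-plane minimums handled later; the 896-block DDB size is an assumption used only for the example:

#include <stdint.h>
#include <stdio.h>

struct ddb_entry { uint16_t start, end; };

static unsigned int popcount32(uint32_t v)
{
        unsigned int n = 0;
        for (; v; v &= v - 1)
                n++;
        return n;
}

/* Model of skl_ddb_get_pipe_allocation_limits(): split ddb_size evenly
 * between active pipes, in pipe order. */
static struct ddb_entry pipe_ddb_slice(uint32_t active_crtcs, int pipe,
                                       uint16_t ddb_size)
{
        unsigned int num_active = popcount32(active_crtcs);
        unsigned int nth = popcount32(active_crtcs & ((1u << pipe) - 1));
        struct ddb_entry e;

        e.start = nth * ddb_size / num_active;
        e.end   = e.start + ddb_size / num_active;
        return e;
}

int main(void)
{
        uint32_t active = (1u << 0) | (1u << 2);   /* pipes A and C active */
        uint16_t ddb = 896 - 4;                    /* assumed DDB size minus bypass blocks */
        struct ddb_entry c = pipe_ddb_slice(active, 2, ddb);

        printf("pipe C gets blocks [%u, %u)\n", (unsigned)c.start, (unsigned)c.end);
        printf("cursor blocks: %u\n", popcount32(active) == 1 ? 32u : 8u);
        return 0;
}
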
@@ -2940,6 +2957,14 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2940 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); 2957 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
2941 struct drm_framebuffer *fb = pstate->fb; 2958 struct drm_framebuffer *fb = pstate->fb;
2942 uint32_t width = 0, height = 0; 2959 uint32_t width = 0, height = 0;
2960 unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;
2961
2962 if (!intel_pstate->visible)
2963 return 0;
2964 if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
2965 return 0;
2966 if (y && format != DRM_FORMAT_NV12)
2967 return 0;
2943 2968
2944 width = drm_rect_width(&intel_pstate->src) >> 16; 2969 width = drm_rect_width(&intel_pstate->src) >> 16;
2945 height = drm_rect_height(&intel_pstate->src) >> 16; 2970 height = drm_rect_height(&intel_pstate->src) >> 16;
@@ -2948,17 +2973,17 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2948 swap(width, height); 2973 swap(width, height);
2949 2974
2950 /* for planar format */ 2975 /* for planar format */
2951 if (fb->pixel_format == DRM_FORMAT_NV12) { 2976 if (format == DRM_FORMAT_NV12) {
2952 if (y) /* y-plane data rate */ 2977 if (y) /* y-plane data rate */
2953 return width * height * 2978 return width * height *
2954 drm_format_plane_cpp(fb->pixel_format, 0); 2979 drm_format_plane_cpp(format, 0);
2955 else /* uv-plane data rate */ 2980 else /* uv-plane data rate */
2956 return (width / 2) * (height / 2) * 2981 return (width / 2) * (height / 2) *
2957 drm_format_plane_cpp(fb->pixel_format, 1); 2982 drm_format_plane_cpp(format, 1);
2958 } 2983 }
2959 2984
2960 /* for packed formats */ 2985 /* for packed formats */
2961 return width * height * drm_format_plane_cpp(fb->pixel_format, 0); 2986 return width * height * drm_format_plane_cpp(format, 0);
2962} 2987}
2963 2988
2964/* 2989/*
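
The relative data rate is just visible width x height x bytes per pixel, with NV12 contributing a full-resolution Y plane and a 2x2-subsampled UV plane, and packed formats reporting zero for the y pass. A compact sketch with the format reduced to an is_nv12 flag; illustrative only:

#include <stdbool.h>
#include <stdio.h>

/* Model of skl_plane_relative_data_rate(): bytes touched per frame, used
 * only as a relative weight when dividing up the DDB. */
static unsigned int plane_data_rate(bool is_nv12, bool y_plane,
                                    unsigned int width, unsigned int height,
                                    unsigned int cpp_y, unsigned int cpp_uv)
{
        if (is_nv12) {
                if (y_plane)
                        return width * height * cpp_y;
                /* UV plane is subsampled 2x2 */
                return (width / 2) * (height / 2) * cpp_uv;
        }
        /* packed formats: single plane, the y pass contributes nothing */
        return y_plane ? 0 : width * height * cpp_y;
}

int main(void)
{
        /* 1920x1080 NV12: 1 byte/pixel Y, 2 bytes per 2x2 UV sample */
        printf("NV12 Y  rate: %u\n", plane_data_rate(true,  true,  1920, 1080, 1, 2));
        printf("NV12 UV rate: %u\n", plane_data_rate(true,  false, 1920, 1080, 1, 2));
        printf("packed  rate: %u\n", plane_data_rate(false, false, 1920, 1080, 4, 0));
        return 0;
}
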
@@ -2967,86 +2992,128 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2967 * 3 * 4096 * 8192 * 4 < 2^32 2992 * 3 * 4096 * 8192 * 4 < 2^32
2968 */ 2993 */
2969static unsigned int 2994static unsigned int
2970skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate) 2995skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
2971{ 2996{
2972 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 2997 struct drm_crtc_state *cstate = &intel_cstate->base;
2973 struct drm_device *dev = intel_crtc->base.dev; 2998 struct drm_atomic_state *state = cstate->state;
2999 struct drm_crtc *crtc = cstate->crtc;
3000 struct drm_device *dev = crtc->dev;
3001 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3002 const struct drm_plane *plane;
2974 const struct intel_plane *intel_plane; 3003 const struct intel_plane *intel_plane;
2975 unsigned int total_data_rate = 0; 3004 struct drm_plane_state *pstate;
3005 unsigned int rate, total_data_rate = 0;
3006 int id;
3007 int i;
2976 3008
2977 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3009 if (WARN_ON(!state))
2978 const struct drm_plane_state *pstate = intel_plane->base.state; 3010 return 0;
2979 3011
2980 if (pstate->fb == NULL) 3012 /* Calculate and cache data rate for each plane */
2981 continue; 3013 for_each_plane_in_state(state, plane, pstate, i) {
3014 id = skl_wm_plane_id(to_intel_plane(plane));
3015 intel_plane = to_intel_plane(plane);
2982 3016
2983 if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR) 3017 if (intel_plane->pipe != intel_crtc->pipe)
2984 continue; 3018 continue;
2985 3019
2986 /* packed/uv */ 3020 /* packed/uv */
2987 total_data_rate += skl_plane_relative_data_rate(cstate, 3021 rate = skl_plane_relative_data_rate(intel_cstate,
2988 pstate, 3022 pstate, 0);
2989 0); 3023 intel_cstate->wm.skl.plane_data_rate[id] = rate;
2990 3024
2991 if (pstate->fb->pixel_format == DRM_FORMAT_NV12) 3025 /* y-plane */
2992 /* y-plane */ 3026 rate = skl_plane_relative_data_rate(intel_cstate,
2993 total_data_rate += skl_plane_relative_data_rate(cstate, 3027 pstate, 1);
2994 pstate, 3028 intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
2995 1);
2996 } 3029 }
2997 3030
3031 /* Calculate CRTC's total data rate from cached values */
3032 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3033 int id = skl_wm_plane_id(intel_plane);
3034
3035 /* packed/uv */
3036 total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
3037 total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
3038 }
3039
3040 WARN_ON(cstate->plane_mask && total_data_rate == 0);
3041
2998 return total_data_rate; 3042 return total_data_rate;
2999} 3043}
3000 3044
3001static void 3045static int
3002skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, 3046skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3003 struct skl_ddb_allocation *ddb /* out */) 3047 struct skl_ddb_allocation *ddb /* out */)
3004{ 3048{
3049 struct drm_atomic_state *state = cstate->base.state;
3005 struct drm_crtc *crtc = cstate->base.crtc; 3050 struct drm_crtc *crtc = cstate->base.crtc;
3006 struct drm_device *dev = crtc->dev; 3051 struct drm_device *dev = crtc->dev;
3007 struct drm_i915_private *dev_priv = to_i915(dev);
3008 struct intel_wm_config *config = &dev_priv->wm.config;
3009 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3052 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3010 struct intel_plane *intel_plane; 3053 struct intel_plane *intel_plane;
3054 struct drm_plane *plane;
3055 struct drm_plane_state *pstate;
3011 enum pipe pipe = intel_crtc->pipe; 3056 enum pipe pipe = intel_crtc->pipe;
3012 struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; 3057 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
3013 uint16_t alloc_size, start, cursor_blocks; 3058 uint16_t alloc_size, start, cursor_blocks;
3014 uint16_t minimum[I915_MAX_PLANES]; 3059 uint16_t *minimum = cstate->wm.skl.minimum_blocks;
3015 uint16_t y_minimum[I915_MAX_PLANES]; 3060 uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
3016 unsigned int total_data_rate; 3061 unsigned int total_data_rate;
3062 int num_active;
3063 int id, i;
3064
3065 if (WARN_ON(!state))
3066 return 0;
3017 3067
3018 skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc); 3068 if (!cstate->base.active) {
3069 ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
3070 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3071 memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
3072 return 0;
3073 }
3074
3075 skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
3019 alloc_size = skl_ddb_entry_size(alloc); 3076 alloc_size = skl_ddb_entry_size(alloc);
3020 if (alloc_size == 0) { 3077 if (alloc_size == 0) {
3021 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); 3078 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3022 memset(&ddb->plane[pipe][PLANE_CURSOR], 0, 3079 return 0;
3023 sizeof(ddb->plane[pipe][PLANE_CURSOR]));
3024 return;
3025 } 3080 }
3026 3081
3027 cursor_blocks = skl_cursor_allocation(config); 3082 cursor_blocks = skl_cursor_allocation(num_active);
3028 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks; 3083 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
3029 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end; 3084 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
3030 3085
3031 alloc_size -= cursor_blocks; 3086 alloc_size -= cursor_blocks;
3032 alloc->end -= cursor_blocks;
3033 3087
3034 /* 1. Allocate the mininum required blocks for each active plane */ 3088 /* 1. Allocate the mininum required blocks for each active plane */
3035 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3089 for_each_plane_in_state(state, plane, pstate, i) {
3036 struct drm_plane *plane = &intel_plane->base; 3090 intel_plane = to_intel_plane(plane);
3037 struct drm_framebuffer *fb = plane->state->fb; 3091 id = skl_wm_plane_id(intel_plane);
3038 int id = skl_wm_plane_id(intel_plane);
3039 3092
3040 if (!to_intel_plane_state(plane->state)->visible) 3093 if (intel_plane->pipe != pipe)
3041 continue; 3094 continue;
3042 3095
3043 if (plane->type == DRM_PLANE_TYPE_CURSOR) 3096 if (!to_intel_plane_state(pstate)->visible) {
3097 minimum[id] = 0;
3098 y_minimum[id] = 0;
3044 continue; 3099 continue;
3100 }
3101 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
3102 minimum[id] = 0;
3103 y_minimum[id] = 0;
3104 continue;
3105 }
3045 3106
3046 minimum[id] = 8; 3107 minimum[id] = 8;
3047 alloc_size -= minimum[id]; 3108 if (pstate->fb->pixel_format == DRM_FORMAT_NV12)
3048 y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0; 3109 y_minimum[id] = 8;
3049 alloc_size -= y_minimum[id]; 3110 else
3111 y_minimum[id] = 0;
3112 }
3113
3114 for (i = 0; i < PLANE_CURSOR; i++) {
3115 alloc_size -= minimum[i];
3116 alloc_size -= y_minimum[i];
3050 } 3117 }
3051 3118
3052 /* 3119 /*
@@ -3056,21 +3123,16 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3056 * FIXME: we may not allocate every single block here. 3123 * FIXME: we may not allocate every single block here.
3057 */ 3124 */
3058 total_data_rate = skl_get_total_relative_data_rate(cstate); 3125 total_data_rate = skl_get_total_relative_data_rate(cstate);
3126 if (total_data_rate == 0)
3127 return 0;
3059 3128
3060 start = alloc->start; 3129 start = alloc->start;
3061 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3130 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3062 struct drm_plane *plane = &intel_plane->base;
3063 struct drm_plane_state *pstate = intel_plane->base.state;
3064 unsigned int data_rate, y_data_rate; 3131 unsigned int data_rate, y_data_rate;
3065 uint16_t plane_blocks, y_plane_blocks = 0; 3132 uint16_t plane_blocks, y_plane_blocks = 0;
3066 int id = skl_wm_plane_id(intel_plane); 3133 int id = skl_wm_plane_id(intel_plane);
3067 3134
3068 if (!to_intel_plane_state(pstate)->visible) 3135 data_rate = cstate->wm.skl.plane_data_rate[id];
3069 continue;
3070 if (plane->type == DRM_PLANE_TYPE_CURSOR)
3071 continue;
3072
3073 data_rate = skl_plane_relative_data_rate(cstate, pstate, 0);
3074 3136
3075 /* 3137 /*
3076 * allocation for (packed formats) or (uv-plane part of planar format): 3138 * allocation for (packed formats) or (uv-plane part of planar format):
@@ -3081,30 +3143,32 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3081 plane_blocks += div_u64((uint64_t)alloc_size * data_rate, 3143 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
3082 total_data_rate); 3144 total_data_rate);
3083 3145
3084 ddb->plane[pipe][id].start = start; 3146 /* Leave disabled planes at (0,0) */
3085 ddb->plane[pipe][id].end = start + plane_blocks; 3147 if (data_rate) {
3148 ddb->plane[pipe][id].start = start;
3149 ddb->plane[pipe][id].end = start + plane_blocks;
3150 }
3086 3151
3087 start += plane_blocks; 3152 start += plane_blocks;
3088 3153
3089 /* 3154 /*
3090 * allocation for y_plane part of planar format: 3155 * allocation for y_plane part of planar format:
3091 */ 3156 */
3092 if (pstate->fb->pixel_format == DRM_FORMAT_NV12) { 3157 y_data_rate = cstate->wm.skl.plane_y_data_rate[id];
3093 y_data_rate = skl_plane_relative_data_rate(cstate, 3158
3094 pstate, 3159 y_plane_blocks = y_minimum[id];
3095 1); 3160 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
3096 y_plane_blocks = y_minimum[id]; 3161 total_data_rate);
3097 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
3098 total_data_rate);
3099 3162
3163 if (y_data_rate) {
3100 ddb->y_plane[pipe][id].start = start; 3164 ddb->y_plane[pipe][id].start = start;
3101 ddb->y_plane[pipe][id].end = start + y_plane_blocks; 3165 ddb->y_plane[pipe][id].end = start + y_plane_blocks;
3102
3103 start += y_plane_blocks;
3104 } 3166 }
3105 3167
3168 start += y_plane_blocks;
3106 } 3169 }
3107 3170
3171 return 0;
3108} 3172}
3109 3173
3110static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config) 3174static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
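
After the minimums are carved out, each plane receives a share of the remaining blocks proportional to its cached data rate: blocks = minimum + alloc_size * rate / total_rate, with a 64-bit intermediate as in div_u64. A small standalone model of that split for two planes; the sizes are made-up example values:

#include <stdint.h>
#include <stdio.h>

/* Model of the per-plane split in skl_allocate_pipe_ddb(). */
static uint16_t plane_blocks(uint16_t minimum, uint16_t alloc_size,
                             unsigned int rate, unsigned int total_rate)
{
        return minimum + (uint16_t)(((uint64_t)alloc_size * rate) / total_rate);
}

int main(void)
{
        uint16_t alloc_size = 800;                 /* blocks left after minimums */
        unsigned int rate[2] = { 1920u * 1080 * 4, 640u * 480 * 4 };
        unsigned int total   = rate[0] + rate[1];
        uint16_t start = 0;

        for (int i = 0; i < 2; i++) {
                uint16_t blocks = plane_blocks(8, alloc_size, rate[i], total);
                printf("plane %d: blocks [%u, %u)\n",
                       i, (unsigned)start, (unsigned)(start + blocks));
                start += blocks;
        }
        return 0;
}
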
@@ -3161,35 +3225,17 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
3161 return ret; 3225 return ret;
3162} 3226}
3163 3227
3164static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb, 3228static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3165 const struct intel_crtc *intel_crtc) 3229 struct intel_crtc_state *cstate,
3230 struct intel_plane_state *intel_pstate,
3231 uint16_t ddb_allocation,
3232 int level,
3233 uint16_t *out_blocks, /* out */
3234 uint8_t *out_lines, /* out */
3235 bool *enabled /* out */)
3166{ 3236{
3167 struct drm_device *dev = intel_crtc->base.dev; 3237 struct drm_plane_state *pstate = &intel_pstate->base;
3168 struct drm_i915_private *dev_priv = dev->dev_private; 3238 struct drm_framebuffer *fb = pstate->fb;
3169 const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
3170
3171 /*
3172 * If ddb allocation of pipes changed, it may require recalculation of
3173 * watermarks
3174 */
3175 if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe)))
3176 return true;
3177
3178 return false;
3179}
3180
3181static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3182 struct intel_crtc_state *cstate,
3183 struct intel_plane *intel_plane,
3184 uint16_t ddb_allocation,
3185 int level,
3186 uint16_t *out_blocks, /* out */
3187 uint8_t *out_lines /* out */)
3188{
3189 struct drm_plane *plane = &intel_plane->base;
3190 struct drm_framebuffer *fb = plane->state->fb;
3191 struct intel_plane_state *intel_pstate =
3192 to_intel_plane_state(plane->state);
3193 uint32_t latency = dev_priv->wm.skl_latency[level]; 3239 uint32_t latency = dev_priv->wm.skl_latency[level];
3194 uint32_t method1, method2; 3240 uint32_t method1, method2;
3195 uint32_t plane_bytes_per_line, plane_blocks_per_line; 3241 uint32_t plane_bytes_per_line, plane_blocks_per_line;
@@ -3198,13 +3244,15 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3198 uint8_t cpp; 3244 uint8_t cpp;
3199 uint32_t width = 0, height = 0; 3245 uint32_t width = 0, height = 0;
3200 3246
3201 if (latency == 0 || !cstate->base.active || !intel_pstate->visible) 3247 if (latency == 0 || !cstate->base.active || !intel_pstate->visible) {
3202 return false; 3248 *enabled = false;
3249 return 0;
3250 }
3203 3251
3204 width = drm_rect_width(&intel_pstate->src) >> 16; 3252 width = drm_rect_width(&intel_pstate->src) >> 16;
3205 height = drm_rect_height(&intel_pstate->src) >> 16; 3253 height = drm_rect_height(&intel_pstate->src) >> 16;
3206 3254
3207 if (intel_rotation_90_or_270(plane->state->rotation)) 3255 if (intel_rotation_90_or_270(pstate->rotation))
3208 swap(width, height); 3256 swap(width, height);
3209 3257
3210 cpp = drm_format_plane_cpp(fb->pixel_format, 0); 3258 cpp = drm_format_plane_cpp(fb->pixel_format, 0);
@@ -3224,7 +3272,7 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3224 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) { 3272 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
3225 uint32_t min_scanlines = 4; 3273 uint32_t min_scanlines = 4;
3226 uint32_t y_tile_minimum; 3274 uint32_t y_tile_minimum;
3227 if (intel_rotation_90_or_270(plane->state->rotation)) { 3275 if (intel_rotation_90_or_270(pstate->rotation)) {
3228 int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ? 3276 int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
3229 drm_format_plane_cpp(fb->pixel_format, 1) : 3277 drm_format_plane_cpp(fb->pixel_format, 1) :
3230 drm_format_plane_cpp(fb->pixel_format, 0); 3278 drm_format_plane_cpp(fb->pixel_format, 0);
@@ -3260,40 +3308,99 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3260 res_blocks++; 3308 res_blocks++;
3261 } 3309 }
3262 3310
3263 if (res_blocks >= ddb_allocation || res_lines > 31) 3311 if (res_blocks >= ddb_allocation || res_lines > 31) {
3264 return false; 3312 *enabled = false;
3313
3314 /*
3315 * If there are no valid level 0 watermarks, then we can't
3316 * support this display configuration.
3317 */
3318 if (level) {
3319 return 0;
3320 } else {
3321 DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
3322 DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
3323 to_intel_crtc(cstate->base.crtc)->pipe,
3324 skl_wm_plane_id(to_intel_plane(pstate->plane)),
3325 res_blocks, ddb_allocation, res_lines);
3326
3327 return -EINVAL;
3328 }
3329 }
3265 3330
3266 *out_blocks = res_blocks; 3331 *out_blocks = res_blocks;
3267 *out_lines = res_lines; 3332 *out_lines = res_lines;
3333 *enabled = true;
3268 3334
3269 return true; 3335 return 0;
3270} 3336}
3271 3337
3272static void skl_compute_wm_level(const struct drm_i915_private *dev_priv, 3338static int
3273 struct skl_ddb_allocation *ddb, 3339skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3274 struct intel_crtc_state *cstate, 3340 struct skl_ddb_allocation *ddb,
3275 int level, 3341 struct intel_crtc_state *cstate,
3276 struct skl_wm_level *result) 3342 int level,
3343 struct skl_wm_level *result)
3277{ 3344{
3278 struct drm_device *dev = dev_priv->dev; 3345 struct drm_device *dev = dev_priv->dev;
3346 struct drm_atomic_state *state = cstate->base.state;
3279 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 3347 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3348 struct drm_plane *plane;
3280 struct intel_plane *intel_plane; 3349 struct intel_plane *intel_plane;
3350 struct intel_plane_state *intel_pstate;
3281 uint16_t ddb_blocks; 3351 uint16_t ddb_blocks;
3282 enum pipe pipe = intel_crtc->pipe; 3352 enum pipe pipe = intel_crtc->pipe;
3353 int ret;
3283 3354
3284 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3355 /*
3356 * We'll only calculate watermarks for planes that are actually
3357 * enabled, so make sure all other planes are set as disabled.
3358 */
3359 memset(result, 0, sizeof(*result));
3360
3361 for_each_intel_plane_mask(dev, intel_plane, cstate->base.plane_mask) {
3285 int i = skl_wm_plane_id(intel_plane); 3362 int i = skl_wm_plane_id(intel_plane);
3286 3363
3364 plane = &intel_plane->base;
3365 intel_pstate = NULL;
3366 if (state)
3367 intel_pstate =
3368 intel_atomic_get_existing_plane_state(state,
3369 intel_plane);
3370
3371 /*
3372 * Note: If we start supporting multiple pending atomic commits
3373 * against the same planes/CRTC's in the future, plane->state
3374 * will no longer be the correct pre-state to use for the
3375 * calculations here and we'll need to change where we get the
3376 * 'unchanged' plane data from.
3377 *
3378 * For now this is fine because we only allow one queued commit
3379 * against a CRTC. Even if the plane isn't modified by this
3380 * transaction and we don't have a plane lock, we still have
3381 * the CRTC's lock, so we know that no other transactions are
3382 * racing with us to update it.
3383 */
3384 if (!intel_pstate)
3385 intel_pstate = to_intel_plane_state(plane->state);
3386
3387 WARN_ON(!intel_pstate->base.fb);
3388
3287 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); 3389 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
3288 3390
3289 result->plane_en[i] = skl_compute_plane_wm(dev_priv, 3391 ret = skl_compute_plane_wm(dev_priv,
3290 cstate, 3392 cstate,
3291 intel_plane, 3393 intel_pstate,
3292 ddb_blocks, 3394 ddb_blocks,
3293 level, 3395 level,
3294 &result->plane_res_b[i], 3396 &result->plane_res_b[i],
3295 &result->plane_res_l[i]); 3397 &result->plane_res_l[i],
3398 &result->plane_en[i]);
3399 if (ret)
3400 return ret;
3296 } 3401 }
3402
3403 return 0;
3297} 3404}
3298 3405
3299static uint32_t 3406static uint32_t
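
The reworked helper no longer just returns true/false: a level that does not fit its allocation is simply disabled, but if even level 0 cannot fit, the whole atomic check fails, since such a display configuration cannot be supported. A sketch of that tail-end decision with the blocks/lines computation stubbed out; illustrative only:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Model of the tail of skl_compute_plane_wm(): decide whether a level is
 * enabled, and fail the whole check only when level 0 does not fit. */
static int finish_wm_level(int level, unsigned int res_blocks,
                           unsigned int res_lines,
                           unsigned int ddb_allocation, bool *enabled)
{
        if (res_blocks >= ddb_allocation || res_lines > 31) {
                *enabled = false;
                if (level)
                        return 0;        /* higher levels may simply be disabled */
                fprintf(stderr, "level 0 needs %u/%u blocks, %u/31 lines\n",
                        res_blocks, ddb_allocation, res_lines);
                return -EINVAL;          /* config exceeds watermark limits */
        }
        *enabled = true;
        return 0;
}

int main(void)
{
        bool en;
        printf("level 3: ret=%d\n", finish_wm_level(3, 200, 40, 160, &en));
        printf("level 0: ret=%d\n", finish_wm_level(0, 200, 10, 160, &en));
        return 0;
}
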
@@ -3327,21 +3434,26 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
3327 } 3434 }
3328} 3435}
3329 3436
3330static void skl_compute_pipe_wm(struct intel_crtc_state *cstate, 3437static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
3331 struct skl_ddb_allocation *ddb, 3438 struct skl_ddb_allocation *ddb,
3332 struct skl_pipe_wm *pipe_wm) 3439 struct skl_pipe_wm *pipe_wm)
3333{ 3440{
3334 struct drm_device *dev = cstate->base.crtc->dev; 3441 struct drm_device *dev = cstate->base.crtc->dev;
3335 const struct drm_i915_private *dev_priv = dev->dev_private; 3442 const struct drm_i915_private *dev_priv = dev->dev_private;
3336 int level, max_level = ilk_wm_max_level(dev); 3443 int level, max_level = ilk_wm_max_level(dev);
3444 int ret;
3337 3445
3338 for (level = 0; level <= max_level; level++) { 3446 for (level = 0; level <= max_level; level++) {
3339 skl_compute_wm_level(dev_priv, ddb, cstate, 3447 ret = skl_compute_wm_level(dev_priv, ddb, cstate,
3340 level, &pipe_wm->wm[level]); 3448 level, &pipe_wm->wm[level]);
3449 if (ret)
3450 return ret;
3341 } 3451 }
3342 pipe_wm->linetime = skl_compute_linetime_wm(cstate); 3452 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
3343 3453
3344 skl_compute_transition_wm(cstate, &pipe_wm->trans_wm); 3454 skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
3455
3456 return 0;
3345} 3457}
3346 3458
3347static void skl_compute_wm_results(struct drm_device *dev, 3459static void skl_compute_wm_results(struct drm_device *dev,
@@ -3421,7 +3533,9 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv,
3421 int i, level, max_level = ilk_wm_max_level(dev); 3533 int i, level, max_level = ilk_wm_max_level(dev);
3422 enum pipe pipe = crtc->pipe; 3534 enum pipe pipe = crtc->pipe;
3423 3535
3424 if (!new->dirty[pipe]) 3536 if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
3537 continue;
3538 if (!crtc->active)
3425 continue; 3539 continue;
3426 3540
3427 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]); 3541 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
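
The per-pipe dirty booleans become a single dirty_pipes bitmask keyed on drm_crtc_mask(), so "does this pipe need reprogramming" collapses to one AND. A trivial illustration, using a plain bit index in place of the real CRTC mask:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t dirty_pipes = 0;
        uint32_t crtc_mask = 1u << 1;          /* stand-in for drm_crtc_mask(crtc) */

        dirty_pipes |= crtc_mask;              /* mark pipe B dirty */

        if ((dirty_pipes & crtc_mask) == 0)
                printf("pipe B: skip, watermarks unchanged\n");
        else
                printf("pipe B: reprogram watermarks\n");
        return 0;
}
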
@@ -3588,87 +3702,144 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
3588 } 3702 }
3589} 3703}
3590 3704
3591static bool skl_update_pipe_wm(struct drm_crtc *crtc, 3705static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
3592 struct skl_ddb_allocation *ddb, /* out */ 3706 struct skl_ddb_allocation *ddb, /* out */
3593 struct skl_pipe_wm *pipe_wm /* out */) 3707 struct skl_pipe_wm *pipe_wm, /* out */
3708 bool *changed /* out */)
3594{ 3709{
3595 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3710 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc);
3596 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 3711 struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
3712 int ret;
3597 3713
3598 skl_allocate_pipe_ddb(cstate, ddb); 3714 ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
3599 skl_compute_pipe_wm(cstate, ddb, pipe_wm); 3715 if (ret)
3716 return ret;
3600 3717
3601 if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm))) 3718 if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
3602 return false; 3719 *changed = false;
3603 3720 else
3604 intel_crtc->wm.active.skl = *pipe_wm; 3721 *changed = true;
3605 3722
3606 return true; 3723 return 0;
3607} 3724}
3608 3725
3609static void skl_update_other_pipe_wm(struct drm_device *dev, 3726static int
3610 struct drm_crtc *crtc, 3727skl_compute_ddb(struct drm_atomic_state *state)
3611 struct skl_wm_values *r)
3612{ 3728{
3729 struct drm_device *dev = state->dev;
3730 struct drm_i915_private *dev_priv = to_i915(dev);
3731 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3613 struct intel_crtc *intel_crtc; 3732 struct intel_crtc *intel_crtc;
3614 struct intel_crtc *this_crtc = to_intel_crtc(crtc); 3733 struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
3734 unsigned realloc_pipes = dev_priv->active_crtcs;
3735 int ret;
3615 3736
3616 /* 3737 /*
3617 * If the WM update hasn't changed the allocation for this_crtc (the 3738 * If this is our first atomic update following hardware readout,
3618 * crtc we are currently computing the new WM values for), other 3739 * we can't trust the DDB that the BIOS programmed for us. Let's
3619 * enabled crtcs will keep the same allocation and we don't need to 3740 * pretend that all pipes switched active status so that we'll
3620 * recompute anything for them. 3741 * ensure a full DDB recompute.
3621 */ 3742 */
3622 if (!skl_ddb_allocation_changed(&r->ddb, this_crtc)) 3743 if (dev_priv->wm.distrust_bios_wm)
3623 return; 3744 intel_state->active_pipe_changes = ~0;
3624 3745
3625 /* 3746 /*
3626 * Otherwise, because of this_crtc being freshly enabled/disabled, the 3747 * If the modeset changes which CRTC's are active, we need to
3627 * other active pipes need new DDB allocation and WM values. 3748 * recompute the DDB allocation for *all* active pipes, even
3749 * those that weren't otherwise being modified in any way by this
3750 * atomic commit. Due to the shrinking of the per-pipe allocations
3751 * when new active CRTC's are added, it's possible for a pipe that
3752 * we were already using and aren't changing at all here to suddenly
3753 * become invalid if its DDB needs exceeds its new allocation.
3754 *
3755 * Note that if we wind up doing a full DDB recompute, we can't let
3756 * any other display updates race with this transaction, so we need
3757 * to grab the lock on *all* CRTC's.
3628 */ 3758 */
3629 for_each_intel_crtc(dev, intel_crtc) { 3759 if (intel_state->active_pipe_changes) {
3630 struct skl_pipe_wm pipe_wm = {}; 3760 realloc_pipes = ~0;
3631 bool wm_changed; 3761 intel_state->wm_results.dirty_pipes = ~0;
3632 3762 }
3633 if (this_crtc->pipe == intel_crtc->pipe)
3634 continue;
3635
3636 if (!intel_crtc->active)
3637 continue;
3638 3763
3639 wm_changed = skl_update_pipe_wm(&intel_crtc->base, 3764 for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
3640 &r->ddb, &pipe_wm); 3765 struct intel_crtc_state *cstate;
3641 3766
3642 /* 3767 cstate = intel_atomic_get_crtc_state(state, intel_crtc);
3643 * If we end up re-computing the other pipe WM values, it's 3768 if (IS_ERR(cstate))
3644 * because it was really needed, so we expect the WM values to 3769 return PTR_ERR(cstate);
3645 * be different.
3646 */
3647 WARN_ON(!wm_changed);
3648 3770
3649 skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc); 3771 ret = skl_allocate_pipe_ddb(cstate, ddb);
3650 r->dirty[intel_crtc->pipe] = true; 3772 if (ret)
3773 return ret;
3651 } 3774 }
3775
3776 return 0;
3652} 3777}
3653 3778
3654static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe) 3779static int
3780skl_compute_wm(struct drm_atomic_state *state)
3655{ 3781{
3656 watermarks->wm_linetime[pipe] = 0; 3782 struct drm_crtc *crtc;
3657 memset(watermarks->plane[pipe], 0, 3783 struct drm_crtc_state *cstate;
3658 sizeof(uint32_t) * 8 * I915_MAX_PLANES); 3784 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3659 memset(watermarks->plane_trans[pipe], 3785 struct skl_wm_values *results = &intel_state->wm_results;
3660 0, sizeof(uint32_t) * I915_MAX_PLANES); 3786 struct skl_pipe_wm *pipe_wm;
3661 watermarks->plane_trans[pipe][PLANE_CURSOR] = 0; 3787 bool changed = false;
3788 int ret, i;
3662 3789
3663 /* Clear ddb entries for pipe */ 3790 /*
3664 memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry)); 3791 * If this transaction isn't actually touching any CRTC's, don't
3665 memset(&watermarks->ddb.plane[pipe], 0, 3792 * bother with watermark calculation. Note that if we pass this
3666 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES); 3793 * test, we're guaranteed to hold at least one CRTC state mutex,
3667 memset(&watermarks->ddb.y_plane[pipe], 0, 3794 * which means we can safely use values like dev_priv->active_crtcs
3668 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES); 3795 * since any racing commits that want to update them would need to
3669 memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0, 3796 * hold _all_ CRTC state mutexes.
3670 sizeof(struct skl_ddb_entry)); 3797 */
3798 for_each_crtc_in_state(state, crtc, cstate, i)
3799 changed = true;
3800 if (!changed)
3801 return 0;
3802
3803 /* Clear all dirty flags */
3804 results->dirty_pipes = 0;
3805
3806 ret = skl_compute_ddb(state);
3807 if (ret)
3808 return ret;
3809
3810 /*
3811 * Calculate WM's for all pipes that are part of this transaction.
3812 * Note that the DDB allocation above may have added more CRTC's that
3813 * weren't otherwise being modified (and set bits in dirty_pipes) if
3814 * pipe allocations had to change.
3815 *
3816 * FIXME: Now that we're doing this in the atomic check phase, we
3817 * should allow skl_update_pipe_wm() to return failure in cases where
3818 * no suitable watermark values can be found.
3819 */
3820 for_each_crtc_in_state(state, crtc, cstate, i) {
3821 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3822 struct intel_crtc_state *intel_cstate =
3823 to_intel_crtc_state(cstate);
3824
3825 pipe_wm = &intel_cstate->wm.skl.optimal;
3826 ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm,
3827 &changed);
3828 if (ret)
3829 return ret;
3830
3831 if (changed)
3832 results->dirty_pipes |= drm_crtc_mask(crtc);
3833
3834 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
3835 /* This pipe's WM's did not change */
3836 continue;
3837
3838 intel_cstate->update_wm_pre = true;
3839 skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc);
3840 }
3671 3841
3842 return 0;
3672} 3843}
3673 3844
3674static void skl_update_wm(struct drm_crtc *crtc) 3845static void skl_update_wm(struct drm_crtc *crtc)
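
The check phase first decides which pipes need their DDB revisited: all of them when the set of active pipes changes (or when the BIOS-programmed state cannot be trusted after readout), otherwise only the currently active ones, whose existing limits are then reused unchanged. A simplified model of that selection; the real code additionally pulls every affected CRTC's state into the transaction, which is omitted here:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the pipe-selection logic in skl_compute_ddb(). */
static uint32_t pipes_to_realloc(uint32_t active_crtcs,
                                 bool active_pipes_changed,
                                 bool distrust_bios_wm)
{
        /* Hardware readout can't be trusted: behave as if everything changed. */
        if (distrust_bios_wm)
                active_pipes_changed = true;

        /* Adding or removing an active CRTC shrinks or grows every pipe's
         * DDB slice, so all pipes must be revalidated in that case. */
        return active_pipes_changed ? ~0u : active_crtcs;
}

int main(void)
{
        uint32_t active = (1u << 0) | (1u << 1);   /* pipes A and B active */

        printf("plane-only update:        0x%x\n", pipes_to_realloc(active, false, false));
        printf("modeset changing pipes:   0x%x\n", pipes_to_realloc(active, true,  false));
        printf("first commit after boot:  0x%x\n", pipes_to_realloc(active, false, true));
        return 0;
}
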
@@ -3678,26 +3849,22 @@ static void skl_update_wm(struct drm_crtc *crtc)
3678 struct drm_i915_private *dev_priv = dev->dev_private; 3849 struct drm_i915_private *dev_priv = dev->dev_private;
3679 struct skl_wm_values *results = &dev_priv->wm.skl_results; 3850 struct skl_wm_values *results = &dev_priv->wm.skl_results;
3680 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 3851 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3681 struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl; 3852 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
3682
3683
3684 /* Clear all dirty flags */
3685 memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);
3686
3687 skl_clear_wm(results, intel_crtc->pipe);
3688 3853
3689 if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm)) 3854 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
3690 return; 3855 return;
3691 3856
3692 skl_compute_wm_results(dev, pipe_wm, results, intel_crtc); 3857 intel_crtc->wm.active.skl = *pipe_wm;
3693 results->dirty[intel_crtc->pipe] = true; 3858
3859 mutex_lock(&dev_priv->wm.wm_mutex);
3694 3860
3695 skl_update_other_pipe_wm(dev, crtc, results);
3696 skl_write_wm_values(dev_priv, results); 3861 skl_write_wm_values(dev_priv, results);
3697 skl_flush_wm_values(dev_priv, results); 3862 skl_flush_wm_values(dev_priv, results);
3698 3863
3699 /* store the new configuration */ 3864 /* store the new configuration */
3700 dev_priv->wm.skl_hw = *results; 3865 dev_priv->wm.skl_hw = *results;
3866
3867 mutex_unlock(&dev_priv->wm.wm_mutex);
3701} 3868}
3702 3869
3703static void ilk_compute_wm_config(struct drm_device *dev, 3870static void ilk_compute_wm_config(struct drm_device *dev,
@@ -3757,7 +3924,7 @@ static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
3757 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 3924 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3758 3925
3759 mutex_lock(&dev_priv->wm.wm_mutex); 3926 mutex_lock(&dev_priv->wm.wm_mutex);
3760 intel_crtc->wm.active.ilk = cstate->wm.intermediate; 3927 intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
3761 ilk_program_watermarks(dev_priv); 3928 ilk_program_watermarks(dev_priv);
3762 mutex_unlock(&dev_priv->wm.wm_mutex); 3929 mutex_unlock(&dev_priv->wm.wm_mutex);
3763} 3930}
@@ -3769,7 +3936,7 @@ static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
3769 3936
3770 mutex_lock(&dev_priv->wm.wm_mutex); 3937 mutex_lock(&dev_priv->wm.wm_mutex);
3771 if (cstate->wm.need_postvbl_update) { 3938 if (cstate->wm.need_postvbl_update) {
3772 intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk; 3939 intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
3773 ilk_program_watermarks(dev_priv); 3940 ilk_program_watermarks(dev_priv);
3774 } 3941 }
3775 mutex_unlock(&dev_priv->wm.wm_mutex); 3942 mutex_unlock(&dev_priv->wm.wm_mutex);
@@ -3826,7 +3993,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3826 struct skl_wm_values *hw = &dev_priv->wm.skl_hw; 3993 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
3827 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3994 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3828 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 3995 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3829 struct skl_pipe_wm *active = &cstate->wm.optimal.skl; 3996 struct skl_pipe_wm *active = &cstate->wm.skl.optimal;
3830 enum pipe pipe = intel_crtc->pipe; 3997 enum pipe pipe = intel_crtc->pipe;
3831 int level, i, max_level; 3998 int level, i, max_level;
3832 uint32_t temp; 3999 uint32_t temp;
@@ -3849,7 +4016,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3849 if (!intel_crtc->active) 4016 if (!intel_crtc->active)
3850 return; 4017 return;
3851 4018
3852 hw->dirty[pipe] = true; 4019 hw->dirty_pipes |= drm_crtc_mask(crtc);
3853 4020
3854 active->linetime = hw->wm_linetime[pipe]; 4021 active->linetime = hw->wm_linetime[pipe];
3855 4022
@@ -3879,10 +4046,36 @@ void skl_wm_get_hw_state(struct drm_device *dev)
3879 struct drm_i915_private *dev_priv = dev->dev_private; 4046 struct drm_i915_private *dev_priv = dev->dev_private;
3880 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb; 4047 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
3881 struct drm_crtc *crtc; 4048 struct drm_crtc *crtc;
4049 struct intel_crtc *intel_crtc;
3882 4050
3883 skl_ddb_get_hw_state(dev_priv, ddb); 4051 skl_ddb_get_hw_state(dev_priv, ddb);
3884 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 4052 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3885 skl_pipe_wm_get_hw_state(crtc); 4053 skl_pipe_wm_get_hw_state(crtc);
4054
4055 if (dev_priv->active_crtcs) {
4056 /* Fully recompute DDB on first atomic commit */
4057 dev_priv->wm.distrust_bios_wm = true;
4058 } else {
4059 /* Easy/common case; just sanitize DDB now if everything off */
4060 memset(ddb, 0, sizeof(*ddb));
4061 }
4062
4063 /* Calculate plane data rates */
4064 for_each_intel_crtc(dev, intel_crtc) {
4065 struct intel_crtc_state *cstate = intel_crtc->config;
4066 struct intel_plane *intel_plane;
4067
4068 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
4069 const struct drm_plane_state *pstate =
4070 intel_plane->base.state;
4071 int id = skl_wm_plane_id(intel_plane);
4072
4073 cstate->wm.skl.plane_data_rate[id] =
4074 skl_plane_relative_data_rate(cstate, pstate, 0);
4075 cstate->wm.skl.plane_y_data_rate[id] =
4076 skl_plane_relative_data_rate(cstate, pstate, 1);
4077 }
4078 }
3886} 4079}
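skl_wm_get_hw_state() now also precomputes per-plane relative data rates so the first atomic commit has a starting point. As a rough illustration of what those rates feed (an assumption about intent rather than the driver's exact allocator, with made-up numbers): the DDB code splits a pipe's block budget roughly in proportion to how much bandwidth each plane needs.

#include <stdint.h>
#include <stdio.h>

#define NPLANES 4

int main(void)
{
	/* Stand-in for a plane's relative data rate: visible pixels times
	 * bytes per pixel. Plane 0 is a 1920x1080 ARGB primary, plane 1 a
	 * small overlay, the rest are disabled. */
	uint64_t rate[NPLANES] = { 1920ULL * 1080 * 4, 256ULL * 256 * 4, 0, 0 };
	unsigned int ddb_blocks = 896;	/* assumed pipe block budget */
	uint64_t total = 0;
	unsigned int i;

	for (i = 0; i < NPLANES; i++)
		total += rate[i];

	for (i = 0; i < NPLANES; i++) {
		unsigned int share = total ?
			(unsigned int)(ddb_blocks * rate[i] / total) : 0;
		printf("plane %u: %u DDB blocks\n", i, share);
	}
	return 0;
}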
3887 4080
3888static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) 4081static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
@@ -3892,7 +4085,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3892 struct ilk_wm_values *hw = &dev_priv->wm.hw; 4085 struct ilk_wm_values *hw = &dev_priv->wm.hw;
3893 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4086 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3894 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 4087 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3895 struct intel_pipe_wm *active = &cstate->wm.optimal.ilk; 4088 struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
3896 enum pipe pipe = intel_crtc->pipe; 4089 enum pipe pipe = intel_crtc->pipe;
3897 static const i915_reg_t wm0_pipe_reg[] = { 4090 static const i915_reg_t wm0_pipe_reg[] = {
3898 [PIPE_A] = WM0_PIPEA_ILK, 4091 [PIPE_A] = WM0_PIPEA_ILK,
@@ -4169,9 +4362,8 @@ DEFINE_SPINLOCK(mchdev_lock);
4169 * mchdev_lock. */ 4362 * mchdev_lock. */
4170static struct drm_i915_private *i915_mch_dev; 4363static struct drm_i915_private *i915_mch_dev;
4171 4364
4172bool ironlake_set_drps(struct drm_device *dev, u8 val) 4365bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
4173{ 4366{
4174 struct drm_i915_private *dev_priv = dev->dev_private;
4175 u16 rgvswctl; 4367 u16 rgvswctl;
4176 4368
4177 assert_spin_locked(&mchdev_lock); 4369 assert_spin_locked(&mchdev_lock);
@@ -4193,9 +4385,8 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val)
4193 return true; 4385 return true;
4194} 4386}
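The hunks above and below repeat one mechanical conversion: helpers that used to take a struct drm_device and immediately fetch dev->dev_private now take struct drm_i915_private directly, and the IS_*()/INTEL_INFO() macros are fed dev_priv as well. A minimal standalone sketch of the before/after shape; the types and the helper here are simplified stand-ins, not the driver's real definitions.

#include <stdio.h>

/* Simplified stand-ins for the driver's structures. */
struct drm_i915_private { int drps_val; };
struct drm_device { struct drm_i915_private *dev_private; };

/* Old shape: take drm_device, immediately dig out the private pointer. */
static void set_drps_old(struct drm_device *dev, int val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->drps_val = val;
}

/* New shape: take drm_i915_private directly; one less hop per call. */
static void set_drps_new(struct drm_i915_private *dev_priv, int val)
{
	dev_priv->drps_val = val;
}

int main(void)
{
	struct drm_i915_private i915 = { 0 };
	struct drm_device dev = { .dev_private = &i915 };

	set_drps_old(&dev, 6);
	set_drps_new(&i915, 7);
	printf("%d\n", i915.drps_val);
	return 0;
}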
4195 4387
4196static void ironlake_enable_drps(struct drm_device *dev) 4388static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
4197{ 4389{
4198 struct drm_i915_private *dev_priv = dev->dev_private;
4199 u32 rgvmodectl; 4390 u32 rgvmodectl;
4200 u8 fmax, fmin, fstart, vstart; 4391 u8 fmax, fmin, fstart, vstart;
4201 4392
@@ -4252,7 +4443,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
4252 DRM_ERROR("stuck trying to change perf mode\n"); 4443 DRM_ERROR("stuck trying to change perf mode\n");
4253 mdelay(1); 4444 mdelay(1);
4254 4445
4255 ironlake_set_drps(dev, fstart); 4446 ironlake_set_drps(dev_priv, fstart);
4256 4447
4257 dev_priv->ips.last_count1 = I915_READ(DMIEC) + 4448 dev_priv->ips.last_count1 = I915_READ(DMIEC) +
4258 I915_READ(DDREC) + I915_READ(CSIEC); 4449 I915_READ(DDREC) + I915_READ(CSIEC);
@@ -4263,9 +4454,8 @@ static void ironlake_enable_drps(struct drm_device *dev)
4263 spin_unlock_irq(&mchdev_lock); 4454 spin_unlock_irq(&mchdev_lock);
4264} 4455}
4265 4456
4266static void ironlake_disable_drps(struct drm_device *dev) 4457static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
4267{ 4458{
4268 struct drm_i915_private *dev_priv = dev->dev_private;
4269 u16 rgvswctl; 4459 u16 rgvswctl;
4270 4460
4271 spin_lock_irq(&mchdev_lock); 4461 spin_lock_irq(&mchdev_lock);
@@ -4280,7 +4470,7 @@ static void ironlake_disable_drps(struct drm_device *dev)
4280 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); 4470 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
4281 4471
4282 /* Go back to the starting frequency */ 4472 /* Go back to the starting frequency */
4283 ironlake_set_drps(dev, dev_priv->ips.fstart); 4473 ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
4284 mdelay(1); 4474 mdelay(1);
4285 rgvswctl |= MEMCTL_CMD_STS; 4475 rgvswctl |= MEMCTL_CMD_STS;
4286 I915_WRITE(MEMSWCTL, rgvswctl); 4476 I915_WRITE(MEMSWCTL, rgvswctl);
@@ -4424,12 +4614,10 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
4424/* gen6_set_rps is called to update the frequency request, but should also be 4614/* gen6_set_rps is called to update the frequency request, but should also be
4425 * called when the range (min_delay and max_delay) is modified so that we can 4615 * called when the range (min_delay and max_delay) is modified so that we can
4426 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */ 4616 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
4427static void gen6_set_rps(struct drm_device *dev, u8 val) 4617static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
4428{ 4618{
4429 struct drm_i915_private *dev_priv = dev->dev_private;
4430
4431 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ 4619 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
4432 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 4620 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
4433 return; 4621 return;
4434 4622
4435 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4623 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -4442,10 +4630,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
4442 if (val != dev_priv->rps.cur_freq) { 4630 if (val != dev_priv->rps.cur_freq) {
4443 gen6_set_rps_thresholds(dev_priv, val); 4631 gen6_set_rps_thresholds(dev_priv, val);
4444 4632
4445 if (IS_GEN9(dev)) 4633 if (IS_GEN9(dev_priv))
4446 I915_WRITE(GEN6_RPNSWREQ, 4634 I915_WRITE(GEN6_RPNSWREQ,
4447 GEN9_FREQUENCY(val)); 4635 GEN9_FREQUENCY(val));
4448 else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 4636 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
4449 I915_WRITE(GEN6_RPNSWREQ, 4637 I915_WRITE(GEN6_RPNSWREQ,
4450 HSW_FREQUENCY(val)); 4638 HSW_FREQUENCY(val));
4451 else 4639 else
@@ -4467,15 +4655,13 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
4467 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); 4655 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
4468} 4656}
4469 4657
4470static void valleyview_set_rps(struct drm_device *dev, u8 val) 4658static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
4471{ 4659{
4472 struct drm_i915_private *dev_priv = dev->dev_private;
4473
4474 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4660 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4475 WARN_ON(val > dev_priv->rps.max_freq); 4661 WARN_ON(val > dev_priv->rps.max_freq);
4476 WARN_ON(val < dev_priv->rps.min_freq); 4662 WARN_ON(val < dev_priv->rps.min_freq);
4477 4663
4478 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1), 4664 if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
4479 "Odd GPU freq value\n")) 4665 "Odd GPU freq value\n"))
4480 val &= ~1; 4666 val &= ~1;
4481 4667
@@ -4508,7 +4694,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
4508 /* Wake up the media well, as that takes a lot less 4694 /* Wake up the media well, as that takes a lot less
4509 * power than the Render well. */ 4695 * power than the Render well. */
4510 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA); 4696 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
4511 valleyview_set_rps(dev_priv->dev, val); 4697 valleyview_set_rps(dev_priv, val);
4512 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA); 4698 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
4513} 4699}
4514 4700
@@ -4526,14 +4712,12 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
4526 4712
4527void gen6_rps_idle(struct drm_i915_private *dev_priv) 4713void gen6_rps_idle(struct drm_i915_private *dev_priv)
4528{ 4714{
4529 struct drm_device *dev = dev_priv->dev;
4530
4531 mutex_lock(&dev_priv->rps.hw_lock); 4715 mutex_lock(&dev_priv->rps.hw_lock);
4532 if (dev_priv->rps.enabled) { 4716 if (dev_priv->rps.enabled) {
4533 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 4717 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4534 vlv_set_rps_idle(dev_priv); 4718 vlv_set_rps_idle(dev_priv);
4535 else 4719 else
4536 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 4720 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
4537 dev_priv->rps.last_adj = 0; 4721 dev_priv->rps.last_adj = 0;
4538 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 4722 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
4539 } 4723 }
@@ -4581,49 +4765,39 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
4581 spin_unlock(&dev_priv->rps.client_lock); 4765 spin_unlock(&dev_priv->rps.client_lock);
4582} 4766}
4583 4767
4584void intel_set_rps(struct drm_device *dev, u8 val) 4768void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
4585{ 4769{
4586 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 4770 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4587 valleyview_set_rps(dev, val); 4771 valleyview_set_rps(dev_priv, val);
4588 else 4772 else
4589 gen6_set_rps(dev, val); 4773 gen6_set_rps(dev_priv, val);
4590} 4774}
4591 4775
4592static void gen9_disable_rc6(struct drm_device *dev) 4776static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
4593{ 4777{
4594 struct drm_i915_private *dev_priv = dev->dev_private;
4595
4596 I915_WRITE(GEN6_RC_CONTROL, 0); 4778 I915_WRITE(GEN6_RC_CONTROL, 0);
4597 I915_WRITE(GEN9_PG_ENABLE, 0); 4779 I915_WRITE(GEN9_PG_ENABLE, 0);
4598} 4780}
4599 4781
4600static void gen9_disable_rps(struct drm_device *dev) 4782static void gen9_disable_rps(struct drm_i915_private *dev_priv)
4601{ 4783{
4602 struct drm_i915_private *dev_priv = dev->dev_private;
4603
4604 I915_WRITE(GEN6_RP_CONTROL, 0); 4784 I915_WRITE(GEN6_RP_CONTROL, 0);
4605} 4785}
4606 4786
4607static void gen6_disable_rps(struct drm_device *dev) 4787static void gen6_disable_rps(struct drm_i915_private *dev_priv)
4608{ 4788{
4609 struct drm_i915_private *dev_priv = dev->dev_private;
4610
4611 I915_WRITE(GEN6_RC_CONTROL, 0); 4789 I915_WRITE(GEN6_RC_CONTROL, 0);
4612 I915_WRITE(GEN6_RPNSWREQ, 1 << 31); 4790 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
4613 I915_WRITE(GEN6_RP_CONTROL, 0); 4791 I915_WRITE(GEN6_RP_CONTROL, 0);
4614} 4792}
4615 4793
4616static void cherryview_disable_rps(struct drm_device *dev) 4794static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
4617{ 4795{
4618 struct drm_i915_private *dev_priv = dev->dev_private;
4619
4620 I915_WRITE(GEN6_RC_CONTROL, 0); 4796 I915_WRITE(GEN6_RC_CONTROL, 0);
4621} 4797}
4622 4798
4623static void valleyview_disable_rps(struct drm_device *dev) 4799static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
4624{ 4800{
4625 struct drm_i915_private *dev_priv = dev->dev_private;
4626
4627 /* we're doing forcewake before Disabling RC6, 4801 /* we're doing forcewake before Disabling RC6,
4628 * This is what the BIOS expects when going into suspend */ 4802 * This is what the BIOS expects when going into suspend */
4629 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4803 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -4633,15 +4807,15 @@ static void valleyview_disable_rps(struct drm_device *dev)
4633 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4807 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4634} 4808}
4635 4809
4636static void intel_print_rc6_info(struct drm_device *dev, u32 mode) 4810static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
4637{ 4811{
4638 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 4812 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4639 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1))) 4813 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
4640 mode = GEN6_RC_CTL_RC6_ENABLE; 4814 mode = GEN6_RC_CTL_RC6_ENABLE;
4641 else 4815 else
4642 mode = 0; 4816 mode = 0;
4643 } 4817 }
4644 if (HAS_RC6p(dev)) 4818 if (HAS_RC6p(dev_priv))
4645 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n", 4819 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
4646 onoff(mode & GEN6_RC_CTL_RC6_ENABLE), 4820 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
4647 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE), 4821 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
@@ -4652,9 +4826,8 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
4652 onoff(mode & GEN6_RC_CTL_RC6_ENABLE)); 4826 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
4653} 4827}
4654 4828
4655static bool bxt_check_bios_rc6_setup(const struct drm_device *dev) 4829static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
4656{ 4830{
4657 struct drm_i915_private *dev_priv = to_i915(dev);
4658 struct i915_ggtt *ggtt = &dev_priv->ggtt; 4831 struct i915_ggtt *ggtt = &dev_priv->ggtt;
4659 bool enable_rc6 = true; 4832 bool enable_rc6 = true;
4660 unsigned long rc6_ctx_base; 4833 unsigned long rc6_ctx_base;
@@ -4695,16 +4868,16 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
4695 return enable_rc6; 4868 return enable_rc6;
4696} 4869}
4697 4870
4698int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) 4871int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
4699{ 4872{
4700 /* No RC6 before Ironlake and code is gone for ilk. */ 4873 /* No RC6 before Ironlake and code is gone for ilk. */
4701 if (INTEL_INFO(dev)->gen < 6) 4874 if (INTEL_INFO(dev_priv)->gen < 6)
4702 return 0; 4875 return 0;
4703 4876
4704 if (!enable_rc6) 4877 if (!enable_rc6)
4705 return 0; 4878 return 0;
4706 4879
4707 if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) { 4880 if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
4708 DRM_INFO("RC6 disabled by BIOS\n"); 4881 DRM_INFO("RC6 disabled by BIOS\n");
4709 return 0; 4882 return 0;
4710 } 4883 }
@@ -4713,7 +4886,7 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
4713 if (enable_rc6 >= 0) { 4886 if (enable_rc6 >= 0) {
4714 int mask; 4887 int mask;
4715 4888
4716 if (HAS_RC6p(dev)) 4889 if (HAS_RC6p(dev_priv))
4717 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | 4890 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
4718 INTEL_RC6pp_ENABLE; 4891 INTEL_RC6pp_ENABLE;
4719 else 4892 else
@@ -4726,20 +4899,14 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
4726 return enable_rc6 & mask; 4899 return enable_rc6 & mask;
4727 } 4900 }
4728 4901
4729 if (IS_IVYBRIDGE(dev)) 4902 if (IS_IVYBRIDGE(dev_priv))
4730 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 4903 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
4731 4904
4732 return INTEL_RC6_ENABLE; 4905 return INTEL_RC6_ENABLE;
4733} 4906}
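sanitize_rc6_option() above clamps the user-supplied enable_rc6 value to the RC6 states the platform can use: a positive request is masked against RC6 (plus RC6p/RC6pp where HAS_RC6p() holds), Ivybridge defaults to RC6|RC6p, and everything else defaults to plain RC6. A small standalone model of that masking, leaving out the Broxton BIOS check; the flag values are arbitrary here and only the shape matches.

#include <stdbool.h>
#include <stdio.h>

#define RC6_ENABLE	(1 << 0)
#define RC6p_ENABLE	(1 << 1)
#define RC6pp_ENABLE	(1 << 2)

static int sanitize_rc6(int requested, bool has_rc6p, bool is_ivybridge)
{
	if (!requested)
		return 0;

	if (requested >= 0) {
		int mask = has_rc6p ?
			RC6_ENABLE | RC6p_ENABLE | RC6pp_ENABLE : RC6_ENABLE;

		return requested & mask;	/* drop unsupported states */
	}

	/* Negative means "pick a default for me". */
	if (is_ivybridge)
		return RC6_ENABLE | RC6p_ENABLE;

	return RC6_ENABLE;
}

int main(void)
{
	printf("%d\n", sanitize_rc6(7, false, false));	/* request 7 -> 1 */
	printf("%d\n", sanitize_rc6(-1, true, true));	/* IVB default -> 3 */
	return 0;
}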
4734 4907
4735int intel_enable_rc6(const struct drm_device *dev) 4908static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
4736{ 4909{
4737 return i915.enable_rc6;
4738}
4739
4740static void gen6_init_rps_frequencies(struct drm_device *dev)
4741{
4742 struct drm_i915_private *dev_priv = dev->dev_private;
4743 uint32_t rp_state_cap; 4910 uint32_t rp_state_cap;
4744 u32 ddcc_status = 0; 4911 u32 ddcc_status = 0;
4745 int ret; 4912 int ret;
@@ -4747,7 +4914,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
4747 /* All of these values are in units of 50MHz */ 4914 /* All of these values are in units of 50MHz */
4748 dev_priv->rps.cur_freq = 0; 4915 dev_priv->rps.cur_freq = 0;
4749 /* static values from HW: RP0 > RP1 > RPn (min_freq) */ 4916 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
4750 if (IS_BROXTON(dev)) { 4917 if (IS_BROXTON(dev_priv)) {
4751 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); 4918 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
4752 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff; 4919 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
4753 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; 4920 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
@@ -4763,8 +4930,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
4763 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; 4930 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
4764 4931
4765 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; 4932 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
4766 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || 4933 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
4767 IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 4934 IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
4768 ret = sandybridge_pcode_read(dev_priv, 4935 ret = sandybridge_pcode_read(dev_priv,
4769 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, 4936 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
4770 &ddcc_status); 4937 &ddcc_status);
@@ -4776,7 +4943,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
4776 dev_priv->rps.max_freq); 4943 dev_priv->rps.max_freq);
4777 } 4944 }
4778 4945
4779 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 4946 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
4780 /* Store the frequency values in 16.66 MHZ units, which is 4947 /* Store the frequency values in 16.66 MHZ units, which is
4781 the natural hardware unit for SKL */ 4948 the natural hardware unit for SKL */
4782 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; 4949 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
@@ -4793,7 +4960,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
4793 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 4960 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4794 4961
4795 if (dev_priv->rps.min_freq_softlimit == 0) { 4962 if (dev_priv->rps.min_freq_softlimit == 0) {
4796 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 4963 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
4797 dev_priv->rps.min_freq_softlimit = 4964 dev_priv->rps.min_freq_softlimit =
4798 max_t(int, dev_priv->rps.efficient_freq, 4965 max_t(int, dev_priv->rps.efficient_freq,
4799 intel_freq_opcode(dev_priv, 450)); 4966 intel_freq_opcode(dev_priv, 450));
@@ -4804,16 +4971,14 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
4804} 4971}
4805 4972
4806/* See the Gen9_GT_PM_Programming_Guide doc for the below */ 4973/* See the Gen9_GT_PM_Programming_Guide doc for the below */
4807static void gen9_enable_rps(struct drm_device *dev) 4974static void gen9_enable_rps(struct drm_i915_private *dev_priv)
4808{ 4975{
4809 struct drm_i915_private *dev_priv = dev->dev_private;
4810
4811 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4976 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4812 4977
4813 gen6_init_rps_frequencies(dev); 4978 gen6_init_rps_frequencies(dev_priv);
4814 4979
4815 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ 4980 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
4816 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 4981 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
4817 /* 4982 /*
4818 * BIOS could leave the Hw Turbo enabled, so need to explicitly 4983 * BIOS could leave the Hw Turbo enabled, so need to explicitly
4819 * clear out the Control register just to avoid inconsistency 4984 * clear out the Control register just to avoid inconsistency
@@ -4823,7 +4988,7 @@ static void gen9_enable_rps(struct drm_device *dev)
4823 * if the Turbo is left enabled in the Control register, as the 4988 * if the Turbo is left enabled in the Control register, as the
4824 * Up/Down interrupts would remain masked. 4989 * Up/Down interrupts would remain masked.
4825 */ 4990 */
4826 gen9_disable_rps(dev); 4991 gen9_disable_rps(dev_priv);
4827 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4992 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4828 return; 4993 return;
4829 } 4994 }
@@ -4842,14 +5007,13 @@ static void gen9_enable_rps(struct drm_device *dev)
4842 * Up/Down EI & threshold registers, as well as the RP_CONTROL, 5007 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
4843 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ 5008 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
4844 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 5009 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4845 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 5010 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
4846 5011
4847 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5012 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4848} 5013}
4849 5014
4850static void gen9_enable_rc6(struct drm_device *dev) 5015static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
4851{ 5016{
4852 struct drm_i915_private *dev_priv = dev->dev_private;
4853 struct intel_engine_cs *engine; 5017 struct intel_engine_cs *engine;
4854 uint32_t rc6_mask = 0; 5018 uint32_t rc6_mask = 0;
4855 5019
@@ -4866,7 +5030,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
4866 /* 2b: Program RC6 thresholds.*/ 5030 /* 2b: Program RC6 thresholds.*/
4867 5031
4868 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ 5032 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
4869 if (IS_SKYLAKE(dev)) 5033 if (IS_SKYLAKE(dev_priv))
4870 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); 5034 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
4871 else 5035 else
4872 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); 5036 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
@@ -4875,7 +5039,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
4875 for_each_engine(engine, dev_priv) 5039 for_each_engine(engine, dev_priv)
4876 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 5040 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
4877 5041
4878 if (HAS_GUC_UCODE(dev)) 5042 if (HAS_GUC_UCODE(dev_priv))
4879 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); 5043 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
4880 5044
4881 I915_WRITE(GEN6_RC_SLEEP, 0); 5045 I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -4885,12 +5049,12 @@ static void gen9_enable_rc6(struct drm_device *dev)
4885 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25); 5049 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
4886 5050
4887 /* 3a: Enable RC6 */ 5051 /* 3a: Enable RC6 */
4888 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 5052 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
4889 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 5053 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4890 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE)); 5054 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
4891 /* WaRsUseTimeoutMode */ 5055 /* WaRsUseTimeoutMode */
4892 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || 5056 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
4893 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 5057 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
4894 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */ 5058 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
4895 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 5059 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4896 GEN7_RC_CTL_TO_MODE | 5060 GEN7_RC_CTL_TO_MODE |
@@ -4906,19 +5070,17 @@ static void gen9_enable_rc6(struct drm_device *dev)
4906 * 3b: Enable Coarse Power Gating only when RC6 is enabled. 5070 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
4907 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. 5071 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
4908 */ 5072 */
4909 if (NEEDS_WaRsDisableCoarsePowerGating(dev)) 5073 if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
4910 I915_WRITE(GEN9_PG_ENABLE, 0); 5074 I915_WRITE(GEN9_PG_ENABLE, 0);
4911 else 5075 else
4912 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 5076 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
4913 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0); 5077 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
4914 5078
4915 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5079 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4916
4917} 5080}
4918 5081
4919static void gen8_enable_rps(struct drm_device *dev) 5082static void gen8_enable_rps(struct drm_i915_private *dev_priv)
4920{ 5083{
4921 struct drm_i915_private *dev_priv = dev->dev_private;
4922 struct intel_engine_cs *engine; 5084 struct intel_engine_cs *engine;
4923 uint32_t rc6_mask = 0; 5085 uint32_t rc6_mask = 0;
4924 5086
@@ -4933,7 +5095,7 @@ static void gen8_enable_rps(struct drm_device *dev)
4933 I915_WRITE(GEN6_RC_CONTROL, 0); 5095 I915_WRITE(GEN6_RC_CONTROL, 0);
4934 5096
4935 /* Initialize rps frequencies */ 5097 /* Initialize rps frequencies */
4936 gen6_init_rps_frequencies(dev); 5098 gen6_init_rps_frequencies(dev_priv);
4937 5099
4938 /* 2b: Program RC6 thresholds.*/ 5100 /* 2b: Program RC6 thresholds.*/
4939 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); 5101 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@@ -4942,16 +5104,16 @@ static void gen8_enable_rps(struct drm_device *dev)
4942 for_each_engine(engine, dev_priv) 5104 for_each_engine(engine, dev_priv)
4943 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 5105 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
4944 I915_WRITE(GEN6_RC_SLEEP, 0); 5106 I915_WRITE(GEN6_RC_SLEEP, 0);
4945 if (IS_BROADWELL(dev)) 5107 if (IS_BROADWELL(dev_priv))
4946 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ 5108 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
4947 else 5109 else
4948 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ 5110 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
4949 5111
4950 /* 3: Enable RC6 */ 5112 /* 3: Enable RC6 */
4951 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 5113 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
4952 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 5114 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4953 intel_print_rc6_info(dev, rc6_mask); 5115 intel_print_rc6_info(dev_priv, rc6_mask);
4954 if (IS_BROADWELL(dev)) 5116 if (IS_BROADWELL(dev_priv))
4955 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 5117 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4956 GEN7_RC_CTL_TO_MODE | 5118 GEN7_RC_CTL_TO_MODE |
4957 rc6_mask); 5119 rc6_mask);
@@ -4992,14 +5154,13 @@ static void gen8_enable_rps(struct drm_device *dev)
4992 /* 6: Ring frequency + overclocking (our driver does this later) */ 5154 /* 6: Ring frequency + overclocking (our driver does this later) */
4993 5155
4994 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 5156 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4995 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 5157 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
4996 5158
4997 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5159 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4998} 5160}
4999 5161
5000static void gen6_enable_rps(struct drm_device *dev) 5162static void gen6_enable_rps(struct drm_i915_private *dev_priv)
5001{ 5163{
5002 struct drm_i915_private *dev_priv = dev->dev_private;
5003 struct intel_engine_cs *engine; 5164 struct intel_engine_cs *engine;
5004 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; 5165 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
5005 u32 gtfifodbg; 5166 u32 gtfifodbg;
@@ -5026,7 +5187,7 @@ static void gen6_enable_rps(struct drm_device *dev)
5026 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5187 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5027 5188
5028 /* Initialize rps frequencies */ 5189 /* Initialize rps frequencies */
5029 gen6_init_rps_frequencies(dev); 5190 gen6_init_rps_frequencies(dev_priv);
5030 5191
5031 /* disable the counters and set deterministic thresholds */ 5192 /* disable the counters and set deterministic thresholds */
5032 I915_WRITE(GEN6_RC_CONTROL, 0); 5193 I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -5042,7 +5203,7 @@ static void gen6_enable_rps(struct drm_device *dev)
5042 5203
5043 I915_WRITE(GEN6_RC_SLEEP, 0); 5204 I915_WRITE(GEN6_RC_SLEEP, 0);
5044 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); 5205 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
5045 if (IS_IVYBRIDGE(dev)) 5206 if (IS_IVYBRIDGE(dev_priv))
5046 I915_WRITE(GEN6_RC6_THRESHOLD, 125000); 5207 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
5047 else 5208 else
5048 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); 5209 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
@@ -5050,12 +5211,12 @@ static void gen6_enable_rps(struct drm_device *dev)
5050 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 5211 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
5051 5212
5052 /* Check if we are enabling RC6 */ 5213 /* Check if we are enabling RC6 */
5053 rc6_mode = intel_enable_rc6(dev_priv->dev); 5214 rc6_mode = intel_enable_rc6();
5054 if (rc6_mode & INTEL_RC6_ENABLE) 5215 if (rc6_mode & INTEL_RC6_ENABLE)
5055 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; 5216 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
5056 5217
5057 /* We don't use those on Haswell */ 5218 /* We don't use those on Haswell */
5058 if (!IS_HASWELL(dev)) { 5219 if (!IS_HASWELL(dev_priv)) {
5059 if (rc6_mode & INTEL_RC6p_ENABLE) 5220 if (rc6_mode & INTEL_RC6p_ENABLE)
5060 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; 5221 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
5061 5222
@@ -5063,7 +5224,7 @@ static void gen6_enable_rps(struct drm_device *dev)
5063 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; 5224 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
5064 } 5225 }
5065 5226
5066 intel_print_rc6_info(dev, rc6_mask); 5227 intel_print_rc6_info(dev_priv, rc6_mask);
5067 5228
5068 I915_WRITE(GEN6_RC_CONTROL, 5229 I915_WRITE(GEN6_RC_CONTROL,
5069 rc6_mask | 5230 rc6_mask |
@@ -5087,13 +5248,13 @@ static void gen6_enable_rps(struct drm_device *dev)
5087 } 5248 }
5088 5249
5089 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 5250 dev_priv->rps.power = HIGH_POWER; /* force a reset */
5090 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 5251 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
5091 5252
5092 rc6vids = 0; 5253 rc6vids = 0;
5093 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 5254 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
5094 if (IS_GEN6(dev) && ret) { 5255 if (IS_GEN6(dev_priv) && ret) {
5095 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); 5256 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
5096 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { 5257 } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
5097 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", 5258 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
5098 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); 5259 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
5099 rc6vids &= 0xffff00; 5260 rc6vids &= 0xffff00;
@@ -5106,9 +5267,8 @@ static void gen6_enable_rps(struct drm_device *dev)
5106 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5267 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5107} 5268}
5108 5269
5109static void __gen6_update_ring_freq(struct drm_device *dev) 5270static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv)
5110{ 5271{
5111 struct drm_i915_private *dev_priv = dev->dev_private;
5112 int min_freq = 15; 5272 int min_freq = 15;
5113 unsigned int gpu_freq; 5273 unsigned int gpu_freq;
5114 unsigned int max_ia_freq, min_ring_freq; 5274 unsigned int max_ia_freq, min_ring_freq;
@@ -5137,7 +5297,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
5137 /* convert DDR frequency from units of 266.6MHz to bandwidth */ 5297 /* convert DDR frequency from units of 266.6MHz to bandwidth */
5138 min_ring_freq = mult_frac(min_ring_freq, 8, 3); 5298 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
5139 5299
5140 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 5300 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5141 /* Convert GT frequency to 50 HZ units */ 5301 /* Convert GT frequency to 50 HZ units */
5142 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER; 5302 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
5143 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER; 5303 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
@@ -5155,16 +5315,16 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
5155 int diff = max_gpu_freq - gpu_freq; 5315 int diff = max_gpu_freq - gpu_freq;
5156 unsigned int ia_freq = 0, ring_freq = 0; 5316 unsigned int ia_freq = 0, ring_freq = 0;
5157 5317
5158 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 5318 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5159 /* 5319 /*
5160 * ring_freq = 2 * GT. ring_freq is in 100MHz units 5320 * ring_freq = 2 * GT. ring_freq is in 100MHz units
5161 * No floor required for ring frequency on SKL. 5321 * No floor required for ring frequency on SKL.
5162 */ 5322 */
5163 ring_freq = gpu_freq; 5323 ring_freq = gpu_freq;
5164 } else if (INTEL_INFO(dev)->gen >= 8) { 5324 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
5165 /* max(2 * GT, DDR). NB: GT is 50MHz units */ 5325 /* max(2 * GT, DDR). NB: GT is 50MHz units */
5166 ring_freq = max(min_ring_freq, gpu_freq); 5326 ring_freq = max(min_ring_freq, gpu_freq);
5167 } else if (IS_HASWELL(dev)) { 5327 } else if (IS_HASWELL(dev_priv)) {
5168 ring_freq = mult_frac(gpu_freq, 5, 4); 5328 ring_freq = mult_frac(gpu_freq, 5, 4);
5169 ring_freq = max(min_ring_freq, ring_freq); 5329 ring_freq = max(min_ring_freq, ring_freq);
5170 /* leave ia_freq as the default, chosen by cpufreq */ 5330 /* leave ia_freq as the default, chosen by cpufreq */
@@ -5191,26 +5351,23 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
5191 } 5351 }
5192} 5352}
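The loop above picks a ring frequency for every GPU frequency step: on SKL/KBL the ring simply tracks the GT value, on other gen8+ it is the GT value floored at the minimum ring frequency derived from DDR, and on Haswell it is GT scaled by 5/4 with the same floor. A compact standalone restatement of just that selection; frequency units and the sample values are illustrative.

#include <stdio.h>

enum platform { PLAT_SKL, PLAT_GEN8, PLAT_HSW };

static unsigned int max_uint(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

/* Mirrors the ring-frequency selection in the loop above. */
static unsigned int pick_ring_freq(enum platform plat, unsigned int gpu_freq,
				   unsigned int min_ring_freq)
{
	switch (plat) {
	case PLAT_SKL:
		return gpu_freq;			/* ring follows GT */
	case PLAT_GEN8:
		return max_uint(min_ring_freq, gpu_freq);
	case PLAT_HSW:
	default:
		return max_uint(min_ring_freq, gpu_freq * 5 / 4);
	}
}

int main(void)
{
	unsigned int gpu;

	for (gpu = 10; gpu <= 14; gpu++)	/* arbitrary 50 MHz-unit steps */
		printf("gpu %u -> hsw ring %u\n",
		       gpu, pick_ring_freq(PLAT_HSW, gpu, 12));
	return 0;
}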
5193 5353
5194void gen6_update_ring_freq(struct drm_device *dev) 5354void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
5195{ 5355{
5196 struct drm_i915_private *dev_priv = dev->dev_private; 5356 if (!HAS_CORE_RING_FREQ(dev_priv))
5197
5198 if (!HAS_CORE_RING_FREQ(dev))
5199 return; 5357 return;
5200 5358
5201 mutex_lock(&dev_priv->rps.hw_lock); 5359 mutex_lock(&dev_priv->rps.hw_lock);
5202 __gen6_update_ring_freq(dev); 5360 __gen6_update_ring_freq(dev_priv);
5203 mutex_unlock(&dev_priv->rps.hw_lock); 5361 mutex_unlock(&dev_priv->rps.hw_lock);
5204} 5362}
5205 5363
5206static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) 5364static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
5207{ 5365{
5208 struct drm_device *dev = dev_priv->dev;
5209 u32 val, rp0; 5366 u32 val, rp0;
5210 5367
5211 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); 5368 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5212 5369
5213 switch (INTEL_INFO(dev)->eu_total) { 5370 switch (INTEL_INFO(dev_priv)->eu_total) {
5214 case 8: 5371 case 8:
5215 /* (2 * 4) config */ 5372 /* (2 * 4) config */
5216 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT); 5373 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
@@ -5321,9 +5478,8 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
5321 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0); 5478 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
5322} 5479}
5323 5480
5324static void cherryview_setup_pctx(struct drm_device *dev) 5481static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
5325{ 5482{
5326 struct drm_i915_private *dev_priv = to_i915(dev);
5327 struct i915_ggtt *ggtt = &dev_priv->ggtt; 5483 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5328 unsigned long pctx_paddr, paddr; 5484 unsigned long pctx_paddr, paddr;
5329 u32 pcbr; 5485 u32 pcbr;
@@ -5342,15 +5498,14 @@ static void cherryview_setup_pctx(struct drm_device *dev)
5342 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); 5498 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5343} 5499}
5344 5500
5345static void valleyview_setup_pctx(struct drm_device *dev) 5501static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
5346{ 5502{
5347 struct drm_i915_private *dev_priv = dev->dev_private;
5348 struct drm_i915_gem_object *pctx; 5503 struct drm_i915_gem_object *pctx;
5349 unsigned long pctx_paddr; 5504 unsigned long pctx_paddr;
5350 u32 pcbr; 5505 u32 pcbr;
5351 int pctx_size = 24*1024; 5506 int pctx_size = 24*1024;
5352 5507
5353 mutex_lock(&dev->struct_mutex); 5508 mutex_lock(&dev_priv->dev->struct_mutex);
5354 5509
5355 pcbr = I915_READ(VLV_PCBR); 5510 pcbr = I915_READ(VLV_PCBR);
5356 if (pcbr) { 5511 if (pcbr) {
@@ -5375,7 +5530,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
5375 * overlap with other ranges, such as the frame buffer, protected 5530 * overlap with other ranges, such as the frame buffer, protected
5376 * memory, or any other relevant ranges. 5531 * memory, or any other relevant ranges.
5377 */ 5532 */
5378 pctx = i915_gem_object_create_stolen(dev, pctx_size); 5533 pctx = i915_gem_object_create_stolen(dev_priv->dev, pctx_size);
5379 if (!pctx) { 5534 if (!pctx) {
5380 DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); 5535 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
5381 goto out; 5536 goto out;
@@ -5387,13 +5542,11 @@ static void valleyview_setup_pctx(struct drm_device *dev)
5387out: 5542out:
5388 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); 5543 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5389 dev_priv->vlv_pctx = pctx; 5544 dev_priv->vlv_pctx = pctx;
5390 mutex_unlock(&dev->struct_mutex); 5545 mutex_unlock(&dev_priv->dev->struct_mutex);
5391} 5546}
5392 5547
5393static void valleyview_cleanup_pctx(struct drm_device *dev) 5548static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
5394{ 5549{
5395 struct drm_i915_private *dev_priv = dev->dev_private;
5396
5397 if (WARN_ON(!dev_priv->vlv_pctx)) 5550 if (WARN_ON(!dev_priv->vlv_pctx))
5398 return; 5551 return;
5399 5552
@@ -5412,12 +5565,11 @@ static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
5412 dev_priv->rps.gpll_ref_freq); 5565 dev_priv->rps.gpll_ref_freq);
5413} 5566}
5414 5567
5415static void valleyview_init_gt_powersave(struct drm_device *dev) 5568static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
5416{ 5569{
5417 struct drm_i915_private *dev_priv = dev->dev_private;
5418 u32 val; 5570 u32 val;
5419 5571
5420 valleyview_setup_pctx(dev); 5572 valleyview_setup_pctx(dev_priv);
5421 5573
5422 vlv_init_gpll_ref_freq(dev_priv); 5574 vlv_init_gpll_ref_freq(dev_priv);
5423 5575
@@ -5471,12 +5623,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
5471 mutex_unlock(&dev_priv->rps.hw_lock); 5623 mutex_unlock(&dev_priv->rps.hw_lock);
5472} 5624}
5473 5625
5474static void cherryview_init_gt_powersave(struct drm_device *dev) 5626static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
5475{ 5627{
5476 struct drm_i915_private *dev_priv = dev->dev_private;
5477 u32 val; 5628 u32 val;
5478 5629
5479 cherryview_setup_pctx(dev); 5630 cherryview_setup_pctx(dev_priv);
5480 5631
5481 vlv_init_gpll_ref_freq(dev_priv); 5632 vlv_init_gpll_ref_freq(dev_priv);
5482 5633
@@ -5536,14 +5687,13 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
5536 mutex_unlock(&dev_priv->rps.hw_lock); 5687 mutex_unlock(&dev_priv->rps.hw_lock);
5537} 5688}
5538 5689
5539static void valleyview_cleanup_gt_powersave(struct drm_device *dev) 5690static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
5540{ 5691{
5541 valleyview_cleanup_pctx(dev); 5692 valleyview_cleanup_pctx(dev_priv);
5542} 5693}
5543 5694
5544static void cherryview_enable_rps(struct drm_device *dev) 5695static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
5545{ 5696{
5546 struct drm_i915_private *dev_priv = dev->dev_private;
5547 struct intel_engine_cs *engine; 5697 struct intel_engine_cs *engine;
5548 u32 gtfifodbg, val, rc6_mode = 0, pcbr; 5698 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
5549 5699
@@ -5588,8 +5738,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
5588 pcbr = I915_READ(VLV_PCBR); 5738 pcbr = I915_READ(VLV_PCBR);
5589 5739
5590 /* 3: Enable RC6 */ 5740 /* 3: Enable RC6 */
5591 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) && 5741 if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
5592 (pcbr >> VLV_PCBR_ADDR_SHIFT)) 5742 (pcbr >> VLV_PCBR_ADDR_SHIFT))
5593 rc6_mode = GEN7_RC_CTL_TO_MODE; 5743 rc6_mode = GEN7_RC_CTL_TO_MODE;
5594 5744
5595 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 5745 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
@@ -5634,14 +5784,13 @@ static void cherryview_enable_rps(struct drm_device *dev)
5634 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), 5784 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
5635 dev_priv->rps.idle_freq); 5785 dev_priv->rps.idle_freq);
5636 5786
5637 valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 5787 valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
5638 5788
5639 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5789 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5640} 5790}
5641 5791
5642static void valleyview_enable_rps(struct drm_device *dev) 5792static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
5643{ 5793{
5644 struct drm_i915_private *dev_priv = dev->dev_private;
5645 struct intel_engine_cs *engine; 5794 struct intel_engine_cs *engine;
5646 u32 gtfifodbg, val, rc6_mode = 0; 5795 u32 gtfifodbg, val, rc6_mode = 0;
5647 5796
@@ -5694,10 +5843,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
5694 VLV_MEDIA_RC6_COUNT_EN | 5843 VLV_MEDIA_RC6_COUNT_EN |
5695 VLV_RENDER_RC6_COUNT_EN)); 5844 VLV_RENDER_RC6_COUNT_EN));
5696 5845
5697 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 5846 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
5698 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; 5847 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
5699 5848
5700 intel_print_rc6_info(dev, rc6_mode); 5849 intel_print_rc6_info(dev_priv, rc6_mode);
5701 5850
5702 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 5851 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5703 5852
@@ -5724,7 +5873,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
5724 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), 5873 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
5725 dev_priv->rps.idle_freq); 5874 dev_priv->rps.idle_freq);
5726 5875
5727 valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 5876 valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
5728 5877
5729 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5878 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5730} 5879}
@@ -5814,10 +5963,9 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
5814 5963
5815unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 5964unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
5816{ 5965{
5817 struct drm_device *dev = dev_priv->dev;
5818 unsigned long val; 5966 unsigned long val;
5819 5967
5820 if (INTEL_INFO(dev)->gen != 5) 5968 if (INTEL_INFO(dev_priv)->gen != 5)
5821 return 0; 5969 return 0;
5822 5970
5823 spin_lock_irq(&mchdev_lock); 5971 spin_lock_irq(&mchdev_lock);
@@ -5857,11 +6005,10 @@ static int _pxvid_to_vd(u8 pxvid)
5857 6005
5858static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 6006static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
5859{ 6007{
5860 struct drm_device *dev = dev_priv->dev;
5861 const int vd = _pxvid_to_vd(pxvid); 6008 const int vd = _pxvid_to_vd(pxvid);
5862 const int vm = vd - 1125; 6009 const int vm = vd - 1125;
5863 6010
5864 if (INTEL_INFO(dev)->is_mobile) 6011 if (INTEL_INFO(dev_priv)->is_mobile)
5865 return vm > 0 ? vm : 0; 6012 return vm > 0 ? vm : 0;
5866 6013
5867 return vd; 6014 return vd;
@@ -5902,9 +6049,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
5902 6049
5903void i915_update_gfx_val(struct drm_i915_private *dev_priv) 6050void i915_update_gfx_val(struct drm_i915_private *dev_priv)
5904{ 6051{
5905 struct drm_device *dev = dev_priv->dev; 6052 if (INTEL_INFO(dev_priv)->gen != 5)
5906
5907 if (INTEL_INFO(dev)->gen != 5)
5908 return; 6053 return;
5909 6054
5910 spin_lock_irq(&mchdev_lock); 6055 spin_lock_irq(&mchdev_lock);
@@ -5953,10 +6098,9 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
5953 6098
5954unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 6099unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
5955{ 6100{
5956 struct drm_device *dev = dev_priv->dev;
5957 unsigned long val; 6101 unsigned long val;
5958 6102
5959 if (INTEL_INFO(dev)->gen != 5) 6103 if (INTEL_INFO(dev_priv)->gen != 5)
5960 return 0; 6104 return 0;
5961 6105
5962 spin_lock_irq(&mchdev_lock); 6106 spin_lock_irq(&mchdev_lock);
@@ -6097,7 +6241,7 @@ bool i915_gpu_turbo_disable(void)
6097 6241
6098 dev_priv->ips.max_delay = dev_priv->ips.fstart; 6242 dev_priv->ips.max_delay = dev_priv->ips.fstart;
6099 6243
6100 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart)) 6244 if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
6101 ret = false; 6245 ret = false;
6102 6246
6103out_unlock: 6247out_unlock:
@@ -6145,9 +6289,8 @@ void intel_gpu_ips_teardown(void)
6145 spin_unlock_irq(&mchdev_lock); 6289 spin_unlock_irq(&mchdev_lock);
6146} 6290}
6147 6291
6148static void intel_init_emon(struct drm_device *dev) 6292static void intel_init_emon(struct drm_i915_private *dev_priv)
6149{ 6293{
6150 struct drm_i915_private *dev_priv = dev->dev_private;
6151 u32 lcfuse; 6294 u32 lcfuse;
6152 u8 pxw[16]; 6295 u8 pxw[16];
6153 int i; 6296 int i;
@@ -6216,10 +6359,8 @@ static void intel_init_emon(struct drm_device *dev)
6216 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); 6359 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
6217} 6360}
6218 6361
6219void intel_init_gt_powersave(struct drm_device *dev) 6362void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
6220{ 6363{
6221 struct drm_i915_private *dev_priv = dev->dev_private;
6222
6223 /* 6364 /*
6224 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a 6365 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
6225 * requirement. 6366 * requirement.
@@ -6229,74 +6370,66 @@ void intel_init_gt_powersave(struct drm_device *dev)
6229 intel_runtime_pm_get(dev_priv); 6370 intel_runtime_pm_get(dev_priv);
6230 } 6371 }
6231 6372
6232 if (IS_CHERRYVIEW(dev)) 6373 if (IS_CHERRYVIEW(dev_priv))
6233 cherryview_init_gt_powersave(dev); 6374 cherryview_init_gt_powersave(dev_priv);
6234 else if (IS_VALLEYVIEW(dev)) 6375 else if (IS_VALLEYVIEW(dev_priv))
6235 valleyview_init_gt_powersave(dev); 6376 valleyview_init_gt_powersave(dev_priv);
6236} 6377}
6237 6378
6238void intel_cleanup_gt_powersave(struct drm_device *dev) 6379void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
6239{ 6380{
6240 struct drm_i915_private *dev_priv = dev->dev_private; 6381 if (IS_CHERRYVIEW(dev_priv))
6241
6242 if (IS_CHERRYVIEW(dev))
6243 return; 6382 return;
6244 else if (IS_VALLEYVIEW(dev)) 6383 else if (IS_VALLEYVIEW(dev_priv))
6245 valleyview_cleanup_gt_powersave(dev); 6384 valleyview_cleanup_gt_powersave(dev_priv);
6246 6385
6247 if (!i915.enable_rc6) 6386 if (!i915.enable_rc6)
6248 intel_runtime_pm_put(dev_priv); 6387 intel_runtime_pm_put(dev_priv);
6249} 6388}
6250 6389
6251static void gen6_suspend_rps(struct drm_device *dev) 6390static void gen6_suspend_rps(struct drm_i915_private *dev_priv)
6252{ 6391{
6253 struct drm_i915_private *dev_priv = dev->dev_private;
6254
6255 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 6392 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
6256 6393
6257 gen6_disable_rps_interrupts(dev); 6394 gen6_disable_rps_interrupts(dev_priv);
6258} 6395}
6259 6396
6260/** 6397/**
6261 * intel_suspend_gt_powersave - suspend PM work and helper threads 6398 * intel_suspend_gt_powersave - suspend PM work and helper threads
6262 * @dev: drm device 6399 * @dev_priv: i915 device
6263 * 6400 *
6264 * We don't want to disable RC6 or other features here, we just want 6401 * We don't want to disable RC6 or other features here, we just want
6265 * to make sure any work we've queued has finished and won't bother 6402 * to make sure any work we've queued has finished and won't bother
6266 * us while we're suspended. 6403 * us while we're suspended.
6267 */ 6404 */
6268void intel_suspend_gt_powersave(struct drm_device *dev) 6405void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
6269{ 6406{
6270 struct drm_i915_private *dev_priv = dev->dev_private; 6407 if (INTEL_GEN(dev_priv) < 6)
6271
6272 if (INTEL_INFO(dev)->gen < 6)
6273 return; 6408 return;
6274 6409
6275 gen6_suspend_rps(dev); 6410 gen6_suspend_rps(dev_priv);
6276 6411
6277 /* Force GPU to min freq during suspend */ 6412 /* Force GPU to min freq during suspend */
6278 gen6_rps_idle(dev_priv); 6413 gen6_rps_idle(dev_priv);
6279} 6414}
6280 6415
6281void intel_disable_gt_powersave(struct drm_device *dev) 6416void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
6282{ 6417{
6283 struct drm_i915_private *dev_priv = dev->dev_private; 6418 if (IS_IRONLAKE_M(dev_priv)) {
6284 6419 ironlake_disable_drps(dev_priv);
6285 if (IS_IRONLAKE_M(dev)) { 6420 } else if (INTEL_INFO(dev_priv)->gen >= 6) {
6286 ironlake_disable_drps(dev); 6421 intel_suspend_gt_powersave(dev_priv);
6287 } else if (INTEL_INFO(dev)->gen >= 6) {
6288 intel_suspend_gt_powersave(dev);
6289 6422
6290 mutex_lock(&dev_priv->rps.hw_lock); 6423 mutex_lock(&dev_priv->rps.hw_lock);
6291 if (INTEL_INFO(dev)->gen >= 9) { 6424 if (INTEL_INFO(dev_priv)->gen >= 9) {
6292 gen9_disable_rc6(dev); 6425 gen9_disable_rc6(dev_priv);
6293 gen9_disable_rps(dev); 6426 gen9_disable_rps(dev_priv);
6294 } else if (IS_CHERRYVIEW(dev)) 6427 } else if (IS_CHERRYVIEW(dev_priv))
6295 cherryview_disable_rps(dev); 6428 cherryview_disable_rps(dev_priv);
6296 else if (IS_VALLEYVIEW(dev)) 6429 else if (IS_VALLEYVIEW(dev_priv))
6297 valleyview_disable_rps(dev); 6430 valleyview_disable_rps(dev_priv);
6298 else 6431 else
6299 gen6_disable_rps(dev); 6432 gen6_disable_rps(dev_priv);
6300 6433
6301 dev_priv->rps.enabled = false; 6434 dev_priv->rps.enabled = false;
6302 mutex_unlock(&dev_priv->rps.hw_lock); 6435 mutex_unlock(&dev_priv->rps.hw_lock);
@@ -6308,27 +6441,26 @@ static void intel_gen6_powersave_work(struct work_struct *work)
6308 struct drm_i915_private *dev_priv = 6441 struct drm_i915_private *dev_priv =
6309 container_of(work, struct drm_i915_private, 6442 container_of(work, struct drm_i915_private,
6310 rps.delayed_resume_work.work); 6443 rps.delayed_resume_work.work);
6311 struct drm_device *dev = dev_priv->dev;
6312 6444
6313 mutex_lock(&dev_priv->rps.hw_lock); 6445 mutex_lock(&dev_priv->rps.hw_lock);
6314 6446
6315 gen6_reset_rps_interrupts(dev); 6447 gen6_reset_rps_interrupts(dev_priv);
6316 6448
6317 if (IS_CHERRYVIEW(dev)) { 6449 if (IS_CHERRYVIEW(dev_priv)) {
6318 cherryview_enable_rps(dev); 6450 cherryview_enable_rps(dev_priv);
6319 } else if (IS_VALLEYVIEW(dev)) { 6451 } else if (IS_VALLEYVIEW(dev_priv)) {
6320 valleyview_enable_rps(dev); 6452 valleyview_enable_rps(dev_priv);
6321 } else if (INTEL_INFO(dev)->gen >= 9) { 6453 } else if (INTEL_INFO(dev_priv)->gen >= 9) {
6322 gen9_enable_rc6(dev); 6454 gen9_enable_rc6(dev_priv);
6323 gen9_enable_rps(dev); 6455 gen9_enable_rps(dev_priv);
6324 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) 6456 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
6325 __gen6_update_ring_freq(dev); 6457 __gen6_update_ring_freq(dev_priv);
6326 } else if (IS_BROADWELL(dev)) { 6458 } else if (IS_BROADWELL(dev_priv)) {
6327 gen8_enable_rps(dev); 6459 gen8_enable_rps(dev_priv);
6328 __gen6_update_ring_freq(dev); 6460 __gen6_update_ring_freq(dev_priv);
6329 } else { 6461 } else {
6330 gen6_enable_rps(dev); 6462 gen6_enable_rps(dev_priv);
6331 __gen6_update_ring_freq(dev); 6463 __gen6_update_ring_freq(dev_priv);
6332 } 6464 }
6333 6465
6334 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq); 6466 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
@@ -6339,27 +6471,25 @@ static void intel_gen6_powersave_work(struct work_struct *work)
6339 6471
6340 dev_priv->rps.enabled = true; 6472 dev_priv->rps.enabled = true;
6341 6473
6342 gen6_enable_rps_interrupts(dev); 6474 gen6_enable_rps_interrupts(dev_priv);
6343 6475
6344 mutex_unlock(&dev_priv->rps.hw_lock); 6476 mutex_unlock(&dev_priv->rps.hw_lock);
6345 6477
6346 intel_runtime_pm_put(dev_priv); 6478 intel_runtime_pm_put(dev_priv);
6347} 6479}
6348 6480
6349void intel_enable_gt_powersave(struct drm_device *dev) 6481void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
6350{ 6482{
6351 struct drm_i915_private *dev_priv = dev->dev_private;
6352
6353 /* Powersaving is controlled by the host when inside a VM */ 6483 /* Powersaving is controlled by the host when inside a VM */
6354 if (intel_vgpu_active(dev)) 6484 if (intel_vgpu_active(dev_priv))
6355 return; 6485 return;
6356 6486
6357 if (IS_IRONLAKE_M(dev)) { 6487 if (IS_IRONLAKE_M(dev_priv)) {
6358 ironlake_enable_drps(dev); 6488 ironlake_enable_drps(dev_priv);
6359 mutex_lock(&dev->struct_mutex); 6489 mutex_lock(&dev_priv->dev->struct_mutex);
6360 intel_init_emon(dev); 6490 intel_init_emon(dev_priv);
6361 mutex_unlock(&dev->struct_mutex); 6491 mutex_unlock(&dev_priv->dev->struct_mutex);
6362 } else if (INTEL_INFO(dev)->gen >= 6) { 6492 } else if (INTEL_INFO(dev_priv)->gen >= 6) {
6363 /* 6493 /*
6364 * PCU communication is slow and this doesn't need to be 6494 * PCU communication is slow and this doesn't need to be
6365 * done at any specific time, so do this out of our fast path 6495 * done at any specific time, so do this out of our fast path
@@ -6378,14 +6508,12 @@ void intel_enable_gt_powersave(struct drm_device *dev)
6378 } 6508 }
6379} 6509}
6380 6510
6381void intel_reset_gt_powersave(struct drm_device *dev) 6511void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
6382{ 6512{
6383 struct drm_i915_private *dev_priv = dev->dev_private; 6513 if (INTEL_INFO(dev_priv)->gen < 6)
6384
6385 if (INTEL_INFO(dev)->gen < 6)
6386 return; 6514 return;
6387 6515
6388 gen6_suspend_rps(dev); 6516 gen6_suspend_rps(dev_priv);
6389 dev_priv->rps.enabled = false; 6517 dev_priv->rps.enabled = false;
6390} 6518}
6391 6519
@@ -6698,11 +6826,42 @@ static void lpt_suspend_hw(struct drm_device *dev)
6698 } 6826 }
6699} 6827}
6700 6828
6829static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
6830 int general_prio_credits,
6831 int high_prio_credits)
6832{
6833 u32 misccpctl;
6834
6835 /* WaTempDisableDOPClkGating:bdw */
6836 misccpctl = I915_READ(GEN7_MISCCPCTL);
6837 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
6838
6839 I915_WRITE(GEN8_L3SQCREG1,
6840 L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
6841 L3_HIGH_PRIO_CREDITS(high_prio_credits));
6842
6843 /*
6844 * Wait at least 100 clocks before re-enabling clock gating.
6845 * See the definition of L3SQCREG1 in BSpec.
6846 */
6847 POSTING_READ(GEN8_L3SQCREG1);
6848 udelay(1);
6849 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
6850}
6851
6852static void skylake_init_clock_gating(struct drm_device *dev)
6853{
6854 struct drm_i915_private *dev_priv = dev->dev_private;
6855
6856 /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,kbl */
6857 I915_WRITE(CHICKEN_PAR1_1,
6858 I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
6859}
6860
6701static void broadwell_init_clock_gating(struct drm_device *dev) 6861static void broadwell_init_clock_gating(struct drm_device *dev)
6702{ 6862{
6703 struct drm_i915_private *dev_priv = dev->dev_private; 6863 struct drm_i915_private *dev_priv = dev->dev_private;
6704 enum pipe pipe; 6864 enum pipe pipe;
6705 uint32_t misccpctl;
6706 6865
6707 ilk_init_lp_watermarks(dev); 6866 ilk_init_lp_watermarks(dev);
6708 6867
@@ -6733,20 +6892,8 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
6733 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 6892 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6734 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 6893 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
6735 6894
6736 /* 6895 /* WaProgramL3SqcReg1Default:bdw */
6737 * WaProgramL3SqcReg1Default:bdw 6896 gen8_set_l3sqc_credits(dev_priv, 30, 2);
6738 * WaTempDisableDOPClkGating:bdw
6739 */
6740 misccpctl = I915_READ(GEN7_MISCCPCTL);
6741 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
6742 I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
6743 /*
6744 * Wait at least 100 clocks before re-enabling clock gating. See
6745 * the definition of L3SQCREG1 in BSpec.
6746 */
6747 POSTING_READ(GEN8_L3SQCREG1);
6748 udelay(1);
6749 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
6750 6897
6751 /* 6898 /*
6752 * WaGttCachingOffByDefault:bdw 6899 * WaGttCachingOffByDefault:bdw
@@ -7017,6 +7164,13 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
7017 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 7164 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7018 7165
7019 /* 7166 /*
7167 * WaProgramL3SqcReg1Default:chv
7168 * See gfxspecs/Related Documents/Performance Guide/
7169 * LSQC Setting Recommendations.
7170 */
7171 gen8_set_l3sqc_credits(dev_priv, 38, 2);
7172
7173 /*
7020 * GTT cache may not work with big pages, so if those 7174 * GTT cache may not work with big pages, so if those
7021 * are ever enabled GTT cache may need to be disabled. 7175 * are ever enabled GTT cache may need to be disabled.
7022 */ 7176 */
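The gen8_set_l3sqc_credits() helper introduced above factors the Broadwell-only sequence out so Cherryview can reuse it: temporarily drop DOP clock gating, program the general/high priority credit split into GEN8_L3SQCREG1, let the write post and settle, then restore the saved gate. Broadwell keeps its 30/2 split and Cherryview now gets 38/2, as the two call sites show. A rough user-space model of that sequence follows; the register indices, bit positions and field shifts are illustrative assumptions, not values taken from i915_reg.h.

/* Rough model of gen8_set_l3sqc_credits(); offsets and shifts are assumed. */
#include <stdint.h>
#include <stdio.h>

#define FAKE_MISCCPCTL        0           /* index into the fake register file */
#define FAKE_L3SQCREG1        1
#define DOP_CLOCK_GATE_ENABLE (1u << 0)   /* assumed bit position */
#define GENERAL_PRIO_SHIFT    19          /* assumed field layout */
#define HIGH_PRIO_SHIFT       14

static uint32_t regs[2];

static void set_l3sqc_credits(unsigned int general, unsigned int high)
{
    uint32_t misccpctl = regs[FAKE_MISCCPCTL];

    /* 1. Temporarily disable DOP clock gating (WaTempDisableDOPClkGating). */
    regs[FAKE_MISCCPCTL] = misccpctl & ~DOP_CLOCK_GATE_ENABLE;

    /* 2. Program the general/high priority credit split. */
    regs[FAKE_L3SQCREG1] = (general << GENERAL_PRIO_SHIFT) |
                           (high << HIGH_PRIO_SHIFT);

    /* 3. The real code posts the write and waits ~100 clocks (udelay)
     *    before restoring the saved MISCCPCTL value. */
    regs[FAKE_MISCCPCTL] = misccpctl;
}

int main(void)
{
    regs[FAKE_MISCCPCTL] = DOP_CLOCK_GATE_ENABLE;
    set_l3sqc_credits(30, 2);   /* the Broadwell split; Cherryview uses 38/2 */
    printf("L3SQCREG1 = 0x%08x\n", (unsigned int)regs[FAKE_L3SQCREG1]);
    return 0;
}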
@@ -7163,9 +7317,9 @@ static void nop_init_clock_gating(struct drm_device *dev)
7163void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) 7317void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
7164{ 7318{
7165 if (IS_SKYLAKE(dev_priv)) 7319 if (IS_SKYLAKE(dev_priv))
7166 dev_priv->display.init_clock_gating = nop_init_clock_gating; 7320 dev_priv->display.init_clock_gating = skylake_init_clock_gating;
7167 else if (IS_KABYLAKE(dev_priv)) 7321 else if (IS_KABYLAKE(dev_priv))
7168 dev_priv->display.init_clock_gating = nop_init_clock_gating; 7322 dev_priv->display.init_clock_gating = skylake_init_clock_gating;
7169 else if (IS_BROXTON(dev_priv)) 7323 else if (IS_BROXTON(dev_priv))
7170 dev_priv->display.init_clock_gating = bxt_init_clock_gating; 7324 dev_priv->display.init_clock_gating = bxt_init_clock_gating;
7171 else if (IS_BROADWELL(dev_priv)) 7325 else if (IS_BROADWELL(dev_priv))
@@ -7217,6 +7371,7 @@ void intel_init_pm(struct drm_device *dev)
7217 if (INTEL_INFO(dev)->gen >= 9) { 7371 if (INTEL_INFO(dev)->gen >= 9) {
7218 skl_setup_wm_latency(dev); 7372 skl_setup_wm_latency(dev);
7219 dev_priv->display.update_wm = skl_update_wm; 7373 dev_priv->display.update_wm = skl_update_wm;
7374 dev_priv->display.compute_global_watermarks = skl_compute_wm;
7220 } else if (HAS_PCH_SPLIT(dev)) { 7375 } else if (HAS_PCH_SPLIT(dev)) {
7221 ilk_setup_wm_latency(dev); 7376 ilk_setup_wm_latency(dev);
7222 7377
@@ -7390,19 +7545,17 @@ static void __intel_rps_boost_work(struct work_struct *work)
7390 struct drm_i915_gem_request *req = boost->req; 7545 struct drm_i915_gem_request *req = boost->req;
7391 7546
7392 if (!i915_gem_request_completed(req, true)) 7547 if (!i915_gem_request_completed(req, true))
7393 gen6_rps_boost(to_i915(req->engine->dev), NULL, 7548 gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
7394 req->emitted_jiffies);
7395 7549
7396 i915_gem_request_unreference__unlocked(req); 7550 i915_gem_request_unreference(req);
7397 kfree(boost); 7551 kfree(boost);
7398} 7552}
7399 7553
7400void intel_queue_rps_boost_for_request(struct drm_device *dev, 7554void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
7401 struct drm_i915_gem_request *req)
7402{ 7555{
7403 struct request_boost *boost; 7556 struct request_boost *boost;
7404 7557
7405 if (req == NULL || INTEL_INFO(dev)->gen < 6) 7558 if (req == NULL || INTEL_GEN(req->i915) < 6)
7406 return; 7559 return;
7407 7560
7408 if (i915_gem_request_completed(req, true)) 7561 if (i915_gem_request_completed(req, true))
@@ -7416,7 +7569,7 @@ void intel_queue_rps_boost_for_request(struct drm_device *dev,
7416 boost->req = req; 7569 boost->req = req;
7417 7570
7418 INIT_WORK(&boost->work, __intel_rps_boost_work); 7571 INIT_WORK(&boost->work, __intel_rps_boost_work);
7419 queue_work(to_i915(dev)->wq, &boost->work); 7572 queue_work(req->i915->wq, &boost->work);
7420} 7573}
7421 7574
7422void intel_pm_setup(struct drm_device *dev) 7575void intel_pm_setup(struct drm_device *dev)
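Most of the intel_pm.c churn above is mechanical plumbing: helpers that used to take a struct drm_device * and immediately dereference dev->dev_private now take struct drm_i915_private * directly, and request-based paths such as the RPS boost read req->i915 instead of walking req->engine->dev. A minimal sketch of the before/after shape, using simplified stand-in structs rather than the real i915 definitions:

#include <stdio.h>

/* Simplified stand-ins for the real structures. */
struct drm_i915_private { int rps_boosts; };
struct drm_device { struct drm_i915_private *dev_private; };
struct drm_i915_gem_request { struct drm_i915_private *i915; };

/* Old shape: take the DRM device, then re-derive the private pointer. */
static void queue_rps_boost_old(struct drm_device *dev,
                                struct drm_i915_gem_request *req)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    (void)req;
    dev_priv->rps_boosts++;
}

/* New shape: everything needed already hangs off the request. */
static void queue_rps_boost_new(struct drm_i915_gem_request *req)
{
    req->i915->rps_boosts++;
}

int main(void)
{
    struct drm_i915_private i915 = { 0 };
    struct drm_device dev = { &i915 };
    struct drm_i915_gem_request req = { &i915 };

    queue_rps_boost_old(&dev, &req);
    queue_rps_boost_new(&req);
    printf("boosts = %d\n", i915.rps_boosts);
    return 0;
}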
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index a788d1e9589b..29a09bf6bd18 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -176,7 +176,6 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
176 struct drm_i915_private *dev_priv = dev->dev_private; 176 struct drm_i915_private *dev_priv = dev->dev_private;
177 uint32_t aux_clock_divider; 177 uint32_t aux_clock_divider;
178 i915_reg_t aux_ctl_reg; 178 i915_reg_t aux_ctl_reg;
179 int precharge = 0x3;
180 static const uint8_t aux_msg[] = { 179 static const uint8_t aux_msg[] = {
181 [0] = DP_AUX_NATIVE_WRITE << 4, 180 [0] = DP_AUX_NATIVE_WRITE << 4,
182 [1] = DP_SET_POWER >> 8, 181 [1] = DP_SET_POWER >> 8,
@@ -185,6 +184,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
185 [4] = DP_SET_POWER_D0, 184 [4] = DP_SET_POWER_D0,
186 }; 185 };
187 enum port port = dig_port->port; 186 enum port port = dig_port->port;
187 u32 aux_ctl;
188 int i; 188 int i;
189 189
190 BUILD_BUG_ON(sizeof(aux_msg) > 20); 190 BUILD_BUG_ON(sizeof(aux_msg) > 20);
@@ -197,6 +197,13 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
197 DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF, 197 DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
198 DP_AUX_FRAME_SYNC_ENABLE); 198 DP_AUX_FRAME_SYNC_ENABLE);
199 199
200 if (dev_priv->psr.link_standby)
201 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
202 DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
203 else
204 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
205 DP_PSR_ENABLE);
206
200 aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port); 207 aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
201 208
202 /* Setup AUX registers */ 209 /* Setup AUX registers */
@@ -204,33 +211,9 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
204 I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2), 211 I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
205 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); 212 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
206 213
207 if (INTEL_INFO(dev)->gen >= 9) { 214 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
208 uint32_t val; 215 aux_clock_divider);
209 216 I915_WRITE(aux_ctl_reg, aux_ctl);
210 val = I915_READ(aux_ctl_reg);
211 val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
212 val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
213 val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
214 val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
215 /* Use hardcoded data values for PSR, frame sync and GTC */
216 val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
217 val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
218 val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
219 I915_WRITE(aux_ctl_reg, val);
220 } else {
221 I915_WRITE(aux_ctl_reg,
222 DP_AUX_CH_CTL_TIME_OUT_400us |
223 (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
224 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
225 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
226 }
227
228 if (dev_priv->psr.link_standby)
229 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
230 DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
231 else
232 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
233 DP_PSR_ENABLE);
234} 217}
235 218
236static void vlv_psr_enable_source(struct intel_dp *intel_dp) 219static void vlv_psr_enable_source(struct intel_dp *intel_dp)
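Instead of hand-rolling separate gen9 and pre-gen9 AUX_CTL programming for PSR, the rewritten hsw_psr_enable_sink() above now asks the connector's get_aux_send_ctl hook for the control word, so PSR shares the same AUX setup path as ordinary DP AUX transfers. A bare-bones sketch of that callback shape; the struct layout and bit packing are simplified placeholders, not the real intel_dp definitions.

#include <stdint.h>
#include <stdio.h>

/* One hook builds the AUX_CTL word for both regular transfers and PSR. */
struct intel_dp {
    uint32_t (*get_aux_send_ctl)(struct intel_dp *dp, int has_aux_irq,
                                 int send_bytes, uint32_t clock_divider);
};

static uint32_t fake_aux_send_ctl(struct intel_dp *dp, int has_aux_irq,
                                  int send_bytes, uint32_t clock_divider)
{
    (void)dp;
    (void)has_aux_irq;
    /* Illustrative packing only. */
    return ((uint32_t)send_bytes << 20) | clock_divider;
}

int main(void)
{
    struct intel_dp dp = { .get_aux_send_ctl = fake_aux_send_ctl };
    /* PSR would pass its 5-byte DPCD write and the precomputed divider. */
    uint32_t aux_ctl = dp.get_aux_send_ctl(&dp, 0, 5, 0x48);

    printf("AUX_CTL = 0x%08x\n", (unsigned int)aux_ctl);
    return 0;
}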
@@ -272,14 +255,14 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
272 struct drm_i915_private *dev_priv = dev->dev_private; 255 struct drm_i915_private *dev_priv = dev->dev_private;
273 256
274 uint32_t max_sleep_time = 0x1f; 257 uint32_t max_sleep_time = 0x1f;
275 /* 258 /* Lately it was identified that depending on panel idle frame count
276 * Let's respect VBT in case VBT asks a higher idle_frame value. 259 * calculated at HW can be off by 1. So let's use what came
277 * Let's use 6 as the minimum to cover all known cases including 260 * from VBT + 1.
278 * the off-by-one issue that HW has in some cases. Also there are 261 * There are also other cases where panel demands at least 4
279 * cases where sink should be able to train 262 * but VBT is not being set. To cover these 2 cases lets use
280 * with the 5 or 6 idle patterns. 263 * at least 5 when VBT isn't set to be on the safest side.
281 */ 264 */
282 uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); 265 uint32_t idle_frames = dev_priv->vbt.psr.idle_frames + 1;
283 uint32_t val = EDP_PSR_ENABLE; 266 uint32_t val = EDP_PSR_ENABLE;
284 267
285 val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; 268 val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
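The last PSR hunk replaces the old max(6, VBT) idle-frame choice with the VBT value plus one, per the rewritten comment about the hardware's off-by-one and panels that want more frames than VBT advertises. Below is a standalone sketch of the policy that comment describes; treating a VBT value of 0 as "not set" and the floor of 5 are assumptions drawn from the comment's wording, not from code visible in this hunk.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the idle-frame policy the rewritten comment describes. */
static uint32_t psr_idle_frames(uint32_t vbt_idle_frames)
{
    /* HW can be off by one, so trust VBT + 1 when VBT provides a value;
     * otherwise fall back to at least 5 to stay on the safe side. */
    if (vbt_idle_frames)
        return vbt_idle_frames + 1;
    return 5;
}

int main(void)
{
    printf("vbt=0 -> %u, vbt=4 -> %u\n",
           (unsigned int)psr_idle_frames(0),
           (unsigned int)psr_idle_frames(4));
    return 0;
}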
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 04402bb9d26b..8d35a3978f9b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,11 @@
34#include "i915_trace.h" 34#include "i915_trace.h"
35#include "intel_drv.h" 35#include "intel_drv.h"
36 36
37/* Rough estimate of the typical request size, performing a flush,
38 * set-context and then emitting the batch.
39 */
40#define LEGACY_REQUEST_SIZE 200
41
37int __intel_ring_space(int head, int tail, int size) 42int __intel_ring_space(int head, int tail, int size)
38{ 43{
39 int space = head - tail; 44 int space = head - tail;
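__intel_ring_space() computes how much room is left between the hardware read pointer (head) and the software write pointer (tail) of the circular ring; only the opening subtraction is visible in this hunk. A self-contained version of the usual wrap-around arithmetic, where the reserved headroom constant is an assumption for the sketch:

#include <assert.h>
#include <stdio.h>

#define RING_HEADROOM 64   /* assumed safety margin so tail never catches head */

static int ring_space(int head, int tail, int size)
{
    int space = head - tail;

    if (space <= 0)
        space += size;             /* tail has wrapped past head */
    return space - RING_HEADROOM;
}

int main(void)
{
    assert(ring_space(0, 0, 4096) == 4096 - RING_HEADROOM);
    assert(ring_space(512, 1024, 4096) == 3584 - RING_HEADROOM);
    printf("ok\n");
    return 0;
}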
@@ -55,7 +60,7 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
55 60
56bool intel_engine_stopped(struct intel_engine_cs *engine) 61bool intel_engine_stopped(struct intel_engine_cs *engine)
57{ 62{
58 struct drm_i915_private *dev_priv = engine->dev->dev_private; 63 struct drm_i915_private *dev_priv = engine->i915;
59 return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine); 64 return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
60} 65}
61 66
@@ -101,7 +106,6 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
101 u32 flush_domains) 106 u32 flush_domains)
102{ 107{
103 struct intel_engine_cs *engine = req->engine; 108 struct intel_engine_cs *engine = req->engine;
104 struct drm_device *dev = engine->dev;
105 u32 cmd; 109 u32 cmd;
106 int ret; 110 int ret;
107 111
@@ -140,7 +144,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
140 cmd |= MI_EXE_FLUSH; 144 cmd |= MI_EXE_FLUSH;
141 145
142 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && 146 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
143 (IS_G4X(dev) || IS_GEN5(dev))) 147 (IS_G4X(req->i915) || IS_GEN5(req->i915)))
144 cmd |= MI_INVALIDATE_ISP; 148 cmd |= MI_INVALIDATE_ISP;
145 149
146 ret = intel_ring_begin(req, 2); 150 ret = intel_ring_begin(req, 2);
@@ -426,19 +430,19 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
426static void ring_write_tail(struct intel_engine_cs *engine, 430static void ring_write_tail(struct intel_engine_cs *engine,
427 u32 value) 431 u32 value)
428{ 432{
429 struct drm_i915_private *dev_priv = engine->dev->dev_private; 433 struct drm_i915_private *dev_priv = engine->i915;
430 I915_WRITE_TAIL(engine, value); 434 I915_WRITE_TAIL(engine, value);
431} 435}
432 436
433u64 intel_ring_get_active_head(struct intel_engine_cs *engine) 437u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
434{ 438{
435 struct drm_i915_private *dev_priv = engine->dev->dev_private; 439 struct drm_i915_private *dev_priv = engine->i915;
436 u64 acthd; 440 u64 acthd;
437 441
438 if (INTEL_INFO(engine->dev)->gen >= 8) 442 if (INTEL_GEN(dev_priv) >= 8)
439 acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base), 443 acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
440 RING_ACTHD_UDW(engine->mmio_base)); 444 RING_ACTHD_UDW(engine->mmio_base));
441 else if (INTEL_INFO(engine->dev)->gen >= 4) 445 else if (INTEL_GEN(dev_priv) >= 4)
442 acthd = I915_READ(RING_ACTHD(engine->mmio_base)); 446 acthd = I915_READ(RING_ACTHD(engine->mmio_base));
443 else 447 else
444 acthd = I915_READ(ACTHD); 448 acthd = I915_READ(ACTHD);
@@ -448,25 +452,24 @@ u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
448 452
449static void ring_setup_phys_status_page(struct intel_engine_cs *engine) 453static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
450{ 454{
451 struct drm_i915_private *dev_priv = engine->dev->dev_private; 455 struct drm_i915_private *dev_priv = engine->i915;
452 u32 addr; 456 u32 addr;
453 457
454 addr = dev_priv->status_page_dmah->busaddr; 458 addr = dev_priv->status_page_dmah->busaddr;
455 if (INTEL_INFO(engine->dev)->gen >= 4) 459 if (INTEL_GEN(dev_priv) >= 4)
456 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; 460 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
457 I915_WRITE(HWS_PGA, addr); 461 I915_WRITE(HWS_PGA, addr);
458} 462}
459 463
460static void intel_ring_setup_status_page(struct intel_engine_cs *engine) 464static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
461{ 465{
462 struct drm_device *dev = engine->dev; 466 struct drm_i915_private *dev_priv = engine->i915;
463 struct drm_i915_private *dev_priv = engine->dev->dev_private;
464 i915_reg_t mmio; 467 i915_reg_t mmio;
465 468
466 /* The ring status page addresses are no longer next to the rest of 469 /* The ring status page addresses are no longer next to the rest of
467 * the ring registers as of gen7. 470 * the ring registers as of gen7.
468 */ 471 */
469 if (IS_GEN7(dev)) { 472 if (IS_GEN7(dev_priv)) {
470 switch (engine->id) { 473 switch (engine->id) {
471 case RCS: 474 case RCS:
472 mmio = RENDER_HWS_PGA_GEN7; 475 mmio = RENDER_HWS_PGA_GEN7;
@@ -486,7 +489,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
486 mmio = VEBOX_HWS_PGA_GEN7; 489 mmio = VEBOX_HWS_PGA_GEN7;
487 break; 490 break;
488 } 491 }
489 } else if (IS_GEN6(engine->dev)) { 492 } else if (IS_GEN6(dev_priv)) {
490 mmio = RING_HWS_PGA_GEN6(engine->mmio_base); 493 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
491 } else { 494 } else {
492 /* XXX: gen8 returns to sanity */ 495 /* XXX: gen8 returns to sanity */
@@ -503,7 +506,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
503 * arises: do we still need this and if so how should we go about 506 * arises: do we still need this and if so how should we go about
504 * invalidating the TLB? 507 * invalidating the TLB?
505 */ 508 */
506 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { 509 if (IS_GEN(dev_priv, 6, 7)) {
507 i915_reg_t reg = RING_INSTPM(engine->mmio_base); 510 i915_reg_t reg = RING_INSTPM(engine->mmio_base);
508 511
509 /* ring should be idle before issuing a sync flush*/ 512 /* ring should be idle before issuing a sync flush*/
@@ -521,9 +524,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
521 524
522static bool stop_ring(struct intel_engine_cs *engine) 525static bool stop_ring(struct intel_engine_cs *engine)
523{ 526{
524 struct drm_i915_private *dev_priv = to_i915(engine->dev); 527 struct drm_i915_private *dev_priv = engine->i915;
525 528
526 if (!IS_GEN2(engine->dev)) { 529 if (!IS_GEN2(dev_priv)) {
527 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); 530 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
528 if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) { 531 if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
529 DRM_ERROR("%s : timed out trying to stop ring\n", 532 DRM_ERROR("%s : timed out trying to stop ring\n",
@@ -541,7 +544,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
541 I915_WRITE_HEAD(engine, 0); 544 I915_WRITE_HEAD(engine, 0);
542 engine->write_tail(engine, 0); 545 engine->write_tail(engine, 0);
543 546
544 if (!IS_GEN2(engine->dev)) { 547 if (!IS_GEN2(dev_priv)) {
545 (void)I915_READ_CTL(engine); 548 (void)I915_READ_CTL(engine);
546 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); 549 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
547 } 550 }
@@ -556,8 +559,7 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
556 559
557static int init_ring_common(struct intel_engine_cs *engine) 560static int init_ring_common(struct intel_engine_cs *engine)
558{ 561{
559 struct drm_device *dev = engine->dev; 562 struct drm_i915_private *dev_priv = engine->i915;
560 struct drm_i915_private *dev_priv = dev->dev_private;
561 struct intel_ringbuffer *ringbuf = engine->buffer; 563 struct intel_ringbuffer *ringbuf = engine->buffer;
562 struct drm_i915_gem_object *obj = ringbuf->obj; 564 struct drm_i915_gem_object *obj = ringbuf->obj;
563 int ret = 0; 565 int ret = 0;
@@ -587,7 +589,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
587 } 589 }
588 } 590 }
589 591
590 if (I915_NEED_GFX_HWS(dev)) 592 if (I915_NEED_GFX_HWS(dev_priv))
591 intel_ring_setup_status_page(engine); 593 intel_ring_setup_status_page(engine);
592 else 594 else
593 ring_setup_phys_status_page(engine); 595 ring_setup_phys_status_page(engine);
@@ -644,12 +646,10 @@ out:
644void 646void
645intel_fini_pipe_control(struct intel_engine_cs *engine) 647intel_fini_pipe_control(struct intel_engine_cs *engine)
646{ 648{
647 struct drm_device *dev = engine->dev;
648
649 if (engine->scratch.obj == NULL) 649 if (engine->scratch.obj == NULL)
650 return; 650 return;
651 651
652 if (INTEL_INFO(dev)->gen >= 5) { 652 if (INTEL_GEN(engine->i915) >= 5) {
653 kunmap(sg_page(engine->scratch.obj->pages->sgl)); 653 kunmap(sg_page(engine->scratch.obj->pages->sgl));
654 i915_gem_object_ggtt_unpin(engine->scratch.obj); 654 i915_gem_object_ggtt_unpin(engine->scratch.obj);
655 } 655 }
@@ -665,10 +665,11 @@ intel_init_pipe_control(struct intel_engine_cs *engine)
665 665
666 WARN_ON(engine->scratch.obj); 666 WARN_ON(engine->scratch.obj);
667 667
668 engine->scratch.obj = i915_gem_alloc_object(engine->dev, 4096); 668 engine->scratch.obj = i915_gem_object_create(engine->i915->dev, 4096);
669 if (engine->scratch.obj == NULL) { 669 if (IS_ERR(engine->scratch.obj)) {
670 DRM_ERROR("Failed to allocate seqno page\n"); 670 DRM_ERROR("Failed to allocate seqno page\n");
671 ret = -ENOMEM; 671 ret = PTR_ERR(engine->scratch.obj);
672 engine->scratch.obj = NULL;
672 goto err; 673 goto err;
673 } 674 }
674 675
@@ -702,11 +703,9 @@ err:
702 703
703static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) 704static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
704{ 705{
705 int ret, i;
706 struct intel_engine_cs *engine = req->engine; 706 struct intel_engine_cs *engine = req->engine;
707 struct drm_device *dev = engine->dev; 707 struct i915_workarounds *w = &req->i915->workarounds;
708 struct drm_i915_private *dev_priv = dev->dev_private; 708 int ret, i;
709 struct i915_workarounds *w = &dev_priv->workarounds;
710 709
711 if (w->count == 0) 710 if (w->count == 0)
712 return 0; 711 return 0;
@@ -795,7 +794,7 @@ static int wa_add(struct drm_i915_private *dev_priv,
795static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, 794static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
796 i915_reg_t reg) 795 i915_reg_t reg)
797{ 796{
798 struct drm_i915_private *dev_priv = engine->dev->dev_private; 797 struct drm_i915_private *dev_priv = engine->i915;
799 struct i915_workarounds *wa = &dev_priv->workarounds; 798 struct i915_workarounds *wa = &dev_priv->workarounds;
800 const uint32_t index = wa->hw_whitelist_count[engine->id]; 799 const uint32_t index = wa->hw_whitelist_count[engine->id];
801 800
@@ -811,8 +810,7 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
811 810
812static int gen8_init_workarounds(struct intel_engine_cs *engine) 811static int gen8_init_workarounds(struct intel_engine_cs *engine)
813{ 812{
814 struct drm_device *dev = engine->dev; 813 struct drm_i915_private *dev_priv = engine->i915;
815 struct drm_i915_private *dev_priv = dev->dev_private;
816 814
817 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); 815 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
818 816
@@ -863,9 +861,8 @@ static int gen8_init_workarounds(struct intel_engine_cs *engine)
863 861
864static int bdw_init_workarounds(struct intel_engine_cs *engine) 862static int bdw_init_workarounds(struct intel_engine_cs *engine)
865{ 863{
864 struct drm_i915_private *dev_priv = engine->i915;
866 int ret; 865 int ret;
867 struct drm_device *dev = engine->dev;
868 struct drm_i915_private *dev_priv = dev->dev_private;
869 866
870 ret = gen8_init_workarounds(engine); 867 ret = gen8_init_workarounds(engine);
871 if (ret) 868 if (ret)
@@ -885,16 +882,15 @@ static int bdw_init_workarounds(struct intel_engine_cs *engine)
885 /* WaForceContextSaveRestoreNonCoherent:bdw */ 882 /* WaForceContextSaveRestoreNonCoherent:bdw */
886 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | 883 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
887 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ 884 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
888 (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); 885 (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
889 886
890 return 0; 887 return 0;
891} 888}
892 889
893static int chv_init_workarounds(struct intel_engine_cs *engine) 890static int chv_init_workarounds(struct intel_engine_cs *engine)
894{ 891{
892 struct drm_i915_private *dev_priv = engine->i915;
895 int ret; 893 int ret;
896 struct drm_device *dev = engine->dev;
897 struct drm_i915_private *dev_priv = dev->dev_private;
898 894
899 ret = gen8_init_workarounds(engine); 895 ret = gen8_init_workarounds(engine);
900 if (ret) 896 if (ret)
@@ -911,8 +907,7 @@ static int chv_init_workarounds(struct intel_engine_cs *engine)
911 907
912static int gen9_init_workarounds(struct intel_engine_cs *engine) 908static int gen9_init_workarounds(struct intel_engine_cs *engine)
913{ 909{
914 struct drm_device *dev = engine->dev; 910 struct drm_i915_private *dev_priv = engine->i915;
915 struct drm_i915_private *dev_priv = dev->dev_private;
916 uint32_t tmp; 911 uint32_t tmp;
917 int ret; 912 int ret;
918 913
@@ -935,14 +930,14 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
935 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); 930 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
936 931
937 /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */ 932 /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
938 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 933 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
939 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 934 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
940 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, 935 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
941 GEN9_DG_MIRROR_FIX_ENABLE); 936 GEN9_DG_MIRROR_FIX_ENABLE);
942 937
943 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ 938 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
944 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 939 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
945 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 940 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
946 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1, 941 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
947 GEN9_RHWO_OPTIMIZATION_DISABLE); 942 GEN9_RHWO_OPTIMIZATION_DISABLE);
948 /* 943 /*
@@ -968,20 +963,20 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
968 GEN9_CCS_TLB_PREFETCH_ENABLE); 963 GEN9_CCS_TLB_PREFETCH_ENABLE);
969 964
970 /* WaDisableMaskBasedCammingInRCC:skl,bxt */ 965 /* WaDisableMaskBasedCammingInRCC:skl,bxt */
971 if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) || 966 if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
972 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 967 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
973 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, 968 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
974 PIXEL_MASK_CAMMING_DISABLE); 969 PIXEL_MASK_CAMMING_DISABLE);
975 970
976 /* WaForceContextSaveRestoreNonCoherent:skl,bxt */ 971 /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
977 tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT; 972 tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
978 if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) || 973 if (IS_SKL_REVID(dev_priv, SKL_REVID_F0, REVID_FOREVER) ||
979 IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER)) 974 IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
980 tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; 975 tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
981 WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); 976 WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
982 977
983 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */ 978 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
984 if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0)) 979 if (IS_SKYLAKE(dev_priv) || IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
985 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 980 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
986 GEN8_SAMPLER_POWER_BYPASS_DIS); 981 GEN8_SAMPLER_POWER_BYPASS_DIS);
987 982
@@ -1007,8 +1002,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
1007 1002
1008static int skl_tune_iz_hashing(struct intel_engine_cs *engine) 1003static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
1009{ 1004{
1010 struct drm_device *dev = engine->dev; 1005 struct drm_i915_private *dev_priv = engine->i915;
1011 struct drm_i915_private *dev_priv = dev->dev_private;
1012 u8 vals[3] = { 0, 0, 0 }; 1006 u8 vals[3] = { 0, 0, 0 };
1013 unsigned int i; 1007 unsigned int i;
1014 1008
@@ -1049,9 +1043,8 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
1049 1043
1050static int skl_init_workarounds(struct intel_engine_cs *engine) 1044static int skl_init_workarounds(struct intel_engine_cs *engine)
1051{ 1045{
1046 struct drm_i915_private *dev_priv = engine->i915;
1052 int ret; 1047 int ret;
1053 struct drm_device *dev = engine->dev;
1054 struct drm_i915_private *dev_priv = dev->dev_private;
1055 1048
1056 ret = gen9_init_workarounds(engine); 1049 ret = gen9_init_workarounds(engine);
1057 if (ret) 1050 if (ret)
@@ -1062,12 +1055,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
1062 * until D0 which is the default case so this is equivalent to 1055 * until D0 which is the default case so this is equivalent to
1063 * !WaDisablePerCtxtPreemptionGranularityControl:skl 1056 * !WaDisablePerCtxtPreemptionGranularityControl:skl
1064 */ 1057 */
1065 if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) { 1058 if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
1066 I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, 1059 I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
1067 _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); 1060 _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
1068 } 1061 }
1069 1062
1070 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) { 1063 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0)) {
1071 /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ 1064 /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
1072 I915_WRITE(FF_SLICE_CS_CHICKEN2, 1065 I915_WRITE(FF_SLICE_CS_CHICKEN2,
1073 _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); 1066 _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
@@ -1076,24 +1069,24 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
1076 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes 1069 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
1077 * involving this register should also be added to WA batch as required. 1070 * involving this register should also be added to WA batch as required.
1078 */ 1071 */
1079 if (IS_SKL_REVID(dev, 0, SKL_REVID_E0)) 1072 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
1080 /* WaDisableLSQCROPERFforOCL:skl */ 1073 /* WaDisableLSQCROPERFforOCL:skl */
1081 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | 1074 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
1082 GEN8_LQSC_RO_PERF_DIS); 1075 GEN8_LQSC_RO_PERF_DIS);
1083 1076
1084 /* WaEnableGapsTsvCreditFix:skl */ 1077 /* WaEnableGapsTsvCreditFix:skl */
1085 if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) { 1078 if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
1086 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | 1079 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1087 GEN9_GAPS_TSV_CREDIT_DISABLE)); 1080 GEN9_GAPS_TSV_CREDIT_DISABLE));
1088 } 1081 }
1089 1082
1090 /* WaDisablePowerCompilerClockGating:skl */ 1083 /* WaDisablePowerCompilerClockGating:skl */
1091 if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0)) 1084 if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
1092 WA_SET_BIT_MASKED(HIZ_CHICKEN, 1085 WA_SET_BIT_MASKED(HIZ_CHICKEN,
1093 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); 1086 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
1094 1087
1095 /* This is tied to WaForceContextSaveRestoreNonCoherent */ 1088 /* This is tied to WaForceContextSaveRestoreNonCoherent */
1096 if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) { 1089 if (IS_SKL_REVID(dev_priv, 0, REVID_FOREVER)) {
1097 /* 1090 /*
1098 *Use Force Non-Coherent whenever executing a 3D context. This 1091 *Use Force Non-Coherent whenever executing a 3D context. This
1099 * is a workaround for a possible hang in the unlikely event 1092 * is a workaround for a possible hang in the unlikely event
@@ -1109,13 +1102,13 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
1109 } 1102 }
1110 1103
1111 /* WaBarrierPerformanceFixDisable:skl */ 1104 /* WaBarrierPerformanceFixDisable:skl */
1112 if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0)) 1105 if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
1113 WA_SET_BIT_MASKED(HDC_CHICKEN0, 1106 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1114 HDC_FENCE_DEST_SLM_DISABLE | 1107 HDC_FENCE_DEST_SLM_DISABLE |
1115 HDC_BARRIER_PERFORMANCE_DISABLE); 1108 HDC_BARRIER_PERFORMANCE_DISABLE);
1116 1109
1117 /* WaDisableSbeCacheDispatchPortSharing:skl */ 1110 /* WaDisableSbeCacheDispatchPortSharing:skl */
1118 if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) 1111 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
1119 WA_SET_BIT_MASKED( 1112 WA_SET_BIT_MASKED(
1120 GEN7_HALF_SLICE_CHICKEN1, 1113 GEN7_HALF_SLICE_CHICKEN1,
1121 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1114 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1130,9 +1123,8 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
1130 1123
1131static int bxt_init_workarounds(struct intel_engine_cs *engine) 1124static int bxt_init_workarounds(struct intel_engine_cs *engine)
1132{ 1125{
1126 struct drm_i915_private *dev_priv = engine->i915;
1133 int ret; 1127 int ret;
1134 struct drm_device *dev = engine->dev;
1135 struct drm_i915_private *dev_priv = dev->dev_private;
1136 1128
1137 ret = gen9_init_workarounds(engine); 1129 ret = gen9_init_workarounds(engine);
1138 if (ret) 1130 if (ret)
@@ -1140,11 +1132,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
1140 1132
1141 /* WaStoreMultiplePTEenable:bxt */ 1133 /* WaStoreMultiplePTEenable:bxt */
1142 /* This is a requirement according to Hardware specification */ 1134 /* This is a requirement according to Hardware specification */
1143 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 1135 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
1144 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); 1136 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
1145 1137
1146 /* WaSetClckGatingDisableMedia:bxt */ 1138 /* WaSetClckGatingDisableMedia:bxt */
1147 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 1139 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
1148 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & 1140 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1149 ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE)); 1141 ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
1150 } 1142 }
@@ -1154,7 +1146,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
1154 STALL_DOP_GATING_DISABLE); 1146 STALL_DOP_GATING_DISABLE);
1155 1147
1156 /* WaDisableSbeCacheDispatchPortSharing:bxt */ 1148 /* WaDisableSbeCacheDispatchPortSharing:bxt */
1157 if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) { 1149 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
1158 WA_SET_BIT_MASKED( 1150 WA_SET_BIT_MASKED(
1159 GEN7_HALF_SLICE_CHICKEN1, 1151 GEN7_HALF_SLICE_CHICKEN1,
1160 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1152 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1164,7 +1156,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
1164 /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */ 1156 /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
1165 /* WaDisableObjectLevelPreemtionForInstanceId:bxt */ 1157 /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
1166 /* WaDisableLSQCROPERFforOCL:bxt */ 1158 /* WaDisableLSQCROPERFforOCL:bxt */
1167 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 1159 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
1168 ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1); 1160 ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
1169 if (ret) 1161 if (ret)
1170 return ret; 1162 return ret;
@@ -1174,29 +1166,33 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
1174 return ret; 1166 return ret;
1175 } 1167 }
1176 1168
1169 /* WaProgramL3SqcReg1DefaultForPerf:bxt */
1170 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
1171 I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
1172 L3_HIGH_PRIO_CREDITS(2));
1173
1177 return 0; 1174 return 0;
1178} 1175}
1179 1176
1180int init_workarounds_ring(struct intel_engine_cs *engine) 1177int init_workarounds_ring(struct intel_engine_cs *engine)
1181{ 1178{
1182 struct drm_device *dev = engine->dev; 1179 struct drm_i915_private *dev_priv = engine->i915;
1183 struct drm_i915_private *dev_priv = dev->dev_private;
1184 1180
1185 WARN_ON(engine->id != RCS); 1181 WARN_ON(engine->id != RCS);
1186 1182
1187 dev_priv->workarounds.count = 0; 1183 dev_priv->workarounds.count = 0;
1188 dev_priv->workarounds.hw_whitelist_count[RCS] = 0; 1184 dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
1189 1185
1190 if (IS_BROADWELL(dev)) 1186 if (IS_BROADWELL(dev_priv))
1191 return bdw_init_workarounds(engine); 1187 return bdw_init_workarounds(engine);
1192 1188
1193 if (IS_CHERRYVIEW(dev)) 1189 if (IS_CHERRYVIEW(dev_priv))
1194 return chv_init_workarounds(engine); 1190 return chv_init_workarounds(engine);
1195 1191
1196 if (IS_SKYLAKE(dev)) 1192 if (IS_SKYLAKE(dev_priv))
1197 return skl_init_workarounds(engine); 1193 return skl_init_workarounds(engine);
1198 1194
1199 if (IS_BROXTON(dev)) 1195 if (IS_BROXTON(dev_priv))
1200 return bxt_init_workarounds(engine); 1196 return bxt_init_workarounds(engine);
1201 1197
1202 return 0; 1198 return 0;
@@ -1204,14 +1200,13 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
1204 1200
1205static int init_render_ring(struct intel_engine_cs *engine) 1201static int init_render_ring(struct intel_engine_cs *engine)
1206{ 1202{
1207 struct drm_device *dev = engine->dev; 1203 struct drm_i915_private *dev_priv = engine->i915;
1208 struct drm_i915_private *dev_priv = dev->dev_private;
1209 int ret = init_ring_common(engine); 1204 int ret = init_ring_common(engine);
1210 if (ret) 1205 if (ret)
1211 return ret; 1206 return ret;
1212 1207
1213 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ 1208 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
1214 if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7) 1209 if (IS_GEN(dev_priv, 4, 6))
1215 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); 1210 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
1216 1211
1217 /* We need to disable the AsyncFlip performance optimisations in order 1212 /* We need to disable the AsyncFlip performance optimisations in order
@@ -1220,22 +1215,22 @@ static int init_render_ring(struct intel_engine_cs *engine)
1220 * 1215 *
1221 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv 1216 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
1222 */ 1217 */
1223 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) 1218 if (IS_GEN(dev_priv, 6, 7))
1224 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); 1219 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1225 1220
1226 /* Required for the hardware to program scanline values for waiting */ 1221 /* Required for the hardware to program scanline values for waiting */
1227 /* WaEnableFlushTlbInvalidationMode:snb */ 1222 /* WaEnableFlushTlbInvalidationMode:snb */
1228 if (INTEL_INFO(dev)->gen == 6) 1223 if (IS_GEN6(dev_priv))
1229 I915_WRITE(GFX_MODE, 1224 I915_WRITE(GFX_MODE,
1230 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT)); 1225 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
1231 1226
1232 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ 1227 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
1233 if (IS_GEN7(dev)) 1228 if (IS_GEN7(dev_priv))
1234 I915_WRITE(GFX_MODE_GEN7, 1229 I915_WRITE(GFX_MODE_GEN7,
1235 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | 1230 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
1236 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); 1231 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
1237 1232
1238 if (IS_GEN6(dev)) { 1233 if (IS_GEN6(dev_priv)) {
1239 /* From the Sandybridge PRM, volume 1 part 3, page 24: 1234 /* From the Sandybridge PRM, volume 1 part 3, page 24:
1240 * "If this bit is set, STCunit will have LRA as replacement 1235 * "If this bit is set, STCunit will have LRA as replacement
1241 * policy. [...] This bit must be reset. LRA replacement 1236 * policy. [...] This bit must be reset. LRA replacement
@@ -1245,19 +1240,18 @@ static int init_render_ring(struct intel_engine_cs *engine)
1245 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 1240 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
1246 } 1241 }
1247 1242
1248 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) 1243 if (IS_GEN(dev_priv, 6, 7))
1249 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 1244 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1250 1245
1251 if (HAS_L3_DPF(dev)) 1246 if (HAS_L3_DPF(dev_priv))
1252 I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev)); 1247 I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
1253 1248
1254 return init_workarounds_ring(engine); 1249 return init_workarounds_ring(engine);
1255} 1250}
1256 1251
1257static void render_ring_cleanup(struct intel_engine_cs *engine) 1252static void render_ring_cleanup(struct intel_engine_cs *engine)
1258{ 1253{
1259 struct drm_device *dev = engine->dev; 1254 struct drm_i915_private *dev_priv = engine->i915;
1260 struct drm_i915_private *dev_priv = dev->dev_private;
1261 1255
1262 if (dev_priv->semaphore_obj) { 1256 if (dev_priv->semaphore_obj) {
1263 i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj); 1257 i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
@@ -1273,13 +1267,12 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
1273{ 1267{
1274#define MBOX_UPDATE_DWORDS 8 1268#define MBOX_UPDATE_DWORDS 8
1275 struct intel_engine_cs *signaller = signaller_req->engine; 1269 struct intel_engine_cs *signaller = signaller_req->engine;
1276 struct drm_device *dev = signaller->dev; 1270 struct drm_i915_private *dev_priv = signaller_req->i915;
1277 struct drm_i915_private *dev_priv = dev->dev_private;
1278 struct intel_engine_cs *waiter; 1271 struct intel_engine_cs *waiter;
1279 enum intel_engine_id id; 1272 enum intel_engine_id id;
1280 int ret, num_rings; 1273 int ret, num_rings;
1281 1274
1282 num_rings = hweight32(INTEL_INFO(dev)->ring_mask); 1275 num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
1283 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; 1276 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
1284#undef MBOX_UPDATE_DWORDS 1277#undef MBOX_UPDATE_DWORDS
1285 1278
@@ -1297,7 +1290,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
1297 intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6)); 1290 intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
1298 intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB | 1291 intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
1299 PIPE_CONTROL_QW_WRITE | 1292 PIPE_CONTROL_QW_WRITE |
1300 PIPE_CONTROL_FLUSH_ENABLE); 1293 PIPE_CONTROL_CS_STALL);
1301 intel_ring_emit(signaller, lower_32_bits(gtt_offset)); 1294 intel_ring_emit(signaller, lower_32_bits(gtt_offset));
1302 intel_ring_emit(signaller, upper_32_bits(gtt_offset)); 1295 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
1303 intel_ring_emit(signaller, seqno); 1296 intel_ring_emit(signaller, seqno);
@@ -1315,13 +1308,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
1315{ 1308{
1316#define MBOX_UPDATE_DWORDS 6 1309#define MBOX_UPDATE_DWORDS 6
1317 struct intel_engine_cs *signaller = signaller_req->engine; 1310 struct intel_engine_cs *signaller = signaller_req->engine;
1318 struct drm_device *dev = signaller->dev; 1311 struct drm_i915_private *dev_priv = signaller_req->i915;
1319 struct drm_i915_private *dev_priv = dev->dev_private;
1320 struct intel_engine_cs *waiter; 1312 struct intel_engine_cs *waiter;
1321 enum intel_engine_id id; 1313 enum intel_engine_id id;
1322 int ret, num_rings; 1314 int ret, num_rings;
1323 1315
1324 num_rings = hweight32(INTEL_INFO(dev)->ring_mask); 1316 num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
1325 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; 1317 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
1326#undef MBOX_UPDATE_DWORDS 1318#undef MBOX_UPDATE_DWORDS
1327 1319
@@ -1354,14 +1346,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
1354 unsigned int num_dwords) 1346 unsigned int num_dwords)
1355{ 1347{
1356 struct intel_engine_cs *signaller = signaller_req->engine; 1348 struct intel_engine_cs *signaller = signaller_req->engine;
1357 struct drm_device *dev = signaller->dev; 1349 struct drm_i915_private *dev_priv = signaller_req->i915;
1358 struct drm_i915_private *dev_priv = dev->dev_private;
1359 struct intel_engine_cs *useless; 1350 struct intel_engine_cs *useless;
1360 enum intel_engine_id id; 1351 enum intel_engine_id id;
1361 int ret, num_rings; 1352 int ret, num_rings;
1362 1353
1363#define MBOX_UPDATE_DWORDS 3 1354#define MBOX_UPDATE_DWORDS 3
1364 num_rings = hweight32(INTEL_INFO(dev)->ring_mask); 1355 num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
1365 num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2); 1356 num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
1366#undef MBOX_UPDATE_DWORDS 1357#undef MBOX_UPDATE_DWORDS
1367 1358
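For reference, the allocation math in gen6_signal() above works out as follows: with MBOX_UPDATE_DWORDS of 3 and, say, four engines advertised in ring_mask, the signaller adds (4 - 1) * 3 = 9 dwords for the other engines' mailbox updates, which round_up(..., 2) pads to 10 extra dwords on top of whatever the caller requested for its own commands.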
@@ -1420,10 +1411,38 @@ gen6_add_request(struct drm_i915_gem_request *req)
1420 return 0; 1411 return 0;
1421} 1412}
1422 1413
1423static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev, 1414static int
1415gen8_render_add_request(struct drm_i915_gem_request *req)
1416{
1417 struct intel_engine_cs *engine = req->engine;
1418 int ret;
1419
1420 if (engine->semaphore.signal)
1421 ret = engine->semaphore.signal(req, 8);
1422 else
1423 ret = intel_ring_begin(req, 8);
1424 if (ret)
1425 return ret;
1426
1427 intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
1428 intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB |
1429 PIPE_CONTROL_CS_STALL |
1430 PIPE_CONTROL_QW_WRITE));
1431 intel_ring_emit(engine, intel_hws_seqno_address(req->engine));
1432 intel_ring_emit(engine, 0);
1433 intel_ring_emit(engine, i915_gem_request_get_seqno(req));
1434 /* We're thrashing one dword of HWS. */
1435 intel_ring_emit(engine, 0);
1436 intel_ring_emit(engine, MI_USER_INTERRUPT);
1437 intel_ring_emit(engine, MI_NOOP);
1438 __intel_ring_advance(engine);
1439
1440 return 0;
1441}
1442
1443static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
1424 u32 seqno) 1444 u32 seqno)
1425{ 1445{
1426 struct drm_i915_private *dev_priv = dev->dev_private;
1427 return dev_priv->last_seqno < seqno; 1446 return dev_priv->last_seqno < seqno;
1428} 1447}
1429 1448
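The new gen8_render_add_request() above emits a fixed eight-dword packet: a six-dword PIPE_CONTROL that stalls the CS and posts a qword write of the request's seqno to the hardware status page (clobbering one neighbouring dword, as the comment notes), followed by MI_USER_INTERRUPT and an MI_NOOP pad. A toy emitter showing that shape; the opcode encodings below are placeholders, and only the eight-dword layout is taken from the diff.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder encodings; the real opcode values live in i915_reg.h. */
#define OP_PIPE_CONTROL(n) (0x7a000000u | ((uint32_t)(n) - 2))
#define FLAG_CS_STALL      (1u << 20)
#define FLAG_QW_WRITE      (1u << 14)
#define OP_USER_INTERRUPT  0x02000000u
#define OP_NOOP            0x00000000u

static int emit_add_request(uint32_t *ring, uint32_t hws_addr, uint32_t seqno)
{
    int n = 0;

    ring[n++] = OP_PIPE_CONTROL(6);
    ring[n++] = FLAG_CS_STALL | FLAG_QW_WRITE;  /* stall CS, post a QW write */
    ring[n++] = hws_addr;                       /* where the seqno lands */
    ring[n++] = 0;                              /* upper address bits */
    ring[n++] = seqno;
    ring[n++] = 0;                              /* high dword of the QW */
    ring[n++] = OP_USER_INTERRUPT;              /* wake up waiters */
    ring[n++] = OP_NOOP;                        /* pad to an even length */
    return n;
}

int main(void)
{
    uint32_t ring[8];

    assert(emit_add_request(ring, 0x1000, 42) == 8);
    printf("emitted 8 dwords, seqno %u\n", (unsigned int)ring[4]);
    return 0;
}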
@@ -1441,7 +1460,8 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
1441 u32 seqno) 1460 u32 seqno)
1442{ 1461{
1443 struct intel_engine_cs *waiter = waiter_req->engine; 1462 struct intel_engine_cs *waiter = waiter_req->engine;
1444 struct drm_i915_private *dev_priv = waiter->dev->dev_private; 1463 struct drm_i915_private *dev_priv = waiter_req->i915;
1464 struct i915_hw_ppgtt *ppgtt;
1445 int ret; 1465 int ret;
1446 1466
1447 ret = intel_ring_begin(waiter_req, 4); 1467 ret = intel_ring_begin(waiter_req, 4);
@@ -1450,7 +1470,6 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
1450 1470
1451 intel_ring_emit(waiter, MI_SEMAPHORE_WAIT | 1471 intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
1452 MI_SEMAPHORE_GLOBAL_GTT | 1472 MI_SEMAPHORE_GLOBAL_GTT |
1453 MI_SEMAPHORE_POLL |
1454 MI_SEMAPHORE_SAD_GTE_SDD); 1473 MI_SEMAPHORE_SAD_GTE_SDD);
1455 intel_ring_emit(waiter, seqno); 1474 intel_ring_emit(waiter, seqno);
1456 intel_ring_emit(waiter, 1475 intel_ring_emit(waiter,
@@ -1458,6 +1477,15 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
1458 intel_ring_emit(waiter, 1477 intel_ring_emit(waiter,
1459 upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id))); 1478 upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
1460 intel_ring_advance(waiter); 1479 intel_ring_advance(waiter);
1480
1481 /* When the !RCS engines idle waiting upon a semaphore, they lose their
1482 * pagetables and we must reload them before executing the batch.
1483 * We do this on the i915_switch_context() following the wait and
1484 * before the dispatch.
1485 */
1486 ppgtt = waiter_req->ctx->ppgtt;
1487 if (ppgtt && waiter_req->engine->id != RCS)
1488 ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
1461 return 0; 1489 return 0;
1462} 1490}
1463 1491
@@ -1486,7 +1514,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
1486 return ret; 1514 return ret;
1487 1515
1488 /* If seqno wrap happened, omit the wait with no-ops */ 1516 /* If seqno wrap happened, omit the wait with no-ops */
1489 if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) { 1517 if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
1490 intel_ring_emit(waiter, dw1 | wait_mbox); 1518 intel_ring_emit(waiter, dw1 | wait_mbox);
1491 intel_ring_emit(waiter, seqno); 1519 intel_ring_emit(waiter, seqno);
1492 intel_ring_emit(waiter, 0); 1520 intel_ring_emit(waiter, 0);
@@ -1567,7 +1595,7 @@ pc_render_add_request(struct drm_i915_gem_request *req)
1567static void 1595static void
1568gen6_seqno_barrier(struct intel_engine_cs *engine) 1596gen6_seqno_barrier(struct intel_engine_cs *engine)
1569{ 1597{
1570 struct drm_i915_private *dev_priv = engine->dev->dev_private; 1598 struct drm_i915_private *dev_priv = engine->i915;
1571 1599
1572 /* Workaround to force correct ordering between irq and seqno writes on 1600 /* Workaround to force correct ordering between irq and seqno writes on
1573 * ivb (and maybe also on snb) by reading from a CS register (like 1601 * ivb (and maybe also on snb) by reading from a CS register (like
@@ -1616,8 +1644,7 @@ pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1616static bool 1644static bool
1617gen5_ring_get_irq(struct intel_engine_cs *engine) 1645gen5_ring_get_irq(struct intel_engine_cs *engine)
1618{ 1646{
1619 struct drm_device *dev = engine->dev; 1647 struct drm_i915_private *dev_priv = engine->i915;
1620 struct drm_i915_private *dev_priv = dev->dev_private;
1621 unsigned long flags; 1648 unsigned long flags;
1622 1649
1623 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 1650 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1634,8 +1661,7 @@ gen5_ring_get_irq(struct intel_engine_cs *engine)
1634static void 1661static void
1635gen5_ring_put_irq(struct intel_engine_cs *engine) 1662gen5_ring_put_irq(struct intel_engine_cs *engine)
1636{ 1663{
1637 struct drm_device *dev = engine->dev; 1664 struct drm_i915_private *dev_priv = engine->i915;
1638 struct drm_i915_private *dev_priv = dev->dev_private;
1639 unsigned long flags; 1665 unsigned long flags;
1640 1666
1641 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1667 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1647,8 +1673,7 @@ gen5_ring_put_irq(struct intel_engine_cs *engine)
1647static bool 1673static bool
1648i9xx_ring_get_irq(struct intel_engine_cs *engine) 1674i9xx_ring_get_irq(struct intel_engine_cs *engine)
1649{ 1675{
1650 struct drm_device *dev = engine->dev; 1676 struct drm_i915_private *dev_priv = engine->i915;
1651 struct drm_i915_private *dev_priv = dev->dev_private;
1652 unsigned long flags; 1677 unsigned long flags;
1653 1678
1654 if (!intel_irqs_enabled(dev_priv)) 1679 if (!intel_irqs_enabled(dev_priv))
@@ -1668,8 +1693,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *engine)
1668static void 1693static void
1669i9xx_ring_put_irq(struct intel_engine_cs *engine) 1694i9xx_ring_put_irq(struct intel_engine_cs *engine)
1670{ 1695{
1671 struct drm_device *dev = engine->dev; 1696 struct drm_i915_private *dev_priv = engine->i915;
1672 struct drm_i915_private *dev_priv = dev->dev_private;
1673 unsigned long flags; 1697 unsigned long flags;
1674 1698
1675 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1699 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1684,8 +1708,7 @@ i9xx_ring_put_irq(struct intel_engine_cs *engine)
1684static bool 1708static bool
1685i8xx_ring_get_irq(struct intel_engine_cs *engine) 1709i8xx_ring_get_irq(struct intel_engine_cs *engine)
1686{ 1710{
1687 struct drm_device *dev = engine->dev; 1711 struct drm_i915_private *dev_priv = engine->i915;
1688 struct drm_i915_private *dev_priv = dev->dev_private;
1689 unsigned long flags; 1712 unsigned long flags;
1690 1713
1691 if (!intel_irqs_enabled(dev_priv)) 1714 if (!intel_irqs_enabled(dev_priv))
@@ -1705,8 +1728,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *engine)
1705static void 1728static void
1706i8xx_ring_put_irq(struct intel_engine_cs *engine) 1729i8xx_ring_put_irq(struct intel_engine_cs *engine)
1707{ 1730{
1708 struct drm_device *dev = engine->dev; 1731 struct drm_i915_private *dev_priv = engine->i915;
1709 struct drm_i915_private *dev_priv = dev->dev_private;
1710 unsigned long flags; 1732 unsigned long flags;
1711 1733
1712 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1734 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1759,8 +1781,7 @@ i9xx_add_request(struct drm_i915_gem_request *req)
1759static bool 1781static bool
1760gen6_ring_get_irq(struct intel_engine_cs *engine) 1782gen6_ring_get_irq(struct intel_engine_cs *engine)
1761{ 1783{
1762 struct drm_device *dev = engine->dev; 1784 struct drm_i915_private *dev_priv = engine->i915;
1763 struct drm_i915_private *dev_priv = dev->dev_private;
1764 unsigned long flags; 1785 unsigned long flags;
1765 1786
1766 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 1787 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1768,10 +1789,10 @@ gen6_ring_get_irq(struct intel_engine_cs *engine)
1768 1789
1769 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1790 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1770 if (engine->irq_refcount++ == 0) { 1791 if (engine->irq_refcount++ == 0) {
1771 if (HAS_L3_DPF(dev) && engine->id == RCS) 1792 if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
1772 I915_WRITE_IMR(engine, 1793 I915_WRITE_IMR(engine,
1773 ~(engine->irq_enable_mask | 1794 ~(engine->irq_enable_mask |
1774 GT_PARITY_ERROR(dev))); 1795 GT_PARITY_ERROR(dev_priv)));
1775 else 1796 else
1776 I915_WRITE_IMR(engine, ~engine->irq_enable_mask); 1797 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1777 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); 1798 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
@@ -1784,14 +1805,13 @@ gen6_ring_get_irq(struct intel_engine_cs *engine)
1784static void 1805static void
1785gen6_ring_put_irq(struct intel_engine_cs *engine) 1806gen6_ring_put_irq(struct intel_engine_cs *engine)
1786{ 1807{
1787 struct drm_device *dev = engine->dev; 1808 struct drm_i915_private *dev_priv = engine->i915;
1788 struct drm_i915_private *dev_priv = dev->dev_private;
1789 unsigned long flags; 1809 unsigned long flags;
1790 1810
1791 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1811 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1792 if (--engine->irq_refcount == 0) { 1812 if (--engine->irq_refcount == 0) {
1793 if (HAS_L3_DPF(dev) && engine->id == RCS) 1813 if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
1794 I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev)); 1814 I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
1795 else 1815 else
1796 I915_WRITE_IMR(engine, ~0); 1816 I915_WRITE_IMR(engine, ~0);
1797 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); 1817 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
@@ -1802,8 +1822,7 @@ gen6_ring_put_irq(struct intel_engine_cs *engine)
1802static bool 1822static bool
1803hsw_vebox_get_irq(struct intel_engine_cs *engine) 1823hsw_vebox_get_irq(struct intel_engine_cs *engine)
1804{ 1824{
1805 struct drm_device *dev = engine->dev; 1825 struct drm_i915_private *dev_priv = engine->i915;
1806 struct drm_i915_private *dev_priv = dev->dev_private;
1807 unsigned long flags; 1826 unsigned long flags;
1808 1827
1809 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 1828 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1822,8 +1841,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *engine)
1822static void 1841static void
1823hsw_vebox_put_irq(struct intel_engine_cs *engine) 1842hsw_vebox_put_irq(struct intel_engine_cs *engine)
1824{ 1843{
1825 struct drm_device *dev = engine->dev; 1844 struct drm_i915_private *dev_priv = engine->i915;
1826 struct drm_i915_private *dev_priv = dev->dev_private;
1827 unsigned long flags; 1845 unsigned long flags;
1828 1846
1829 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1847 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1837,8 +1855,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *engine)
1837static bool 1855static bool
1838gen8_ring_get_irq(struct intel_engine_cs *engine) 1856gen8_ring_get_irq(struct intel_engine_cs *engine)
1839{ 1857{
1840 struct drm_device *dev = engine->dev; 1858 struct drm_i915_private *dev_priv = engine->i915;
1841 struct drm_i915_private *dev_priv = dev->dev_private;
1842 unsigned long flags; 1859 unsigned long flags;
1843 1860
1844 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 1861 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1846,7 +1863,7 @@ gen8_ring_get_irq(struct intel_engine_cs *engine)
1846 1863
1847 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1864 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1848 if (engine->irq_refcount++ == 0) { 1865 if (engine->irq_refcount++ == 0) {
1849 if (HAS_L3_DPF(dev) && engine->id == RCS) { 1866 if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
1850 I915_WRITE_IMR(engine, 1867 I915_WRITE_IMR(engine,
1851 ~(engine->irq_enable_mask | 1868 ~(engine->irq_enable_mask |
1852 GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); 1869 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
@@ -1863,13 +1880,12 @@ gen8_ring_get_irq(struct intel_engine_cs *engine)
1863static void 1880static void
1864gen8_ring_put_irq(struct intel_engine_cs *engine) 1881gen8_ring_put_irq(struct intel_engine_cs *engine)
1865{ 1882{
1866 struct drm_device *dev = engine->dev; 1883 struct drm_i915_private *dev_priv = engine->i915;
1867 struct drm_i915_private *dev_priv = dev->dev_private;
1868 unsigned long flags; 1884 unsigned long flags;
1869 1885
1870 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1886 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1871 if (--engine->irq_refcount == 0) { 1887 if (--engine->irq_refcount == 0) {
1872 if (HAS_L3_DPF(dev) && engine->id == RCS) { 1888 if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
1873 I915_WRITE_IMR(engine, 1889 I915_WRITE_IMR(engine,
1874 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); 1890 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1875 } else { 1891 } else {
@@ -1991,12 +2007,12 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
1991 2007
1992static void cleanup_phys_status_page(struct intel_engine_cs *engine) 2008static void cleanup_phys_status_page(struct intel_engine_cs *engine)
1993{ 2009{
1994 struct drm_i915_private *dev_priv = to_i915(engine->dev); 2010 struct drm_i915_private *dev_priv = engine->i915;
1995 2011
1996 if (!dev_priv->status_page_dmah) 2012 if (!dev_priv->status_page_dmah)
1997 return; 2013 return;
1998 2014
1999 drm_pci_free(engine->dev, dev_priv->status_page_dmah); 2015 drm_pci_free(dev_priv->dev, dev_priv->status_page_dmah);
2000 engine->status_page.page_addr = NULL; 2016 engine->status_page.page_addr = NULL;
2001} 2017}
2002 2018
@@ -2022,10 +2038,10 @@ static int init_status_page(struct intel_engine_cs *engine)
2022 unsigned flags; 2038 unsigned flags;
2023 int ret; 2039 int ret;
2024 2040
2025 obj = i915_gem_alloc_object(engine->dev, 4096); 2041 obj = i915_gem_object_create(engine->i915->dev, 4096);
2026 if (obj == NULL) { 2042 if (IS_ERR(obj)) {
2027 DRM_ERROR("Failed to allocate status page\n"); 2043 DRM_ERROR("Failed to allocate status page\n");
2028 return -ENOMEM; 2044 return PTR_ERR(obj);
2029 } 2045 }
2030 2046
2031 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 2047 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
@@ -2033,7 +2049,7 @@ static int init_status_page(struct intel_engine_cs *engine)
2033 goto err_unref; 2049 goto err_unref;
2034 2050
2035 flags = 0; 2051 flags = 0;
2036 if (!HAS_LLC(engine->dev)) 2052 if (!HAS_LLC(engine->i915))
2037 /* On g33, we cannot place HWS above 256MiB, so 2053 /* On g33, we cannot place HWS above 256MiB, so
2038 * restrict its pinning to the low mappable arena. 2054 * restrict its pinning to the low mappable arena.
2039 * Though this restriction is not documented for 2055 * Though this restriction is not documented for
@@ -2067,11 +2083,11 @@ err_unref:
2067 2083
2068static int init_phys_status_page(struct intel_engine_cs *engine) 2084static int init_phys_status_page(struct intel_engine_cs *engine)
2069{ 2085{
2070 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2086 struct drm_i915_private *dev_priv = engine->i915;
2071 2087
2072 if (!dev_priv->status_page_dmah) { 2088 if (!dev_priv->status_page_dmah) {
2073 dev_priv->status_page_dmah = 2089 dev_priv->status_page_dmah =
2074 drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE); 2090 drm_pci_alloc(dev_priv->dev, PAGE_SIZE, PAGE_SIZE);
2075 if (!dev_priv->status_page_dmah) 2091 if (!dev_priv->status_page_dmah)
2076 return -ENOMEM; 2092 return -ENOMEM;
2077 } 2093 }
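The allocation change in init_status_page() above is part of a wider i915_gem_alloc_object() to i915_gem_object_create() conversion: the new allocator reports failure through an ERR_PTR-encoded pointer rather than NULL, so callers switch from a NULL test returning -ENOMEM to IS_ERR()/PTR_ERR(). A sketch of the calling convention, using a hypothetical foo_obj_create() that only stands in for an allocator with the same contract:

    #include <linux/err.h>
    #include <linux/types.h>

    /* Hypothetical allocator following the same ERR_PTR contract as
     * i915_gem_object_create() in the hunk above. */
    struct foo_obj *foo_obj_create(size_t size);

    static int foo_init(void)
    {
        struct foo_obj *obj = foo_obj_create(4096);

        if (IS_ERR(obj))             /* was: if (obj == NULL)  */
            return PTR_ERR(obj);     /* was: return -ENOMEM;   */

        /* ... set cache level, pin, etc. ... */
        return 0;
    }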
@@ -2084,20 +2100,22 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
2084 2100
2085void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) 2101void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
2086{ 2102{
2103 GEM_BUG_ON(ringbuf->vma == NULL);
2104 GEM_BUG_ON(ringbuf->virtual_start == NULL);
2105
2087 if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen) 2106 if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
2088 i915_gem_object_unpin_map(ringbuf->obj); 2107 i915_gem_object_unpin_map(ringbuf->obj);
2089 else 2108 else
2090 iounmap(ringbuf->virtual_start); 2109 i915_vma_unpin_iomap(ringbuf->vma);
2091 ringbuf->virtual_start = NULL; 2110 ringbuf->virtual_start = NULL;
2092 ringbuf->vma = NULL; 2111
2093 i915_gem_object_ggtt_unpin(ringbuf->obj); 2112 i915_gem_object_ggtt_unpin(ringbuf->obj);
2113 ringbuf->vma = NULL;
2094} 2114}
2095 2115
2096int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, 2116int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
2097 struct intel_ringbuffer *ringbuf) 2117 struct intel_ringbuffer *ringbuf)
2098{ 2118{
2099 struct drm_i915_private *dev_priv = to_i915(dev);
2100 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2101 struct drm_i915_gem_object *obj = ringbuf->obj; 2119 struct drm_i915_gem_object *obj = ringbuf->obj;
2102 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ 2120 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
2103 unsigned flags = PIN_OFFSET_BIAS | 4096; 2121 unsigned flags = PIN_OFFSET_BIAS | 4096;
@@ -2131,10 +2149,9 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
2131 /* Access through the GTT requires the device to be awake. */ 2149 /* Access through the GTT requires the device to be awake. */
2132 assert_rpm_wakelock_held(dev_priv); 2150 assert_rpm_wakelock_held(dev_priv);
2133 2151
2134 addr = ioremap_wc(ggtt->mappable_base + 2152 addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
2135 i915_gem_obj_ggtt_offset(obj), ringbuf->size); 2153 if (IS_ERR(addr)) {
2136 if (addr == NULL) { 2154 ret = PTR_ERR(addr);
2137 ret = -ENOMEM;
2138 goto err_unpin; 2155 goto err_unpin;
2139 } 2156 }
2140 } 2157 }
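Pinning and mapping the ringbuffer now keys off dev_priv and, on the non-LLC path, goes through i915_vma_pin_iomap() on the object's GGTT vma instead of a raw ioremap_wc() of the mappable aperture; the mapping is tracked in ringbuf->vma so intel_unpin_ringbuffer_obj() can undo it with i915_vma_unpin_iomap(). The error convention also changes from a NULL check to IS_ERR()/PTR_ERR(). A condensed sketch of the new pairing (locking, pin flags and the LLC fast path are omitted, so this is illustrative rather than the exact driver flow):

    /* inside intel_pin_and_map_ringbuffer_obj(): */
    addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
    if (IS_ERR(addr)) {
        ret = PTR_ERR(addr);          /* was: addr == NULL -> -ENOMEM */
        goto err_unpin;
    }
    ringbuf->virtual_start = addr;
    ringbuf->vma = i915_gem_obj_to_ggtt(obj);

    /* inside intel_unpin_ringbuffer_obj(): release the iomap, then the pin */
    i915_vma_unpin_iomap(ringbuf->vma);
    ringbuf->virtual_start = NULL;
    i915_gem_object_ggtt_unpin(ringbuf->obj);
    ringbuf->vma = NULL;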
@@ -2163,9 +2180,9 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
2163 if (!HAS_LLC(dev)) 2180 if (!HAS_LLC(dev))
2164 obj = i915_gem_object_create_stolen(dev, ringbuf->size); 2181 obj = i915_gem_object_create_stolen(dev, ringbuf->size);
2165 if (obj == NULL) 2182 if (obj == NULL)
2166 obj = i915_gem_alloc_object(dev, ringbuf->size); 2183 obj = i915_gem_object_create(dev, ringbuf->size);
2167 if (obj == NULL) 2184 if (IS_ERR(obj))
2168 return -ENOMEM; 2185 return PTR_ERR(obj);
2169 2186
2170 /* mark ring buffers as read-only from GPU side by default */ 2187 /* mark ring buffers as read-only from GPU side by default */
2171 obj->gt_ro = 1; 2188 obj->gt_ro = 1;
@@ -2197,13 +2214,13 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
2197 * of the buffer. 2214 * of the buffer.
2198 */ 2215 */
2199 ring->effective_size = size; 2216 ring->effective_size = size;
2200 if (IS_I830(engine->dev) || IS_845G(engine->dev)) 2217 if (IS_I830(engine->i915) || IS_845G(engine->i915))
2201 ring->effective_size -= 2 * CACHELINE_BYTES; 2218 ring->effective_size -= 2 * CACHELINE_BYTES;
2202 2219
2203 ring->last_retired_head = -1; 2220 ring->last_retired_head = -1;
2204 intel_ring_update_space(ring); 2221 intel_ring_update_space(ring);
2205 2222
2206 ret = intel_alloc_ringbuffer_obj(engine->dev, ring); 2223 ret = intel_alloc_ringbuffer_obj(engine->i915->dev, ring);
2207 if (ret) { 2224 if (ret) {
2208 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n", 2225 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
2209 engine->name, ret); 2226 engine->name, ret);
@@ -2226,12 +2243,13 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring)
2226static int intel_init_ring_buffer(struct drm_device *dev, 2243static int intel_init_ring_buffer(struct drm_device *dev,
2227 struct intel_engine_cs *engine) 2244 struct intel_engine_cs *engine)
2228{ 2245{
2246 struct drm_i915_private *dev_priv = to_i915(dev);
2229 struct intel_ringbuffer *ringbuf; 2247 struct intel_ringbuffer *ringbuf;
2230 int ret; 2248 int ret;
2231 2249
2232 WARN_ON(engine->buffer); 2250 WARN_ON(engine->buffer);
2233 2251
2234 engine->dev = dev; 2252 engine->i915 = dev_priv;
2235 INIT_LIST_HEAD(&engine->active_list); 2253 INIT_LIST_HEAD(&engine->active_list);
2236 INIT_LIST_HEAD(&engine->request_list); 2254 INIT_LIST_HEAD(&engine->request_list);
2237 INIT_LIST_HEAD(&engine->execlist_queue); 2255 INIT_LIST_HEAD(&engine->execlist_queue);
@@ -2249,7 +2267,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
2249 } 2267 }
2250 engine->buffer = ringbuf; 2268 engine->buffer = ringbuf;
2251 2269
2252 if (I915_NEED_GFX_HWS(dev)) { 2270 if (I915_NEED_GFX_HWS(dev_priv)) {
2253 ret = init_status_page(engine); 2271 ret = init_status_page(engine);
2254 if (ret) 2272 if (ret)
2255 goto error; 2273 goto error;
@@ -2260,7 +2278,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
2260 goto error; 2278 goto error;
2261 } 2279 }
2262 2280
2263 ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); 2281 ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
2264 if (ret) { 2282 if (ret) {
2265 DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", 2283 DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
2266 engine->name, ret); 2284 engine->name, ret);
@@ -2286,11 +2304,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
2286 if (!intel_engine_initialized(engine)) 2304 if (!intel_engine_initialized(engine))
2287 return; 2305 return;
2288 2306
2289 dev_priv = to_i915(engine->dev); 2307 dev_priv = engine->i915;
2290 2308
2291 if (engine->buffer) { 2309 if (engine->buffer) {
2292 intel_stop_engine(engine); 2310 intel_stop_engine(engine);
2293 WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); 2311 WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
2294 2312
2295 intel_unpin_ringbuffer_obj(engine->buffer); 2313 intel_unpin_ringbuffer_obj(engine->buffer);
2296 intel_ringbuffer_free(engine->buffer); 2314 intel_ringbuffer_free(engine->buffer);
@@ -2300,7 +2318,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
2300 if (engine->cleanup) 2318 if (engine->cleanup)
2301 engine->cleanup(engine); 2319 engine->cleanup(engine);
2302 2320
2303 if (I915_NEED_GFX_HWS(engine->dev)) { 2321 if (I915_NEED_GFX_HWS(dev_priv)) {
2304 cleanup_status_page(engine); 2322 cleanup_status_page(engine);
2305 } else { 2323 } else {
2306 WARN_ON(engine->id != RCS); 2324 WARN_ON(engine->id != RCS);
@@ -2309,7 +2327,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
2309 2327
2310 i915_cmd_parser_fini_ring(engine); 2328 i915_cmd_parser_fini_ring(engine);
2311 i915_gem_batch_pool_fini(&engine->batch_pool); 2329 i915_gem_batch_pool_fini(&engine->batch_pool);
2312 engine->dev = NULL; 2330 engine->i915 = NULL;
2313} 2331}
2314 2332
2315int intel_engine_idle(struct intel_engine_cs *engine) 2333int intel_engine_idle(struct intel_engine_cs *engine)
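intel_init_ring_buffer() now publishes the back-pointer with engine->i915 = dev_priv, and intel_cleanup_engine() clears it again; together with the intel_engine_initialized() change in the header further down, the i915 pointer takes over the "engine has been set up" role that engine->dev used to play. Condensed, the lifecycle is:

    /* intel_init_ring_buffer(): publish the back-pointer first */
    engine->i915 = dev_priv;

    /* intel_engine_initialized() (header hunk below) keys off it:
     *     return engine->i915 != NULL;
     */

    /* intel_cleanup_engine(): clear it last, after freeing per-engine state */
    engine->i915 = NULL;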
@@ -2332,46 +2350,22 @@ int intel_engine_idle(struct intel_engine_cs *engine)
2332 2350
2333int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) 2351int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
2334{ 2352{
2335 request->ringbuf = request->engine->buffer; 2353 int ret;
2336 return 0;
2337}
2338 2354
2339int intel_ring_reserve_space(struct drm_i915_gem_request *request) 2355 /* Flush enough space to reduce the likelihood of waiting after
2340{ 2356 * we start building the request - in which case we will just
2341 /* 2357 * have to repeat work.
2342 * The first call merely notes the reserve request and is common for
2343 * all back ends. The subsequent localised _begin() call actually
2344 * ensures that the reservation is available. Without the begin, if
2345 * the request creator immediately submitted the request without
2346 * adding any commands to it then there might not actually be
2347 * sufficient room for the submission commands.
2348 */ 2358 */
2349 intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); 2359 request->reserved_space += LEGACY_REQUEST_SIZE;
2350 2360
2351 return intel_ring_begin(request, 0); 2361 request->ringbuf = request->engine->buffer;
2352}
2353
2354void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
2355{
2356 GEM_BUG_ON(ringbuf->reserved_size);
2357 ringbuf->reserved_size = size;
2358}
2359
2360void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
2361{
2362 GEM_BUG_ON(!ringbuf->reserved_size);
2363 ringbuf->reserved_size = 0;
2364}
2365 2362
2366void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf) 2363 ret = intel_ring_begin(request, 0);
2367{ 2364 if (ret)
2368 GEM_BUG_ON(!ringbuf->reserved_size); 2365 return ret;
2369 ringbuf->reserved_size = 0;
2370}
2371 2366
2372void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf) 2367 request->reserved_space -= LEGACY_REQUEST_SIZE;
2373{ 2368 return 0;
2374 GEM_BUG_ON(ringbuf->reserved_size);
2375} 2369}
2376 2370
2377static int wait_for_space(struct drm_i915_gem_request *req, int bytes) 2371static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
@@ -2393,7 +2387,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
2393 * 2387 *
2394 * See also i915_gem_request_alloc() and i915_add_request(). 2388 * See also i915_gem_request_alloc() and i915_add_request().
2395 */ 2389 */
2396 GEM_BUG_ON(!ringbuf->reserved_size); 2390 GEM_BUG_ON(!req->reserved_space);
2397 2391
2398 list_for_each_entry(target, &engine->request_list, list) { 2392 list_for_each_entry(target, &engine->request_list, list) {
2399 unsigned space; 2393 unsigned space;
@@ -2428,7 +2422,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
2428 int total_bytes, wait_bytes; 2422 int total_bytes, wait_bytes;
2429 bool need_wrap = false; 2423 bool need_wrap = false;
2430 2424
2431 total_bytes = bytes + ringbuf->reserved_size; 2425 total_bytes = bytes + req->reserved_space;
2432 2426
2433 if (unlikely(bytes > remain_usable)) { 2427 if (unlikely(bytes > remain_usable)) {
2434 /* 2428 /*
@@ -2444,7 +2438,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
2444 * and only need to effectively wait for the reserved 2438 * and only need to effectively wait for the reserved
2445 * size space from the start of ringbuffer. 2439 * size space from the start of ringbuffer.
2446 */ 2440 */
2447 wait_bytes = remain_actual + ringbuf->reserved_size; 2441 wait_bytes = remain_actual + req->reserved_space;
2448 } else { 2442 } else {
2449 /* No wrapping required, just waiting. */ 2443 /* No wrapping required, just waiting. */
2450 wait_bytes = total_bytes; 2444 wait_bytes = total_bytes;
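With the per-ringbuffer reserved_size gone, intel_ring_begin() folds the request's own reserve into its space accounting: total_bytes = bytes + req->reserved_space, and when the emission would have to wrap, it waits for remain_actual + req->reserved_space from the start of the ring instead. A worked example with invented numbers, simplified to the two branches visible in this hunk:

    #include <stdio.h>

    int main(void)
    {
        /* invented numbers: caller wants 128 bytes, 64 usable before the
         * wrap point, 96 until the hard end, 160 bytes still reserved */
        int bytes = 128, reserved_space = 160;
        int remain_usable = 64, remain_actual = 96;

        int total_bytes = bytes + reserved_space;
        int wait_bytes = (bytes > remain_usable)
                       ? remain_actual + reserved_space   /* must wrap */
                       : total_bytes;

        printf("total %d, wait for %d\n", total_bytes, wait_bytes); /* 288, 256 */
        return 0;
    }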
@@ -2501,7 +2495,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
2501 2495
2502void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) 2496void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
2503{ 2497{
2504 struct drm_i915_private *dev_priv = to_i915(engine->dev); 2498 struct drm_i915_private *dev_priv = engine->i915;
2505 2499
2506 /* Our semaphore implementation is strictly monotonic (i.e. we proceed 2500 /* Our semaphore implementation is strictly monotonic (i.e. we proceed
2507 * so long as the semaphore value in the register/page is greater 2501 * so long as the semaphore value in the register/page is greater
@@ -2511,7 +2505,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
2511 * the semaphore value, then when the seqno moves backwards all 2505 * the semaphore value, then when the seqno moves backwards all
2512 * future waits will complete instantly (causing rendering corruption). 2506 * future waits will complete instantly (causing rendering corruption).
2513 */ 2507 */
2514 if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) { 2508 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
2515 I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); 2509 I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
2516 I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); 2510 I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
2517 if (HAS_VEBOX(dev_priv)) 2511 if (HAS_VEBOX(dev_priv))
@@ -2537,7 +2531,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
2537static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine, 2531static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
2538 u32 value) 2532 u32 value)
2539{ 2533{
2540 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2534 struct drm_i915_private *dev_priv = engine->i915;
2541 2535
2542 /* Every tail move must follow the sequence below */ 2536 /* Every tail move must follow the sequence below */
2543 2537
@@ -2579,7 +2573,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
2579 return ret; 2573 return ret;
2580 2574
2581 cmd = MI_FLUSH_DW; 2575 cmd = MI_FLUSH_DW;
2582 if (INTEL_INFO(engine->dev)->gen >= 8) 2576 if (INTEL_GEN(req->i915) >= 8)
2583 cmd += 1; 2577 cmd += 1;
2584 2578
2585 /* We always require a command barrier so that subsequent 2579 /* We always require a command barrier so that subsequent
@@ -2601,7 +2595,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
2601 intel_ring_emit(engine, cmd); 2595 intel_ring_emit(engine, cmd);
2602 intel_ring_emit(engine, 2596 intel_ring_emit(engine,
2603 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2597 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
2604 if (INTEL_INFO(engine->dev)->gen >= 8) { 2598 if (INTEL_GEN(req->i915) >= 8) {
2605 intel_ring_emit(engine, 0); /* upper addr */ 2599 intel_ring_emit(engine, 0); /* upper addr */
2606 intel_ring_emit(engine, 0); /* value */ 2600 intel_ring_emit(engine, 0); /* value */
2607 } else { 2601 } else {
@@ -2692,7 +2686,6 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
2692 u32 invalidate, u32 flush) 2686 u32 invalidate, u32 flush)
2693{ 2687{
2694 struct intel_engine_cs *engine = req->engine; 2688 struct intel_engine_cs *engine = req->engine;
2695 struct drm_device *dev = engine->dev;
2696 uint32_t cmd; 2689 uint32_t cmd;
2697 int ret; 2690 int ret;
2698 2691
@@ -2701,7 +2694,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
2701 return ret; 2694 return ret;
2702 2695
2703 cmd = MI_FLUSH_DW; 2696 cmd = MI_FLUSH_DW;
2704 if (INTEL_INFO(dev)->gen >= 8) 2697 if (INTEL_GEN(req->i915) >= 8)
2705 cmd += 1; 2698 cmd += 1;
2706 2699
2707 /* We always require a command barrier so that subsequent 2700 /* We always require a command barrier so that subsequent
@@ -2722,7 +2715,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
2722 intel_ring_emit(engine, cmd); 2715 intel_ring_emit(engine, cmd);
2723 intel_ring_emit(engine, 2716 intel_ring_emit(engine,
2724 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2717 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
2725 if (INTEL_INFO(dev)->gen >= 8) { 2718 if (INTEL_GEN(req->i915) >= 8) {
2726 intel_ring_emit(engine, 0); /* upper addr */ 2719 intel_ring_emit(engine, 0); /* upper addr */
2727 intel_ring_emit(engine, 0); /* value */ 2720 intel_ring_emit(engine, 0); /* value */
2728 } else { 2721 } else {
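Both BSD and blitter flush paths now read the generation from req->i915 instead of walking engine->dev, so the request itself carries everything the emitter needs. The gen8+ branch also shows why cmd += 1: the MI_FLUSH_DW packet is one dword longer there, emitting an extra zero dword for the upper address bits before the post-sync value (presumably because the post-sync address is 64 bits wide on gen8+; the diff itself only shows the extra dword). Condensed:

    /* inside the flush emitters, after intel_ring_begin() succeeded */
    cmd = MI_FLUSH_DW;
    if (INTEL_GEN(req->i915) >= 8)
        cmd += 1;                          /* packet is one dword longer */
    /* ... command barrier comment elided ... */
    intel_ring_emit(engine, cmd);
    intel_ring_emit(engine, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
    if (INTEL_GEN(req->i915) >= 8) {
        intel_ring_emit(engine, 0);        /* upper address bits */
        intel_ring_emit(engine, 0);        /* post-sync value    */
    }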
@@ -2747,10 +2740,10 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2747 engine->hw_id = 0; 2740 engine->hw_id = 0;
2748 engine->mmio_base = RENDER_RING_BASE; 2741 engine->mmio_base = RENDER_RING_BASE;
2749 2742
2750 if (INTEL_INFO(dev)->gen >= 8) { 2743 if (INTEL_GEN(dev_priv) >= 8) {
2751 if (i915_semaphore_is_enabled(dev)) { 2744 if (i915_semaphore_is_enabled(dev_priv)) {
2752 obj = i915_gem_alloc_object(dev, 4096); 2745 obj = i915_gem_object_create(dev, 4096);
2753 if (obj == NULL) { 2746 if (IS_ERR(obj)) {
2754 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n"); 2747 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
2755 i915.semaphores = 0; 2748 i915.semaphores = 0;
2756 } else { 2749 } else {
@@ -2766,25 +2759,24 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2766 } 2759 }
2767 2760
2768 engine->init_context = intel_rcs_ctx_init; 2761 engine->init_context = intel_rcs_ctx_init;
2769 engine->add_request = gen6_add_request; 2762 engine->add_request = gen8_render_add_request;
2770 engine->flush = gen8_render_ring_flush; 2763 engine->flush = gen8_render_ring_flush;
2771 engine->irq_get = gen8_ring_get_irq; 2764 engine->irq_get = gen8_ring_get_irq;
2772 engine->irq_put = gen8_ring_put_irq; 2765 engine->irq_put = gen8_ring_put_irq;
2773 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 2766 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2774 engine->irq_seqno_barrier = gen6_seqno_barrier;
2775 engine->get_seqno = ring_get_seqno; 2767 engine->get_seqno = ring_get_seqno;
2776 engine->set_seqno = ring_set_seqno; 2768 engine->set_seqno = ring_set_seqno;
2777 if (i915_semaphore_is_enabled(dev)) { 2769 if (i915_semaphore_is_enabled(dev_priv)) {
2778 WARN_ON(!dev_priv->semaphore_obj); 2770 WARN_ON(!dev_priv->semaphore_obj);
2779 engine->semaphore.sync_to = gen8_ring_sync; 2771 engine->semaphore.sync_to = gen8_ring_sync;
2780 engine->semaphore.signal = gen8_rcs_signal; 2772 engine->semaphore.signal = gen8_rcs_signal;
2781 GEN8_RING_SEMAPHORE_INIT(engine); 2773 GEN8_RING_SEMAPHORE_INIT(engine);
2782 } 2774 }
2783 } else if (INTEL_INFO(dev)->gen >= 6) { 2775 } else if (INTEL_GEN(dev_priv) >= 6) {
2784 engine->init_context = intel_rcs_ctx_init; 2776 engine->init_context = intel_rcs_ctx_init;
2785 engine->add_request = gen6_add_request; 2777 engine->add_request = gen6_add_request;
2786 engine->flush = gen7_render_ring_flush; 2778 engine->flush = gen7_render_ring_flush;
2787 if (INTEL_INFO(dev)->gen == 6) 2779 if (IS_GEN6(dev_priv))
2788 engine->flush = gen6_render_ring_flush; 2780 engine->flush = gen6_render_ring_flush;
2789 engine->irq_get = gen6_ring_get_irq; 2781 engine->irq_get = gen6_ring_get_irq;
2790 engine->irq_put = gen6_ring_put_irq; 2782 engine->irq_put = gen6_ring_put_irq;
@@ -2792,7 +2784,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2792 engine->irq_seqno_barrier = gen6_seqno_barrier; 2784 engine->irq_seqno_barrier = gen6_seqno_barrier;
2793 engine->get_seqno = ring_get_seqno; 2785 engine->get_seqno = ring_get_seqno;
2794 engine->set_seqno = ring_set_seqno; 2786 engine->set_seqno = ring_set_seqno;
2795 if (i915_semaphore_is_enabled(dev)) { 2787 if (i915_semaphore_is_enabled(dev_priv)) {
2796 engine->semaphore.sync_to = gen6_ring_sync; 2788 engine->semaphore.sync_to = gen6_ring_sync;
2797 engine->semaphore.signal = gen6_signal; 2789 engine->semaphore.signal = gen6_signal;
2798 /* 2790 /*
@@ -2813,7 +2805,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2813 engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; 2805 engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
2814 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; 2806 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2815 } 2807 }
2816 } else if (IS_GEN5(dev)) { 2808 } else if (IS_GEN5(dev_priv)) {
2817 engine->add_request = pc_render_add_request; 2809 engine->add_request = pc_render_add_request;
2818 engine->flush = gen4_render_ring_flush; 2810 engine->flush = gen4_render_ring_flush;
2819 engine->get_seqno = pc_render_get_seqno; 2811 engine->get_seqno = pc_render_get_seqno;
@@ -2824,13 +2816,13 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2824 GT_RENDER_PIPECTL_NOTIFY_INTERRUPT; 2816 GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
2825 } else { 2817 } else {
2826 engine->add_request = i9xx_add_request; 2818 engine->add_request = i9xx_add_request;
2827 if (INTEL_INFO(dev)->gen < 4) 2819 if (INTEL_GEN(dev_priv) < 4)
2828 engine->flush = gen2_render_ring_flush; 2820 engine->flush = gen2_render_ring_flush;
2829 else 2821 else
2830 engine->flush = gen4_render_ring_flush; 2822 engine->flush = gen4_render_ring_flush;
2831 engine->get_seqno = ring_get_seqno; 2823 engine->get_seqno = ring_get_seqno;
2832 engine->set_seqno = ring_set_seqno; 2824 engine->set_seqno = ring_set_seqno;
2833 if (IS_GEN2(dev)) { 2825 if (IS_GEN2(dev_priv)) {
2834 engine->irq_get = i8xx_ring_get_irq; 2826 engine->irq_get = i8xx_ring_get_irq;
2835 engine->irq_put = i8xx_ring_put_irq; 2827 engine->irq_put = i8xx_ring_put_irq;
2836 } else { 2828 } else {
@@ -2841,15 +2833,15 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2841 } 2833 }
2842 engine->write_tail = ring_write_tail; 2834 engine->write_tail = ring_write_tail;
2843 2835
2844 if (IS_HASWELL(dev)) 2836 if (IS_HASWELL(dev_priv))
2845 engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; 2837 engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
2846 else if (IS_GEN8(dev)) 2838 else if (IS_GEN8(dev_priv))
2847 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 2839 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2848 else if (INTEL_INFO(dev)->gen >= 6) 2840 else if (INTEL_GEN(dev_priv) >= 6)
2849 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2841 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2850 else if (INTEL_INFO(dev)->gen >= 4) 2842 else if (INTEL_GEN(dev_priv) >= 4)
2851 engine->dispatch_execbuffer = i965_dispatch_execbuffer; 2843 engine->dispatch_execbuffer = i965_dispatch_execbuffer;
2852 else if (IS_I830(dev) || IS_845G(dev)) 2844 else if (IS_I830(dev_priv) || IS_845G(dev_priv))
2853 engine->dispatch_execbuffer = i830_dispatch_execbuffer; 2845 engine->dispatch_execbuffer = i830_dispatch_execbuffer;
2854 else 2846 else
2855 engine->dispatch_execbuffer = i915_dispatch_execbuffer; 2847 engine->dispatch_execbuffer = i915_dispatch_execbuffer;
@@ -2857,11 +2849,11 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2857 engine->cleanup = render_ring_cleanup; 2849 engine->cleanup = render_ring_cleanup;
2858 2850
2859 /* Workaround batchbuffer to combat CS tlb bug. */ 2851 /* Workaround batchbuffer to combat CS tlb bug. */
2860 if (HAS_BROKEN_CS_TLB(dev)) { 2852 if (HAS_BROKEN_CS_TLB(dev_priv)) {
2861 obj = i915_gem_alloc_object(dev, I830_WA_SIZE); 2853 obj = i915_gem_object_create(dev, I830_WA_SIZE);
2862 if (obj == NULL) { 2854 if (IS_ERR(obj)) {
2863 DRM_ERROR("Failed to allocate batch bo\n"); 2855 DRM_ERROR("Failed to allocate batch bo\n");
2864 return -ENOMEM; 2856 return PTR_ERR(obj);
2865 } 2857 }
2866 2858
2867 ret = i915_gem_obj_ggtt_pin(obj, 0, 0); 2859 ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
@@ -2879,7 +2871,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2879 if (ret) 2871 if (ret)
2880 return ret; 2872 return ret;
2881 2873
2882 if (INTEL_INFO(dev)->gen >= 5) { 2874 if (INTEL_GEN(dev_priv) >= 5) {
2883 ret = intel_init_pipe_control(engine); 2875 ret = intel_init_pipe_control(engine);
2884 if (ret) 2876 if (ret)
2885 return ret; 2877 return ret;
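intel_init_render_ring_buffer() keeps its one-big-if structure but now branches on INTEL_GEN()/IS_GENx() against dev_priv, and the gen8 legacy path gains a dedicated gen8_render_add_request (replacing gen6_add_request) while dropping the stray gen6_seqno_barrier assignment. Stripped of the irq and semaphore plumbing, the vtable selection reads roughly as:

    if (INTEL_GEN(dev_priv) >= 8) {
        engine->add_request = gen8_render_add_request;
        engine->flush       = gen8_render_ring_flush;
    } else if (INTEL_GEN(dev_priv) >= 6) {
        engine->add_request = gen6_add_request;
        engine->flush       = IS_GEN6(dev_priv) ? gen6_render_ring_flush
                                                : gen7_render_ring_flush;
    } else if (IS_GEN5(dev_priv)) {
        engine->add_request = pc_render_add_request;
        engine->flush       = gen4_render_ring_flush;
    } else {
        engine->add_request = i9xx_add_request;
        engine->flush       = INTEL_GEN(dev_priv) < 4 ? gen2_render_ring_flush
                                                      : gen4_render_ring_flush;
    }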
@@ -2899,24 +2891,24 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
2899 engine->hw_id = 1; 2891 engine->hw_id = 1;
2900 2892
2901 engine->write_tail = ring_write_tail; 2893 engine->write_tail = ring_write_tail;
2902 if (INTEL_INFO(dev)->gen >= 6) { 2894 if (INTEL_GEN(dev_priv) >= 6) {
2903 engine->mmio_base = GEN6_BSD_RING_BASE; 2895 engine->mmio_base = GEN6_BSD_RING_BASE;
2904 /* gen6 bsd needs a special wa for tail updates */ 2896 /* gen6 bsd needs a special wa for tail updates */
2905 if (IS_GEN6(dev)) 2897 if (IS_GEN6(dev_priv))
2906 engine->write_tail = gen6_bsd_ring_write_tail; 2898 engine->write_tail = gen6_bsd_ring_write_tail;
2907 engine->flush = gen6_bsd_ring_flush; 2899 engine->flush = gen6_bsd_ring_flush;
2908 engine->add_request = gen6_add_request; 2900 engine->add_request = gen6_add_request;
2909 engine->irq_seqno_barrier = gen6_seqno_barrier; 2901 engine->irq_seqno_barrier = gen6_seqno_barrier;
2910 engine->get_seqno = ring_get_seqno; 2902 engine->get_seqno = ring_get_seqno;
2911 engine->set_seqno = ring_set_seqno; 2903 engine->set_seqno = ring_set_seqno;
2912 if (INTEL_INFO(dev)->gen >= 8) { 2904 if (INTEL_GEN(dev_priv) >= 8) {
2913 engine->irq_enable_mask = 2905 engine->irq_enable_mask =
2914 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; 2906 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
2915 engine->irq_get = gen8_ring_get_irq; 2907 engine->irq_get = gen8_ring_get_irq;
2916 engine->irq_put = gen8_ring_put_irq; 2908 engine->irq_put = gen8_ring_put_irq;
2917 engine->dispatch_execbuffer = 2909 engine->dispatch_execbuffer =
2918 gen8_ring_dispatch_execbuffer; 2910 gen8_ring_dispatch_execbuffer;
2919 if (i915_semaphore_is_enabled(dev)) { 2911 if (i915_semaphore_is_enabled(dev_priv)) {
2920 engine->semaphore.sync_to = gen8_ring_sync; 2912 engine->semaphore.sync_to = gen8_ring_sync;
2921 engine->semaphore.signal = gen8_xcs_signal; 2913 engine->semaphore.signal = gen8_xcs_signal;
2922 GEN8_RING_SEMAPHORE_INIT(engine); 2914 GEN8_RING_SEMAPHORE_INIT(engine);
@@ -2927,7 +2919,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
2927 engine->irq_put = gen6_ring_put_irq; 2919 engine->irq_put = gen6_ring_put_irq;
2928 engine->dispatch_execbuffer = 2920 engine->dispatch_execbuffer =
2929 gen6_ring_dispatch_execbuffer; 2921 gen6_ring_dispatch_execbuffer;
2930 if (i915_semaphore_is_enabled(dev)) { 2922 if (i915_semaphore_is_enabled(dev_priv)) {
2931 engine->semaphore.sync_to = gen6_ring_sync; 2923 engine->semaphore.sync_to = gen6_ring_sync;
2932 engine->semaphore.signal = gen6_signal; 2924 engine->semaphore.signal = gen6_signal;
2933 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR; 2925 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
@@ -2948,7 +2940,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
2948 engine->add_request = i9xx_add_request; 2940 engine->add_request = i9xx_add_request;
2949 engine->get_seqno = ring_get_seqno; 2941 engine->get_seqno = ring_get_seqno;
2950 engine->set_seqno = ring_set_seqno; 2942 engine->set_seqno = ring_set_seqno;
2951 if (IS_GEN5(dev)) { 2943 if (IS_GEN5(dev_priv)) {
2952 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; 2944 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2953 engine->irq_get = gen5_ring_get_irq; 2945 engine->irq_get = gen5_ring_get_irq;
2954 engine->irq_put = gen5_ring_put_irq; 2946 engine->irq_put = gen5_ring_put_irq;
@@ -2990,7 +2982,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
2990 engine->irq_put = gen8_ring_put_irq; 2982 engine->irq_put = gen8_ring_put_irq;
2991 engine->dispatch_execbuffer = 2983 engine->dispatch_execbuffer =
2992 gen8_ring_dispatch_execbuffer; 2984 gen8_ring_dispatch_execbuffer;
2993 if (i915_semaphore_is_enabled(dev)) { 2985 if (i915_semaphore_is_enabled(dev_priv)) {
2994 engine->semaphore.sync_to = gen8_ring_sync; 2986 engine->semaphore.sync_to = gen8_ring_sync;
2995 engine->semaphore.signal = gen8_xcs_signal; 2987 engine->semaphore.signal = gen8_xcs_signal;
2996 GEN8_RING_SEMAPHORE_INIT(engine); 2988 GEN8_RING_SEMAPHORE_INIT(engine);
@@ -3017,13 +3009,13 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
3017 engine->irq_seqno_barrier = gen6_seqno_barrier; 3009 engine->irq_seqno_barrier = gen6_seqno_barrier;
3018 engine->get_seqno = ring_get_seqno; 3010 engine->get_seqno = ring_get_seqno;
3019 engine->set_seqno = ring_set_seqno; 3011 engine->set_seqno = ring_set_seqno;
3020 if (INTEL_INFO(dev)->gen >= 8) { 3012 if (INTEL_GEN(dev_priv) >= 8) {
3021 engine->irq_enable_mask = 3013 engine->irq_enable_mask =
3022 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; 3014 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
3023 engine->irq_get = gen8_ring_get_irq; 3015 engine->irq_get = gen8_ring_get_irq;
3024 engine->irq_put = gen8_ring_put_irq; 3016 engine->irq_put = gen8_ring_put_irq;
3025 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 3017 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
3026 if (i915_semaphore_is_enabled(dev)) { 3018 if (i915_semaphore_is_enabled(dev_priv)) {
3027 engine->semaphore.sync_to = gen8_ring_sync; 3019 engine->semaphore.sync_to = gen8_ring_sync;
3028 engine->semaphore.signal = gen8_xcs_signal; 3020 engine->semaphore.signal = gen8_xcs_signal;
3029 GEN8_RING_SEMAPHORE_INIT(engine); 3021 GEN8_RING_SEMAPHORE_INIT(engine);
@@ -3033,7 +3025,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
3033 engine->irq_get = gen6_ring_get_irq; 3025 engine->irq_get = gen6_ring_get_irq;
3034 engine->irq_put = gen6_ring_put_irq; 3026 engine->irq_put = gen6_ring_put_irq;
3035 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 3027 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
3036 if (i915_semaphore_is_enabled(dev)) { 3028 if (i915_semaphore_is_enabled(dev_priv)) {
3037 engine->semaphore.signal = gen6_signal; 3029 engine->semaphore.signal = gen6_signal;
3038 engine->semaphore.sync_to = gen6_ring_sync; 3030 engine->semaphore.sync_to = gen6_ring_sync;
3039 /* 3031 /*
@@ -3078,13 +3070,13 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
3078 engine->get_seqno = ring_get_seqno; 3070 engine->get_seqno = ring_get_seqno;
3079 engine->set_seqno = ring_set_seqno; 3071 engine->set_seqno = ring_set_seqno;
3080 3072
3081 if (INTEL_INFO(dev)->gen >= 8) { 3073 if (INTEL_GEN(dev_priv) >= 8) {
3082 engine->irq_enable_mask = 3074 engine->irq_enable_mask =
3083 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; 3075 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
3084 engine->irq_get = gen8_ring_get_irq; 3076 engine->irq_get = gen8_ring_get_irq;
3085 engine->irq_put = gen8_ring_put_irq; 3077 engine->irq_put = gen8_ring_put_irq;
3086 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 3078 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
3087 if (i915_semaphore_is_enabled(dev)) { 3079 if (i915_semaphore_is_enabled(dev_priv)) {
3088 engine->semaphore.sync_to = gen8_ring_sync; 3080 engine->semaphore.sync_to = gen8_ring_sync;
3089 engine->semaphore.signal = gen8_xcs_signal; 3081 engine->semaphore.signal = gen8_xcs_signal;
3090 GEN8_RING_SEMAPHORE_INIT(engine); 3082 GEN8_RING_SEMAPHORE_INIT(engine);
@@ -3094,7 +3086,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
3094 engine->irq_get = hsw_vebox_get_irq; 3086 engine->irq_get = hsw_vebox_get_irq;
3095 engine->irq_put = hsw_vebox_put_irq; 3087 engine->irq_put = hsw_vebox_put_irq;
3096 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 3088 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
3097 if (i915_semaphore_is_enabled(dev)) { 3089 if (i915_semaphore_is_enabled(dev_priv)) {
3098 engine->semaphore.sync_to = gen6_ring_sync; 3090 engine->semaphore.sync_to = gen6_ring_sync;
3099 engine->semaphore.signal = gen6_signal; 3091 engine->semaphore.signal = gen6_signal;
3100 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER; 3092 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ff126485d398..929e7b4af2a4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -107,7 +107,6 @@ struct intel_ringbuffer {
107 int space; 107 int space;
108 int size; 108 int size;
109 int effective_size; 109 int effective_size;
110 int reserved_size;
111 110
112 /** We track the position of the requests in the ring buffer, and 111 /** We track the position of the requests in the ring buffer, and
113 * when each is retired we increment last_retired_head as the GPU 112 * when each is retired we increment last_retired_head as the GPU
@@ -142,7 +141,8 @@ struct i915_ctx_workarounds {
142 struct drm_i915_gem_object *obj; 141 struct drm_i915_gem_object *obj;
143}; 142};
144 143
145struct intel_engine_cs { 144struct intel_engine_cs {
145 struct drm_i915_private *i915;
146 const char *name; 146 const char *name;
147 enum intel_engine_id { 147 enum intel_engine_id {
148 RCS = 0, 148 RCS = 0,
@@ -157,7 +157,6 @@ struct intel_engine_cs {
157 unsigned int hw_id; 157 unsigned int hw_id;
158 unsigned int guc_id; /* XXX same as hw_id? */ 158 unsigned int guc_id; /* XXX same as hw_id? */
159 u32 mmio_base; 159 u32 mmio_base;
160 struct drm_device *dev;
161 struct intel_ringbuffer *buffer; 160 struct intel_ringbuffer *buffer;
162 struct list_head buffers; 161 struct list_head buffers;
163 162
@@ -268,7 +267,6 @@ struct intel_engine_cs {
268 struct tasklet_struct irq_tasklet; 267 struct tasklet_struct irq_tasklet;
269 spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */ 268 spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
270 struct list_head execlist_queue; 269 struct list_head execlist_queue;
271 struct list_head execlist_retired_req_list;
272 unsigned int fw_domains; 270 unsigned int fw_domains;
273 unsigned int next_context_status_buffer; 271 unsigned int next_context_status_buffer;
274 unsigned int idle_lite_restore_wa; 272 unsigned int idle_lite_restore_wa;
@@ -352,7 +350,7 @@ struct intel_engine_cs {
352static inline bool 350static inline bool
353intel_engine_initialized(struct intel_engine_cs *engine) 351intel_engine_initialized(struct intel_engine_cs *engine)
354{ 352{
355 return engine->dev != NULL; 353 return engine->i915 != NULL;
356} 354}
357 355
358static inline unsigned 356static inline unsigned
@@ -427,7 +425,7 @@ intel_write_status_page(struct intel_engine_cs *engine,
427 425
428struct intel_ringbuffer * 426struct intel_ringbuffer *
429intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size); 427intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
430int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, 428int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
431 struct intel_ringbuffer *ringbuf); 429 struct intel_ringbuffer *ringbuf);
432void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf); 430void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
433void intel_ringbuffer_free(struct intel_ringbuffer *ring); 431void intel_ringbuffer_free(struct intel_ringbuffer *ring);
@@ -486,26 +484,15 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
486/* 484/*
487 * Arbitrary size for largest possible 'add request' sequence. The code paths 485 * Arbitrary size for largest possible 'add request' sequence. The code paths
488 * are complex and variable. Empirical measurement shows that the worst case 486 * are complex and variable. Empirical measurement shows that the worst case
489 * is ILK at 136 words. Reserving too much is better than reserving too little 487 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
490 * as that allows for corner cases that might have been missed. So the figure 488 * we need to allocate double the largest single packet within that emission
491 * has been rounded up to 160 words. 489 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
492 */ 490 */
493#define MIN_SPACE_FOR_ADD_REQUEST 160 491#define MIN_SPACE_FOR_ADD_REQUEST 336
494 492
495/* 493static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
496 * Reserve space in the ring to guarantee that the i915_add_request() call 494{
497 * will always have sufficient room to do its stuff. The request creation 495 return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
498 * code calls this automatically. 496}
499 */
500void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
501/* Cancel the reservation, e.g. because the request is being discarded. */
502void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
503/* Use the reserved space - for use by i915_add_request() only. */
504void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
505/* Finish with the reserved space - for use by i915_add_request() only. */
506void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
507
508/* Legacy ringbuffer specific portion of reservation code: */
509int intel_ring_reserve_space(struct drm_i915_gem_request *request);
510 497
511#endif /* _INTEL_RINGBUFFER_H_ */ 498#endif /* _INTEL_RINGBUFFER_H_ */
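The jump of MIN_SPACE_FOR_ADD_REQUEST from 160 to 336 follows directly from the new comment: the measured worst case is BDW at 6 + 6 + 36 dwords (192 bytes), and doubling the largest packet to cover tail wraparound gives 6 + 6 + 72 dwords, i.e. 84 dwords or 336 bytes. The header also adds intel_hws_seqno_address(), a small inline returning status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR, the graphics address of the seqno slot in the hardware status page. A standalone check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        int dword = 4;                           /* bytes per dword */
        int bdw_worst = (6 + 6 + 36) * dword;    /* 192 bytes */
        int bdw_wrap  = (6 + 6 + 72) * dword;    /* 336 bytes */

        printf("worst case %d B, with wrap headroom %d B\n",
               bdw_worst, bdw_wrap);
        return 0;
    }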
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 7fb1da4e7fc3..b69b935516fb 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -948,6 +948,11 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
948 */ 948 */
949 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); 949 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
950 I915_WRITE(CBR1_VLV, 0); 950 I915_WRITE(CBR1_VLV, 0);
951
952 WARN_ON(dev_priv->rawclk_freq == 0);
953
954 I915_WRITE(RAWCLK_FREQ_VLV,
955 DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
951} 956}
952 957
953static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) 958static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
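vlv_init_display_clock_gating() now also programs RAWCLK_FREQ_VLV from the cached rawclk_freq, warning if that value was never filled in. The DIV_ROUND_CLOSEST(..., 1000) suggests the cache is kept in kHz and the register takes the value in MHz; that unit assumption is not visible in this hunk. Standalone illustration of the rounding:

    #include <stdio.h>

    /* simplified stand-in for the kernel's DIV_ROUND_CLOSEST for
     * non-negative values */
    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
        /* e.g. a 200000 kHz reference clock would program the field as 200 */
        printf("%d\n", DIV_ROUND_CLOSEST(200000, 1000));
        return 0;
    }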
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 0f3e2303e0e9..97b1a54eb09f 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -80,9 +80,7 @@ static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
80 */ 80 */
81void intel_pipe_update_start(struct intel_crtc *crtc) 81void intel_pipe_update_start(struct intel_crtc *crtc)
82{ 82{
83 struct drm_device *dev = crtc->base.dev;
84 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 83 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
85 enum pipe pipe = crtc->pipe;
86 long timeout = msecs_to_jiffies_timeout(1); 84 long timeout = msecs_to_jiffies_timeout(1);
87 int scanline, min, max, vblank_start; 85 int scanline, min, max, vblank_start;
88 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); 86 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
@@ -139,8 +137,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
139 137
140 crtc->debug.scanline_start = scanline; 138 crtc->debug.scanline_start = scanline;
141 crtc->debug.start_vbl_time = ktime_get(); 139 crtc->debug.start_vbl_time = ktime_get();
142 crtc->debug.start_vbl_count = 140 crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
143 dev->driver->get_vblank_counter(dev, pipe);
144 141
145 trace_i915_pipe_update_vblank_evaded(crtc); 142 trace_i915_pipe_update_vblank_evaded(crtc);
146} 143}
@@ -154,14 +151,19 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
154 * re-enables interrupts and verifies the update was actually completed 151 * re-enables interrupts and verifies the update was actually completed
155 * before a vblank using the value of @start_vbl_count. 152 * before a vblank using the value of @start_vbl_count.
156 */ 153 */
157void intel_pipe_update_end(struct intel_crtc *crtc) 154void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work)
158{ 155{
159 struct drm_device *dev = crtc->base.dev;
160 enum pipe pipe = crtc->pipe; 156 enum pipe pipe = crtc->pipe;
161 int scanline_end = intel_get_crtc_scanline(crtc); 157 int scanline_end = intel_get_crtc_scanline(crtc);
162 u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe); 158 u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
163 ktime_t end_vbl_time = ktime_get(); 159 ktime_t end_vbl_time = ktime_get();
164 160
161 if (work) {
162 work->flip_queued_vblank = end_vbl_count;
163 smp_mb__before_atomic();
164 atomic_set(&work->pending, 1);
165 }
166
165 trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end); 167 trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
166 168
167 local_irq_enable(); 169 local_irq_enable();
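intel_pipe_update_end() now takes the flip work and, while interrupts are still disabled, records the vblank count it completed in and marks the work pending. The smp_mb__before_atomic() orders the plain store to flip_queued_vblank before the atomic_set(), so whoever polls work->pending sees a fully populated work item (the matching read-side barrier lives with that consumer and is not part of this hunk). The publication pattern, as in the diff:

    /* in intel_pipe_update_end(), still with local interrupts disabled */
    work->flip_queued_vblank = end_vbl_count;  /* payload first        */
    smp_mb__before_atomic();                   /* order store vs. flag */
    atomic_set(&work->pending, 1);             /* then publish         */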
@@ -203,8 +205,6 @@ skl_update_plane(struct drm_plane *drm_plane,
203 uint32_t y = plane_state->src.y1 >> 16; 205 uint32_t y = plane_state->src.y1 >> 16;
204 uint32_t src_w = drm_rect_width(&plane_state->src) >> 16; 206 uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
205 uint32_t src_h = drm_rect_height(&plane_state->src) >> 16; 207 uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
206 const struct intel_scaler *scaler =
207 &crtc_state->scaler_state.scalers[plane_state->scaler_id];
208 208
209 plane_ctl = PLANE_CTL_ENABLE | 209 plane_ctl = PLANE_CTL_ENABLE |
210 PLANE_CTL_PIPE_GAMMA_ENABLE | 210 PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -260,13 +260,16 @@ skl_update_plane(struct drm_plane *drm_plane,
260 260
261 /* program plane scaler */ 261 /* program plane scaler */
262 if (plane_state->scaler_id >= 0) { 262 if (plane_state->scaler_id >= 0) {
263 uint32_t ps_ctrl = 0;
264 int scaler_id = plane_state->scaler_id; 263 int scaler_id = plane_state->scaler_id;
264 const struct intel_scaler *scaler;
265 265
266 DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane, 266 DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
267 PS_PLANE_SEL(plane)); 267 PS_PLANE_SEL(plane));
268 ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode; 268
269 I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); 269 scaler = &crtc_state->scaler_state.scalers[scaler_id];
270
271 I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
272 PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode);
270 I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0); 273 I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
271 I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y); 274 I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
272 I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), 275 I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),
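In skl_update_plane() the scaler lookup moves inside the scaler_id >= 0 branch: previously the pointer into crtc_state->scaler_state.scalers[] was formed unconditionally from plane_state->scaler_id, which is negative when no scaler is assigned, and the ps_ctrl temporary built from scaler->mode is dropped in favour of writing the register value directly. General shape of the fix, with types and register plumbing simplified:

    if (plane_state->scaler_id >= 0) {
        int scaler_id = plane_state->scaler_id;
        const struct intel_scaler *scaler =
            &crtc_state->scaler_state.scalers[scaler_id]; /* index known valid */

        I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
                   PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode);
    }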
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4f1dfe616856..385114bca924 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -248,9 +248,9 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
248 return HRTIMER_NORESTART; 248 return HRTIMER_NORESTART;
249} 249}
250 250
251void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) 251void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
252 bool restore)
252{ 253{
253 struct drm_i915_private *dev_priv = dev->dev_private;
254 unsigned long irqflags; 254 unsigned long irqflags;
255 struct intel_uncore_forcewake_domain *domain; 255 struct intel_uncore_forcewake_domain *domain;
256 int retry_count = 100; 256 int retry_count = 100;
@@ -304,7 +304,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
304 if (fw) 304 if (fw)
305 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw); 305 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
306 306
307 if (IS_GEN6(dev) || IS_GEN7(dev)) 307 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
308 dev_priv->uncore.fifo_count = 308 dev_priv->uncore.fifo_count =
309 fifo_free_entries(dev_priv); 309 fifo_free_entries(dev_priv);
310 } 310 }
@@ -400,43 +400,42 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
400 return false; 400 return false;
401} 401}
402 402
403static void __intel_uncore_early_sanitize(struct drm_device *dev, 403static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
404 bool restore_forcewake) 404 bool restore_forcewake)
405{ 405{
406 struct drm_i915_private *dev_priv = dev->dev_private;
407
408 /* clear out unclaimed reg detection bit */ 406 /* clear out unclaimed reg detection bit */
409 if (check_for_unclaimed_mmio(dev_priv)) 407 if (check_for_unclaimed_mmio(dev_priv))
410 DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); 408 DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
411 409
412 /* clear out old GT FIFO errors */ 410 /* clear out old GT FIFO errors */
413 if (IS_GEN6(dev) || IS_GEN7(dev)) 411 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
414 __raw_i915_write32(dev_priv, GTFIFODBG, 412 __raw_i915_write32(dev_priv, GTFIFODBG,
415 __raw_i915_read32(dev_priv, GTFIFODBG)); 413 __raw_i915_read32(dev_priv, GTFIFODBG));
416 414
417 /* WaDisableShadowRegForCpd:chv */ 415 /* WaDisableShadowRegForCpd:chv */
418 if (IS_CHERRYVIEW(dev)) { 416 if (IS_CHERRYVIEW(dev_priv)) {
419 __raw_i915_write32(dev_priv, GTFIFOCTL, 417 __raw_i915_write32(dev_priv, GTFIFOCTL,
420 __raw_i915_read32(dev_priv, GTFIFOCTL) | 418 __raw_i915_read32(dev_priv, GTFIFOCTL) |
421 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL | 419 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
422 GT_FIFO_CTL_RC6_POLICY_STALL); 420 GT_FIFO_CTL_RC6_POLICY_STALL);
423 } 421 }
424 422
425 intel_uncore_forcewake_reset(dev, restore_forcewake); 423 intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
426} 424}
427 425
428void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) 426void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
427 bool restore_forcewake)
429{ 428{
430 __intel_uncore_early_sanitize(dev, restore_forcewake); 429 __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
431 i915_check_and_clear_faults(dev); 430 i915_check_and_clear_faults(dev_priv);
432} 431}
433 432
434void intel_uncore_sanitize(struct drm_device *dev) 433void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
435{ 434{
436 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); 435 i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
437 436
438 /* BIOS often leaves RC6 enabled, but disable it for hw init */ 437 /* BIOS often leaves RC6 enabled, but disable it for hw init */
439 intel_disable_gt_powersave(dev); 438 intel_disable_gt_powersave(dev_priv);
440} 439}
441 440
442static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 441static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
@@ -1233,14 +1232,12 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
1233 fw_domain_reset(d); 1232 fw_domain_reset(d);
1234} 1233}
1235 1234
1236static void intel_uncore_fw_domains_init(struct drm_device *dev) 1235static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
1237{ 1236{
1238 struct drm_i915_private *dev_priv = dev->dev_private;
1239
1240 if (INTEL_INFO(dev_priv)->gen <= 5) 1237 if (INTEL_INFO(dev_priv)->gen <= 5)
1241 return; 1238 return;
1242 1239
1243 if (IS_GEN9(dev)) { 1240 if (IS_GEN9(dev_priv)) {
1244 dev_priv->uncore.funcs.force_wake_get = fw_domains_get; 1241 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1245 dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1242 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1246 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1243 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
@@ -1251,9 +1248,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1251 FORCEWAKE_ACK_BLITTER_GEN9); 1248 FORCEWAKE_ACK_BLITTER_GEN9);
1252 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, 1249 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1253 FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); 1250 FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
1254 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 1251 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1255 dev_priv->uncore.funcs.force_wake_get = fw_domains_get; 1252 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1256 if (!IS_CHERRYVIEW(dev)) 1253 if (!IS_CHERRYVIEW(dev_priv))
1257 dev_priv->uncore.funcs.force_wake_put = 1254 dev_priv->uncore.funcs.force_wake_put =
1258 fw_domains_put_with_fifo; 1255 fw_domains_put_with_fifo;
1259 else 1256 else
@@ -1262,17 +1259,17 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1262 FORCEWAKE_VLV, FORCEWAKE_ACK_VLV); 1259 FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
1263 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, 1260 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1264 FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV); 1261 FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
1265 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 1262 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
1266 dev_priv->uncore.funcs.force_wake_get = 1263 dev_priv->uncore.funcs.force_wake_get =
1267 fw_domains_get_with_thread_status; 1264 fw_domains_get_with_thread_status;
1268 if (IS_HASWELL(dev)) 1265 if (IS_HASWELL(dev_priv))
1269 dev_priv->uncore.funcs.force_wake_put = 1266 dev_priv->uncore.funcs.force_wake_put =
1270 fw_domains_put_with_fifo; 1267 fw_domains_put_with_fifo;
1271 else 1268 else
1272 dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1269 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1273 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1270 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1274 FORCEWAKE_MT, FORCEWAKE_ACK_HSW); 1271 FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
1275 } else if (IS_IVYBRIDGE(dev)) { 1272 } else if (IS_IVYBRIDGE(dev_priv)) {
1276 u32 ecobus; 1273 u32 ecobus;
1277 1274
1278 /* IVB configs may use multi-threaded forcewake */ 1275 /* IVB configs may use multi-threaded forcewake */
@@ -1302,11 +1299,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1302 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1299 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1303 FORCEWAKE_MT, FORCEWAKE_MT_ACK); 1300 FORCEWAKE_MT, FORCEWAKE_MT_ACK);
1304 1301
1305 mutex_lock(&dev->struct_mutex);
1306 fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL); 1302 fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
1307 ecobus = __raw_i915_read32(dev_priv, ECOBUS); 1303 ecobus = __raw_i915_read32(dev_priv, ECOBUS);
1308 fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL); 1304 fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
1309 mutex_unlock(&dev->struct_mutex);
1310 1305
1311 if (!(ecobus & FORCEWAKE_MT_ENABLE)) { 1306 if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
1312 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); 1307 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
@@ -1314,7 +1309,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1314 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1309 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1315 FORCEWAKE, FORCEWAKE_ACK); 1310 FORCEWAKE, FORCEWAKE_ACK);
1316 } 1311 }
1317 } else if (IS_GEN6(dev)) { 1312 } else if (IS_GEN6(dev_priv)) {
1318 dev_priv->uncore.funcs.force_wake_get = 1313 dev_priv->uncore.funcs.force_wake_get =
1319 fw_domains_get_with_thread_status; 1314 fw_domains_get_with_thread_status;
1320 dev_priv->uncore.funcs.force_wake_put = 1315 dev_priv->uncore.funcs.force_wake_put =
@@ -1327,26 +1322,24 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1327 WARN_ON(dev_priv->uncore.fw_domains == 0); 1322 WARN_ON(dev_priv->uncore.fw_domains == 0);
1328} 1323}
1329 1324
1330void intel_uncore_init(struct drm_device *dev) 1325void intel_uncore_init(struct drm_i915_private *dev_priv)
1331{ 1326{
1332 struct drm_i915_private *dev_priv = dev->dev_private; 1327 i915_check_vgpu(dev_priv);
1333
1334 i915_check_vgpu(dev);
1335 1328
1336 intel_uncore_edram_detect(dev_priv); 1329 intel_uncore_edram_detect(dev_priv);
1337 intel_uncore_fw_domains_init(dev); 1330 intel_uncore_fw_domains_init(dev_priv);
1338 __intel_uncore_early_sanitize(dev, false); 1331 __intel_uncore_early_sanitize(dev_priv, false);
1339 1332
1340 dev_priv->uncore.unclaimed_mmio_check = 1; 1333 dev_priv->uncore.unclaimed_mmio_check = 1;
1341 1334
1342 switch (INTEL_INFO(dev)->gen) { 1335 switch (INTEL_INFO(dev_priv)->gen) {
1343 default: 1336 default:
1344 case 9: 1337 case 9:
1345 ASSIGN_WRITE_MMIO_VFUNCS(gen9); 1338 ASSIGN_WRITE_MMIO_VFUNCS(gen9);
1346 ASSIGN_READ_MMIO_VFUNCS(gen9); 1339 ASSIGN_READ_MMIO_VFUNCS(gen9);
1347 break; 1340 break;
1348 case 8: 1341 case 8:
1349 if (IS_CHERRYVIEW(dev)) { 1342 if (IS_CHERRYVIEW(dev_priv)) {
1350 ASSIGN_WRITE_MMIO_VFUNCS(chv); 1343 ASSIGN_WRITE_MMIO_VFUNCS(chv);
1351 ASSIGN_READ_MMIO_VFUNCS(chv); 1344 ASSIGN_READ_MMIO_VFUNCS(chv);
1352 1345
@@ -1357,13 +1350,13 @@ void intel_uncore_init(struct drm_device *dev)
1357 break; 1350 break;
1358 case 7: 1351 case 7:
1359 case 6: 1352 case 6:
1360 if (IS_HASWELL(dev)) { 1353 if (IS_HASWELL(dev_priv)) {
1361 ASSIGN_WRITE_MMIO_VFUNCS(hsw); 1354 ASSIGN_WRITE_MMIO_VFUNCS(hsw);
1362 } else { 1355 } else {
1363 ASSIGN_WRITE_MMIO_VFUNCS(gen6); 1356 ASSIGN_WRITE_MMIO_VFUNCS(gen6);
1364 } 1357 }
1365 1358
1366 if (IS_VALLEYVIEW(dev)) { 1359 if (IS_VALLEYVIEW(dev_priv)) {
1367 ASSIGN_READ_MMIO_VFUNCS(vlv); 1360 ASSIGN_READ_MMIO_VFUNCS(vlv);
1368 } else { 1361 } else {
1369 ASSIGN_READ_MMIO_VFUNCS(gen6); 1362 ASSIGN_READ_MMIO_VFUNCS(gen6);
@@ -1381,24 +1374,24 @@ void intel_uncore_init(struct drm_device *dev)
1381 break; 1374 break;
1382 } 1375 }
1383 1376
1384 if (intel_vgpu_active(dev)) { 1377 if (intel_vgpu_active(dev_priv)) {
1385 ASSIGN_WRITE_MMIO_VFUNCS(vgpu); 1378 ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
1386 ASSIGN_READ_MMIO_VFUNCS(vgpu); 1379 ASSIGN_READ_MMIO_VFUNCS(vgpu);
1387 } 1380 }
1388 1381
1389 i915_check_and_clear_faults(dev); 1382 i915_check_and_clear_faults(dev_priv);
1390} 1383}
1391#undef ASSIGN_WRITE_MMIO_VFUNCS 1384#undef ASSIGN_WRITE_MMIO_VFUNCS
1392#undef ASSIGN_READ_MMIO_VFUNCS 1385#undef ASSIGN_READ_MMIO_VFUNCS
1393 1386
1394void intel_uncore_fini(struct drm_device *dev) 1387void intel_uncore_fini(struct drm_i915_private *dev_priv)
1395{ 1388{
1396 /* Paranoia: make sure we have disabled everything before we exit. */ 1389 /* Paranoia: make sure we have disabled everything before we exit. */
1397 intel_uncore_sanitize(dev); 1390 intel_uncore_sanitize(dev_priv);
1398 intel_uncore_forcewake_reset(dev, false); 1391 intel_uncore_forcewake_reset(dev_priv, false);
1399} 1392}
1400 1393
1401#define GEN_RANGE(l, h) GENMASK(h, l) 1394#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)
1402 1395
1403static const struct register_whitelist { 1396static const struct register_whitelist {
1404 i915_reg_t offset_ldw, offset_udw; 1397 i915_reg_t offset_ldw, offset_udw;
@@ -1423,7 +1416,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
1423 1416
1424 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { 1417 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1425 if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) && 1418 if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
1426 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) 1419 (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask))
1427 break; 1420 break;
1428 } 1421 }
1429 1422
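Two related changes meet here: GEN_RANGE() now builds its mask with GENMASK((h) - 1, (l) - 1), and the whitelist lookup in i915_reg_read_ioctl tests the precomputed gen_mask instead of computing 1 << gen on the spot. A small userspace sketch of why the two stay consistent, assuming gen_mask is laid out as BIT(gen - 1), as the shifted range implies (stand-in macros, not the kernel headers):

#include <stdio.h>

#define GENMASK(h, l)       (((~0u) << (l)) & ((~0u) >> (31 - (h))))
#define GEN_RANGE_OLD(l, h) GENMASK(h, l)              /* one bit per gen number */
#define GEN_RANGE_NEW(l, h) GENMASK((h) - 1, (l) - 1)  /* one bit per (gen - 1)  */

int main(void)
{
	unsigned int gen = 8;
	unsigned int gen_mask = 1u << (gen - 1);   /* assumed layout of info->gen_mask */

	/* old check: 1 << gen against a gen-numbered range */
	printf("old match: %d\n", !!((1u << gen) & GEN_RANGE_OLD(4, 9)));
	/* new check: gen_mask against the shifted range */
	printf("new match: %d\n", !!(gen_mask & GEN_RANGE_NEW(4, 9)));
	return 0;
}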
@@ -1467,83 +1460,47 @@ out:
1467 return ret; 1460 return ret;
1468} 1461}
1469 1462
1470int i915_get_reset_stats_ioctl(struct drm_device *dev, 1463static int i915_reset_complete(struct pci_dev *pdev)
1471 void *data, struct drm_file *file)
1472{
1473 struct drm_i915_private *dev_priv = dev->dev_private;
1474 struct drm_i915_reset_stats *args = data;
1475 struct i915_ctx_hang_stats *hs;
1476 struct intel_context *ctx;
1477 int ret;
1478
1479 if (args->flags || args->pad)
1480 return -EINVAL;
1481
1482 if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
1483 return -EPERM;
1484
1485 ret = mutex_lock_interruptible(&dev->struct_mutex);
1486 if (ret)
1487 return ret;
1488
1489 ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
1490 if (IS_ERR(ctx)) {
1491 mutex_unlock(&dev->struct_mutex);
1492 return PTR_ERR(ctx);
1493 }
1494 hs = &ctx->hang_stats;
1495
1496 if (capable(CAP_SYS_ADMIN))
1497 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
1498 else
1499 args->reset_count = 0;
1500
1501 args->batch_active = hs->batch_active;
1502 args->batch_pending = hs->batch_pending;
1503
1504 mutex_unlock(&dev->struct_mutex);
1505
1506 return 0;
1507}
1508
1509static int i915_reset_complete(struct drm_device *dev)
1510{ 1464{
1511 u8 gdrst; 1465 u8 gdrst;
1512 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); 1466 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1513 return (gdrst & GRDOM_RESET_STATUS) == 0; 1467 return (gdrst & GRDOM_RESET_STATUS) == 0;
1514} 1468}
1515 1469
1516static int i915_do_reset(struct drm_device *dev, unsigned engine_mask) 1470static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1517{ 1471{
1472 struct pci_dev *pdev = dev_priv->dev->pdev;
1473
1518 /* assert reset for at least 20 usec */ 1474 /* assert reset for at least 20 usec */
1519 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1475 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1520 udelay(20); 1476 udelay(20);
1521 pci_write_config_byte(dev->pdev, I915_GDRST, 0); 1477 pci_write_config_byte(pdev, I915_GDRST, 0);
1522 1478
1523 return wait_for(i915_reset_complete(dev), 500); 1479 return wait_for(i915_reset_complete(pdev), 500);
1524} 1480}
1525 1481
1526static int g4x_reset_complete(struct drm_device *dev) 1482static int g4x_reset_complete(struct pci_dev *pdev)
1527{ 1483{
1528 u8 gdrst; 1484 u8 gdrst;
1529 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); 1485 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1530 return (gdrst & GRDOM_RESET_ENABLE) == 0; 1486 return (gdrst & GRDOM_RESET_ENABLE) == 0;
1531} 1487}
1532 1488
1533static int g33_do_reset(struct drm_device *dev, unsigned engine_mask) 1489static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1534{ 1490{
1535 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1491 struct pci_dev *pdev = dev_priv->dev->pdev;
1536 return wait_for(g4x_reset_complete(dev), 500); 1492 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1493 return wait_for(g4x_reset_complete(pdev), 500);
1537} 1494}
1538 1495
1539static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask) 1496static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1540{ 1497{
1541 struct drm_i915_private *dev_priv = dev->dev_private; 1498 struct pci_dev *pdev = dev_priv->dev->pdev;
1542 int ret; 1499 int ret;
1543 1500
1544 pci_write_config_byte(dev->pdev, I915_GDRST, 1501 pci_write_config_byte(pdev, I915_GDRST,
1545 GRDOM_RENDER | GRDOM_RESET_ENABLE); 1502 GRDOM_RENDER | GRDOM_RESET_ENABLE);
1546 ret = wait_for(g4x_reset_complete(dev), 500); 1503 ret = wait_for(g4x_reset_complete(pdev), 500);
1547 if (ret) 1504 if (ret)
1548 return ret; 1505 return ret;
1549 1506
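Within this hunk the reset helpers stop taking a drm_device: the completion checks read the GDRST byte through a plain pci_dev, the do_reset variants derive pdev from dev_priv once, and the body of i915_get_reset_stats_ioctl drops out of this file. A userspace stand-in for the assert / hold / deassert / poll handshake these helpers implement; the bit positions and the fake config-space accessors are illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define GRDOM_RESET_ENABLE (1 << 0)    /* illustrative bit positions */
#define GRDOM_RESET_STATUS (1 << 1)

static uint8_t fake_gdrst;             /* stands in for the PCI config byte */

static void cfg_write(uint8_t val) { fake_gdrst = val; }

/* The stub reports the status bit as already clear, so the poll below
 * terminates immediately; real hardware clears it when the reset is done. */
static uint8_t cfg_read(void) { return fake_gdrst & (uint8_t)~GRDOM_RESET_STATUS; }

static bool reset_complete(void)
{
	return (cfg_read() & GRDOM_RESET_STATUS) == 0;
}

int main(void)
{
	int timeout_ms = 500;

	cfg_write(GRDOM_RESET_ENABLE);     /* assert reset */
	usleep(20);                        /* hold for at least 20 usec */
	cfg_write(0);                      /* deassert */

	while (!reset_complete() && timeout_ms-- > 0)   /* wait_for(..., 500) analog */
		usleep(1000);

	printf("reset %s\n", reset_complete() ? "complete" : "timed out");
	return reset_complete() ? 0 : 1;
}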
@@ -1551,9 +1508,9 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
1551 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); 1508 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1552 POSTING_READ(VDECCLK_GATE_D); 1509 POSTING_READ(VDECCLK_GATE_D);
1553 1510
1554 pci_write_config_byte(dev->pdev, I915_GDRST, 1511 pci_write_config_byte(pdev, I915_GDRST,
1555 GRDOM_MEDIA | GRDOM_RESET_ENABLE); 1512 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
1556 ret = wait_for(g4x_reset_complete(dev), 500); 1513 ret = wait_for(g4x_reset_complete(pdev), 500);
1557 if (ret) 1514 if (ret)
1558 return ret; 1515 return ret;
1559 1516
@@ -1561,14 +1518,14 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
1561 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); 1518 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1562 POSTING_READ(VDECCLK_GATE_D); 1519 POSTING_READ(VDECCLK_GATE_D);
1563 1520
1564 pci_write_config_byte(dev->pdev, I915_GDRST, 0); 1521 pci_write_config_byte(pdev, I915_GDRST, 0);
1565 1522
1566 return 0; 1523 return 0;
1567} 1524}
1568 1525
1569static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask) 1526static int ironlake_do_reset(struct drm_i915_private *dev_priv,
1527 unsigned engine_mask)
1570{ 1528{
1571 struct drm_i915_private *dev_priv = dev->dev_private;
1572 int ret; 1529 int ret;
1573 1530
1574 I915_WRITE(ILK_GDSR, 1531 I915_WRITE(ILK_GDSR,
@@ -1612,7 +1569,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
1612 1569
1613/** 1570/**
1614 * gen6_reset_engines - reset individual engines 1571 * gen6_reset_engines - reset individual engines
1615 * @dev: DRM device 1572 * @dev_priv: i915 device
1616 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset 1573 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
1617 * 1574 *
1618 * This function will reset the individual engines that are set in engine_mask. 1575 * This function will reset the individual engines that are set in engine_mask.
@@ -1623,9 +1580,9 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
1623 * 1580 *
1624 * Returns 0 on success, nonzero on error. 1581 * Returns 0 on success, nonzero on error.
1625 */ 1582 */
1626static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask) 1583static int gen6_reset_engines(struct drm_i915_private *dev_priv,
1584 unsigned engine_mask)
1627{ 1585{
1628 struct drm_i915_private *dev_priv = dev->dev_private;
1629 struct intel_engine_cs *engine; 1586 struct intel_engine_cs *engine;
1630 const u32 hw_engine_mask[I915_NUM_ENGINES] = { 1587 const u32 hw_engine_mask[I915_NUM_ENGINES] = {
1631 [RCS] = GEN6_GRDOM_RENDER, 1588 [RCS] = GEN6_GRDOM_RENDER,
@@ -1647,7 +1604,7 @@ static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
1647 1604
1648 ret = gen6_hw_domain_reset(dev_priv, hw_mask); 1605 ret = gen6_hw_domain_reset(dev_priv, hw_mask);
1649 1606
1650 intel_uncore_forcewake_reset(dev, true); 1607 intel_uncore_forcewake_reset(dev_priv, true);
1651 1608
1652 return ret; 1609 return ret;
1653} 1610}
@@ -1663,8 +1620,8 @@ static int wait_for_register_fw(struct drm_i915_private *dev_priv,
1663 1620
1664static int gen8_request_engine_reset(struct intel_engine_cs *engine) 1621static int gen8_request_engine_reset(struct intel_engine_cs *engine)
1665{ 1622{
1623 struct drm_i915_private *dev_priv = engine->i915;
1666 int ret; 1624 int ret;
1667 struct drm_i915_private *dev_priv = engine->dev->dev_private;
1668 1625
1669 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), 1626 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1670 _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); 1627 _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
@@ -1682,22 +1639,22 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine)
1682 1639
1683static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine) 1640static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
1684{ 1641{
1685 struct drm_i915_private *dev_priv = engine->dev->dev_private; 1642 struct drm_i915_private *dev_priv = engine->i915;
1686 1643
1687 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), 1644 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1688 _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); 1645 _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
1689} 1646}
1690 1647
1691static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask) 1648static int gen8_reset_engines(struct drm_i915_private *dev_priv,
1649 unsigned engine_mask)
1692{ 1650{
1693 struct drm_i915_private *dev_priv = dev->dev_private;
1694 struct intel_engine_cs *engine; 1651 struct intel_engine_cs *engine;
1695 1652
1696 for_each_engine_masked(engine, dev_priv, engine_mask) 1653 for_each_engine_masked(engine, dev_priv, engine_mask)
1697 if (gen8_request_engine_reset(engine)) 1654 if (gen8_request_engine_reset(engine))
1698 goto not_ready; 1655 goto not_ready;
1699 1656
1700 return gen6_reset_engines(dev, engine_mask); 1657 return gen6_reset_engines(dev_priv, engine_mask);
1701 1658
1702not_ready: 1659not_ready:
1703 for_each_engine_masked(engine, dev_priv, engine_mask) 1660 for_each_engine_masked(engine, dev_priv, engine_mask)
@@ -1706,35 +1663,35 @@ not_ready:
1706 return -EIO; 1663 return -EIO;
1707} 1664}
1708 1665
1709static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *, 1666typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
1710 unsigned engine_mask) 1667
1668static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
1711{ 1669{
1712 if (!i915.reset) 1670 if (!i915.reset)
1713 return NULL; 1671 return NULL;
1714 1672
1715 if (INTEL_INFO(dev)->gen >= 8) 1673 if (INTEL_INFO(dev_priv)->gen >= 8)
1716 return gen8_reset_engines; 1674 return gen8_reset_engines;
1717 else if (INTEL_INFO(dev)->gen >= 6) 1675 else if (INTEL_INFO(dev_priv)->gen >= 6)
1718 return gen6_reset_engines; 1676 return gen6_reset_engines;
1719 else if (IS_GEN5(dev)) 1677 else if (IS_GEN5(dev_priv))
1720 return ironlake_do_reset; 1678 return ironlake_do_reset;
1721 else if (IS_G4X(dev)) 1679 else if (IS_G4X(dev_priv))
1722 return g4x_do_reset; 1680 return g4x_do_reset;
1723 else if (IS_G33(dev)) 1681 else if (IS_G33(dev_priv))
1724 return g33_do_reset; 1682 return g33_do_reset;
1725 else if (INTEL_INFO(dev)->gen >= 3) 1683 else if (INTEL_INFO(dev_priv)->gen >= 3)
1726 return i915_do_reset; 1684 return i915_do_reset;
1727 else 1685 else
1728 return NULL; 1686 return NULL;
1729} 1687}
1730 1688
1731int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask) 1689int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1732{ 1690{
1733 struct drm_i915_private *dev_priv = to_i915(dev); 1691 reset_func reset;
1734 int (*reset)(struct drm_device *, unsigned);
1735 int ret; 1692 int ret;
1736 1693
1737 reset = intel_get_gpu_reset(dev); 1694 reset = intel_get_gpu_reset(dev_priv);
1738 if (reset == NULL) 1695 if (reset == NULL)
1739 return -ENODEV; 1696 return -ENODEV;
1740 1697
@@ -1742,15 +1699,15 @@ int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
1742 * request may be dropped and never completes (causing -EIO). 1699 * request may be dropped and never completes (causing -EIO).
1743 */ 1700 */
1744 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 1701 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1745 ret = reset(dev, engine_mask); 1702 ret = reset(dev_priv, engine_mask);
1746 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 1703 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1747 1704
1748 return ret; 1705 return ret;
1749} 1706}
1750 1707
1751bool intel_has_gpu_reset(struct drm_device *dev) 1708bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
1752{ 1709{
1753 return intel_get_gpu_reset(dev) != NULL; 1710 return intel_get_gpu_reset(dev_priv) != NULL;
1754} 1711}
1755 1712
1756int intel_guc_reset(struct drm_i915_private *dev_priv) 1713int intel_guc_reset(struct drm_i915_private *dev_priv)
@@ -1802,10 +1759,10 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
1802{ 1759{
1803 enum forcewake_domains fw_domains; 1760 enum forcewake_domains fw_domains;
1804 1761
1805 if (intel_vgpu_active(dev_priv->dev)) 1762 if (intel_vgpu_active(dev_priv))
1806 return 0; 1763 return 0;
1807 1764
1808 switch (INTEL_INFO(dev_priv)->gen) { 1765 switch (INTEL_GEN(dev_priv)) {
1809 case 9: 1766 case 9:
1810 fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg)); 1767 fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
1811 break; 1768 break;
@@ -1842,10 +1799,10 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
1842{ 1799{
1843 enum forcewake_domains fw_domains; 1800 enum forcewake_domains fw_domains;
1844 1801
1845 if (intel_vgpu_active(dev_priv->dev)) 1802 if (intel_vgpu_active(dev_priv))
1846 return 0; 1803 return 0;
1847 1804
1848 switch (INTEL_INFO(dev_priv)->gen) { 1805 switch (INTEL_GEN(dev_priv)) {
1849 case 9: 1806 case 9:
1850 fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg)); 1807 fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
1851 break; 1808 break;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index c15051de8023..4f9799f025a9 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -446,10 +446,16 @@ struct bdb_lfp_backlight_data_entry {
446 u8 obsolete3; 446 u8 obsolete3;
447} __packed; 447} __packed;
448 448
449struct bdb_lfp_backlight_control_method {
450 u8 type:4;
451 u8 controller:4;
452} __packed;
453
449struct bdb_lfp_backlight_data { 454struct bdb_lfp_backlight_data {
450 u8 entry_size; 455 u8 entry_size;
451 struct bdb_lfp_backlight_data_entry data[16]; 456 struct bdb_lfp_backlight_data_entry data[16];
452 u8 level[16]; 457 u8 level[16];
458 struct bdb_lfp_backlight_control_method backlight_control[16];
453} __packed; 459} __packed;
454 460
455struct aimdb_header { 461struct aimdb_header {
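The new bdb_lfp_backlight_control_method entry records, per panel, which backlight control method and controller the VBT specifies, packed into one byte alongside the existing per-panel data and level arrays. A small sketch of decoding such a byte; bitfield ordering is compiler/ABI dependent and the example value is made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct bdb_lfp_backlight_control_method {
	uint8_t type:4;          /* low nibble on the usual little-endian GCC layout */
	uint8_t controller:4;    /* high nibble */
} __attribute__((packed));

int main(void)
{
	uint8_t raw = 0x21;      /* made-up VBT byte */
	struct bdb_lfp_backlight_control_method m;

	memcpy(&m, &raw, sizeof(m));
	printf("type=%u controller=%u\n", m.type, m.controller);
	return 0;
}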
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index b3cc3ab63799..6fc156a3918d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -205,7 +205,9 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
205 goto free_uar; 205 goto free_uar;
206 } 206 }
207 207
208 uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT); 208 uar->bf_map = io_mapping_map_wc(priv->bf_mapping,
209 uar->index << PAGE_SHIFT,
210 PAGE_SIZE);
209 if (!uar->bf_map) { 211 if (!uar->bf_map) {
210 err = -ENOMEM; 212 err = -ENOMEM;
211 goto unamp_uar; 213 goto unamp_uar;
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index af8b71b2e5bd..c5d29505f937 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -990,6 +990,7 @@ extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
990extern void drm_crtc_vblank_reset(struct drm_crtc *crtc); 990extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
991extern void drm_crtc_vblank_on(struct drm_crtc *crtc); 991extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
992extern void drm_vblank_cleanup(struct drm_device *dev); 992extern void drm_vblank_cleanup(struct drm_device *dev);
993extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
993extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe); 994extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
994 995
995extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, 996extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 9d03f167007b..5a848e734422 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -622,6 +622,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
622#define DP_BRANCH_OUI_HEADER_SIZE 0xc 622#define DP_BRANCH_OUI_HEADER_SIZE 0xc
623#define DP_RECEIVER_CAP_SIZE 0xf 623#define DP_RECEIVER_CAP_SIZE 0xf
624#define EDP_PSR_RECEIVER_CAP_SIZE 2 624#define EDP_PSR_RECEIVER_CAP_SIZE 2
625#define EDP_DISPLAY_CTL_CAP_SIZE 3
625 626
626void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); 627void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
627void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); 628void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 595f85c392ac..b1755f8db36b 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -92,4 +92,7 @@ extern bool i915_gpu_turbo_disable(void);
92#define I845_TSEG_SIZE_512K (2 << 1) 92#define I845_TSEG_SIZE_512K (2 << 1)
93#define I845_TSEG_SIZE_1M (3 << 1) 93#define I845_TSEG_SIZE_1M (3 << 1)
94 94
95#define INTEL_BSM 0x5c
96#define INTEL_BSM_MASK (0xFFFF << 20)
97
95#endif /* _I915_DRM_H_ */ 98#endif /* _I915_DRM_H_ */
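INTEL_BSM is the PCI configuration offset of the Base of Stolen Memory register, and INTEL_BSM_MASK selects its address bits; exporting both in include/drm/i915_drm.h makes them usable outside the i915 driver proper. A hedged sketch of masking the stolen base out of that dword; the unsigned suffix is added here only to keep the standalone example well-defined:

#include <stdint.h>
#include <stdio.h>

#define INTEL_BSM       0x5c                 /* PCI config offset, from the header above */
#define INTEL_BSM_MASK  (0xFFFFu << 20)      /* header writes 0xFFFF; 'u' added for this sketch */

int main(void)
{
	uint32_t bsm = 0x7f800001;               /* made-up config-space value */
	uint32_t base = bsm & INTEL_BSM_MASK;    /* stolen-memory base lives in the upper bits */

	printf("config offset 0x%x -> stolen base 0x%08x\n", INTEL_BSM, base);
	return 0;
}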
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index e399029b68c5..645ad06b5d52 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -100,14 +100,16 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
100} 100}
101 101
102static inline void __iomem * 102static inline void __iomem *
103io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) 103io_mapping_map_wc(struct io_mapping *mapping,
104 unsigned long offset,
105 unsigned long size)
104{ 106{
105 resource_size_t phys_addr; 107 resource_size_t phys_addr;
106 108
107 BUG_ON(offset >= mapping->size); 109 BUG_ON(offset >= mapping->size);
108 phys_addr = mapping->base + offset; 110 phys_addr = mapping->base + offset;
109 111
110 return ioremap_wc(phys_addr, PAGE_SIZE); 112 return ioremap_wc(phys_addr, size);
111} 113}
112 114
113static inline void 115static inline void
@@ -155,7 +157,9 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
155 157
156/* Non-atomic map/unmap */ 158/* Non-atomic map/unmap */
157static inline void __iomem * 159static inline void __iomem *
158io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) 160io_mapping_map_wc(struct io_mapping *mapping,
161 unsigned long offset,
162 unsigned long size)
159{ 163{
160 return ((char __force __iomem *) mapping) + offset; 164 return ((char __force __iomem *) mapping) + offset;
161} 165}
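Both io_mapping_map_wc() variants, the ioremap-backed one and the plain-pointer fallback above, gain an explicit size argument instead of always mapping PAGE_SIZE, and the mlx4_bf_alloc() call site earlier in this diff is updated to pass PAGE_SIZE explicitly. A userspace analog of the signature change; the names are stand-ins, not the kernel API:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* old helper: the size is implicit, always one page */
static void *map_wc_old(void *base, unsigned long offset)
{
	(void)base; (void)offset;
	return malloc(PAGE_SIZE);          /* stands in for ioremap_wc(base + offset, PAGE_SIZE) */
}

/* new helper: the caller states how much it actually needs */
static void *map_wc_new(void *base, unsigned long offset, unsigned long size)
{
	(void)base; (void)offset;
	return malloc(size);               /* stands in for ioremap_wc(base + offset, size) */
}

int main(void)
{
	void *a = map_wc_old(NULL, 0);
	void *b = map_wc_new(NULL, 0, PAGE_SIZE);   /* as the mlx4 caller now does */

	printf("old=%p new=%p\n", a, b);
	free(a);
	free(b);
	return 0;
}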