 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 2
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4
 drivers/gpu/drm/drm_plane.c | 2
 drivers/gpu/drm/drm_vblank.c | 85
 drivers/gpu/drm/i915/Makefile | 1
 drivers/gpu/drm/i915/dvo_ivch.c | 28
 drivers/gpu/drm/i915/gvt/dmabuf.c | 4
 drivers/gpu/drm/i915/i915_debugfs.c | 56
 drivers/gpu/drm/i915/i915_drv.c | 280
 drivers/gpu/drm/i915/i915_drv.h | 58
 drivers/gpu/drm/i915/i915_gem.c | 127
 drivers/gpu/drm/i915/i915_gem_clflush.c | 2
 drivers/gpu/drm/i915/i915_gem_context.c | 40
 drivers/gpu/drm/i915/i915_gem_context.h | 3
 drivers/gpu/drm/i915/i915_gem_dmabuf.c | 4
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 24
 drivers/gpu/drm/i915/i915_gem_fence_reg.c | 2
 drivers/gpu/drm/i915/i915_gem_gtt.c | 38
 drivers/gpu/drm/i915/i915_gem_internal.c | 8
 drivers/gpu/drm/i915/i915_gem_object.h | 15
 drivers/gpu/drm/i915/i915_gem_request.c | 42
 drivers/gpu/drm/i915/i915_gem_stolen.c | 4
 drivers/gpu/drm/i915/i915_gem_userptr.c | 10
 drivers/gpu/drm/i915/i915_gpu_error.c | 33
 drivers/gpu/drm/i915/i915_ioc32.c | 27
 drivers/gpu/drm/i915/i915_irq.c | 113
 drivers/gpu/drm/i915/i915_oa_cflgt3.c | 4
 drivers/gpu/drm/i915/i915_oa_cnl.c | 4
 drivers/gpu/drm/i915/i915_pci.c | 110
 drivers/gpu/drm/i915/i915_pmu.c | 106
 drivers/gpu/drm/i915/i915_pmu.h | 6
 drivers/gpu/drm/i915/i915_reg.h | 79
 drivers/gpu/drm/i915/i915_syncmap.c | 16
 drivers/gpu/drm/i915/intel_atomic.c | 5
 drivers/gpu/drm/i915/intel_atomic_plane.c | 1
 drivers/gpu/drm/i915/intel_audio.c | 8
 drivers/gpu/drm/i915/intel_bios.c | 107
 drivers/gpu/drm/i915/intel_breadcrumbs.c | 20
 drivers/gpu/drm/i915/intel_cdclk.c | 239
 drivers/gpu/drm/i915/intel_crt.c | 13
 drivers/gpu/drm/i915/intel_ddi.c | 2
 drivers/gpu/drm/i915/intel_device_info.c | 6
 drivers/gpu/drm/i915/intel_device_info.h | 7
 drivers/gpu/drm/i915/intel_display.c | 102
 drivers/gpu/drm/i915/intel_dp.c | 6
 drivers/gpu/drm/i915/intel_dpio_phy.c | 2
 drivers/gpu/drm/i915/intel_drv.h | 19
 drivers/gpu/drm/i915/intel_dvo.c | 28
 drivers/gpu/drm/i915/intel_engine_cs.c | 176
 drivers/gpu/drm/i915/intel_fbc.c | 13
 drivers/gpu/drm/i915/intel_fbdev.c | 13
 drivers/gpu/drm/i915/intel_guc_submission.c | 57
 drivers/gpu/drm/i915/intel_huc.c | 3
 drivers/gpu/drm/i915/intel_lpe_audio.c | 14
 drivers/gpu/drm/i915/intel_lrc.c | 28
 drivers/gpu/drm/i915/intel_lvds.c | 44
 drivers/gpu/drm/i915/intel_mocs.c | 2
 drivers/gpu/drm/i915/intel_overlay.c | 3
 drivers/gpu/drm/i915/intel_panel.c | 12
 drivers/gpu/drm/i915/intel_pm.c | 94
 drivers/gpu/drm/i915/intel_psr.c | 4
 drivers/gpu/drm/i915/intel_ringbuffer.c | 30
 drivers/gpu/drm/i915/intel_ringbuffer.h | 11
 drivers/gpu/drm/i915/intel_runtime_pm.c | 161
 drivers/gpu/drm/i915/intel_sdvo.c | 82
 drivers/gpu/drm/i915/intel_sprite.c | 7
 drivers/gpu/drm/i915/intel_tv.c | 28
 drivers/gpu/drm/i915/intel_uc_fw.c | 5
 drivers/gpu/drm/i915/intel_uncore.c | 23
 drivers/gpu/drm/i915/intel_uncore.h | 5
 drivers/gpu/drm/i915/selftests/huge_gem_object.c | 4
 drivers/gpu/drm/i915/selftests/huge_pages.c | 8
 drivers/gpu/drm/i915/selftests/i915_gem_context.c | 4
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 6
 drivers/gpu/drm/i915/selftests/i915_gem_object.c | 18
 drivers/gpu/drm/i915/selftests/intel_guc.c | 20
 drivers/gpu/drm/i915/selftests/mock_gem_device.c | 6
 drivers/gpu/drm/radeon/radeon_display.c | 2
 drivers/gpu/drm/tegra/dc.c | 2
 include/drm/drm_atomic.h | 2
 include/drm/drm_gem.h | 15
 include/drm/drm_vblank.h | 4
 include/drm/i915_pciids.h | 15
 83 files changed, 1867 insertions(+), 956 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 1eb0861c9147..93f700ab1bfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -210,7 +210,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 	amdgpu_bo_unreserve(new_abo);
 
 	work->base = base;
-	work->target_vblank = target - drm_crtc_vblank_count(crtc) +
+	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
 		amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
 
 	/* we borrow the event spin lock for protecting flip_wrok */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 84f6fe9a448b..246fff33c7bf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3842,7 +3842,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
 
 
 	/* Prepare wait for target vblank early - before the fence-waits */
-	target_vblank = target - drm_crtc_vblank_count(crtc) +
+	target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
 			amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
 
 	/* TODO This might fail and hence better not used, wait
@@ -3988,7 +3988,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			amdgpu_dm_do_flip(
 				crtc,
 				fb,
-				drm_crtc_vblank_count(crtc) + *wait_for_vblank,
+				(uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
 				dm_state->context);
 		}
 
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 22b54663b6e7..09de6ecb3968 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -948,7 +948,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 	if (r)
 		return r;
 
-	current_vblank = drm_crtc_vblank_count(crtc);
+	current_vblank = (u32)drm_crtc_vblank_count(crtc);
 
 	switch (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) {
 	case DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE:
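
Note on the casts above: drm_crtc_vblank_count() now returns a 64-bit value (see the drm_vblank.c change below), while the page-flip UAPI and the hardware frame counters it gets mixed with are still 32-bit, so each caller truncates explicitly and relies on well-defined unsigned wrap-around. A minimal standalone sketch of that arithmetic (the helper name is illustrative, not part of the patch):

#include <stdint.h>

/*
 * Illustrative only: compute a flip target modulo 2^32 from the 64-bit
 * software vblank counter and a 32-bit hardware counter, mirroring the
 * (uint32_t)drm_crtc_vblank_count(crtc) casts in the hunks above.
 */
static uint32_t relative_flip_target(uint64_t sw_vblank_count,
                                     uint32_t hw_vblank_count,
                                     uint32_t target)
{
        /* Unsigned wrap-around is defined, so the truncation is safe. */
        return target - (uint32_t)sw_vblank_count + hw_vblank_count;
}
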
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 32d9bcf5be7f..c781cb426bf1 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -271,7 +271,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
 	store_vblank(dev, pipe, diff, t_vblank, cur_vblank);
 }
 
-static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
+static u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
 {
 	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
@@ -292,11 +292,11 @@ static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
  * This is mostly useful for hardware that can obtain the scanout position, but
  * doesn't have a hardware frame counter.
  */
-u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
+u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	unsigned int pipe = drm_crtc_index(crtc);
-	u32 vblank;
+	u64 vblank;
 	unsigned long flags;
 
 	WARN_ONCE(drm_debug & DRM_UT_VBL && !dev->driver->get_vblank_timestamp,
@@ -347,23 +347,25 @@ void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
 	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
 
 	/*
-	 * Only disable vblank interrupts if they're enabled. This avoids
-	 * calling the ->disable_vblank() operation in atomic context with the
-	 * hardware potentially runtime suspended.
+	 * Update vblank count and disable vblank interrupts only if the
+	 * interrupts were enabled. This avoids calling the ->disable_vblank()
+	 * operation in atomic context with the hardware potentially runtime
+	 * suspended.
 	 */
-	if (vblank->enabled) {
-		__disable_vblank(dev, pipe);
-		vblank->enabled = false;
-	}
+	if (!vblank->enabled)
+		goto out;
 
 	/*
-	 * Always update the count and timestamp to maintain the
+	 * Update the count and timestamp to maintain the
 	 * appearance that the counter has been ticking all along until
 	 * this time. This makes the count account for the entire time
 	 * between drm_crtc_vblank_on() and drm_crtc_vblank_off().
 	 */
 	drm_update_vblank_count(dev, pipe, false);
+	__disable_vblank(dev, pipe);
+	vblank->enabled = false;
 
+out:
 	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
 }
 
@@ -1055,7 +1057,7 @@ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
 {
 	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	int ret;
-	u32 last;
+	u64 last;
 
 	if (WARN_ON(pipe >= dev->num_crtcs))
 		return;
@@ -1235,6 +1237,65 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc)
 }
 EXPORT_SYMBOL(drm_crtc_vblank_on);
 
+/**
+ * drm_vblank_restore - estimate vblanks using timestamps and update it.
+ *
+ * Power management features can cause frame counter resets between vblank
+ * disable and enable. Drivers can then use this function in their
+ * &drm_crtc_funcs.enable_vblank implementation to estimate the vblanks since
+ * the last &drm_crtc_funcs.disable_vblank.
+ *
+ * This function is the legacy version of drm_crtc_vblank_restore().
+ */
+void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
+{
+	ktime_t t_vblank;
+	struct drm_vblank_crtc *vblank;
+	int framedur_ns;
+	u64 diff_ns;
+	u32 cur_vblank, diff = 1;
+	int count = DRM_TIMESTAMP_MAXRETRIES;
+
+	if (WARN_ON(pipe >= dev->num_crtcs))
+		return;
+
+	assert_spin_locked(&dev->vbl_lock);
+	assert_spin_locked(&dev->vblank_time_lock);
+
+	vblank = &dev->vblank[pipe];
+	WARN_ONCE((drm_debug & DRM_UT_VBL) && !vblank->framedur_ns,
+		  "Cannot compute missed vblanks without frame duration\n");
+	framedur_ns = vblank->framedur_ns;
+
+	do {
+		cur_vblank = __get_vblank_counter(dev, pipe);
+		drm_get_last_vbltimestamp(dev, pipe, &t_vblank, false);
+	} while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0);
+
+	diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time));
+	if (framedur_ns)
+		diff = DIV_ROUND_CLOSEST_ULL(diff_ns, framedur_ns);
+
+
+	DRM_DEBUG_VBL("missed %d vblanks in %lld ns, frame duration=%d ns, hw_diff=%d\n",
+		      diff, diff_ns, framedur_ns, cur_vblank - vblank->last);
+	store_vblank(dev, pipe, diff, t_vblank, cur_vblank);
+}
+EXPORT_SYMBOL(drm_vblank_restore);
+
+/**
+ * drm_crtc_vblank_restore - estimate vblanks using timestamps and update it.
+ * Power management features can cause frame counter resets between vblank
+ * disable and enable. Drivers can then use this function in their
+ * &drm_crtc_funcs.enable_vblank implementation to estimate the vblanks since
+ * the last &drm_crtc_funcs.disable_vblank.
+ */
+void drm_crtc_vblank_restore(struct drm_crtc *crtc)
+{
+	drm_vblank_restore(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_restore);
+
 static void drm_legacy_vblank_pre_modeset(struct drm_device *dev,
 					  unsigned int pipe)
 {
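
The kernel-doc above says drivers should call the new restore helper from their &drm_crtc_funcs.enable_vblank hook. A hedged sketch of such a hook (the hardware unmask helper is hypothetical; only drm_crtc_vblank_restore() comes from this patch):

#include <drm/drm_vblank.h>

static void example_hw_unmask_vblank_irq(struct drm_crtc *crtc); /* hypothetical */

static int example_enable_vblank(struct drm_crtc *crtc)
{
        /*
         * Estimate the vblanks missed while interrupts were disabled,
         * in case power management reset the hardware frame counter.
         */
        drm_crtc_vblank_restore(crtc);

        example_hw_unmask_vblank_irq(crtc);
        return 0;
}
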
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 3bddd8a06806..f55cc028b2eb 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -17,6 +17,7 @@ subdir-ccflags-y += $(call cc-disable-warning, unused-parameter)
 subdir-ccflags-y += $(call cc-disable-warning, type-limits)
 subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
 subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough)
+subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
 subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
 
 # Fine grained warnings disable
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 4950b82f5b49..c73aff163908 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -59,28 +59,28 @@
  * This must not be set while VR01_DVO_BYPASS_ENABLE is set.
  */
 # define VR01_LCD_ENABLE	(1 << 2)
-/** Enables the DVO repeater. */
+/* Enables the DVO repeater. */
 # define VR01_DVO_BYPASS_ENABLE	(1 << 1)
-/** Enables the DVO clock */
+/* Enables the DVO clock */
 # define VR01_DVO_ENABLE	(1 << 0)
-/** Enable dithering for 18bpp panels. Not documented. */
+/* Enable dithering for 18bpp panels. Not documented. */
 # define VR01_DITHER_ENABLE	(1 << 4)
 
 /*
  * LCD Interface Format
  */
 #define VR10	0x10
-/** Enables LVDS output instead of CMOS */
+/* Enables LVDS output instead of CMOS */
 # define VR10_LVDS_ENABLE	(1 << 4)
-/** Enables 18-bit LVDS output. */
+/* Enables 18-bit LVDS output. */
 # define VR10_INTERFACE_1X18	(0 << 2)
-/** Enables 24-bit LVDS or CMOS output */
+/* Enables 24-bit LVDS or CMOS output */
 # define VR10_INTERFACE_1X24	(1 << 2)
-/** Enables 2x18-bit LVDS or CMOS output. */
+/* Enables 2x18-bit LVDS or CMOS output. */
 # define VR10_INTERFACE_2X18	(2 << 2)
-/** Enables 2x24-bit LVDS output */
+/* Enables 2x24-bit LVDS output */
 # define VR10_INTERFACE_2X24	(3 << 2)
-/** Mask that defines the depth of the pipeline */
+/* Mask that defines the depth of the pipeline */
 # define VR10_INTERFACE_DEPTH_MASK	(3 << 2)
 
 /*
@@ -97,7 +97,7 @@
  * Panel power down status
  */
 #define VR30	0x30
-/** Read only bit indicating that the panel is not in a safe poweroff state. */
+/* Read only bit indicating that the panel is not in a safe poweroff state. */
 # define VR30_PANEL_ON	(1 << 15)
 
 #define VR40	0x40
@@ -183,7 +183,7 @@ struct ivch_priv {
 
 
 static void ivch_dump_regs(struct intel_dvo_device *dvo);
-/**
+/*
  * Reads a register on the ivch.
  *
  * Each of the 256 registers are 16 bits long.
@@ -230,7 +230,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 	return false;
 }
 
-/** Writes a 16-bit register on the ivch */
+/* Writes a 16-bit register on the ivch */
 static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
 {
 	struct ivch_priv *priv = dvo->dev_priv;
@@ -258,7 +258,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
 	return false;
 }
 
-/** Probes the given bus and slave address for an ivch */
+/* Probes the given bus and slave address for an ivch */
 static bool ivch_init(struct intel_dvo_device *dvo,
 		      struct i2c_adapter *adapter)
 {
@@ -338,7 +338,7 @@ static void ivch_reset(struct intel_dvo_device *dvo)
 		ivch_write(dvo, backup_addresses[i], priv->reg_backup[i]);
 }
 
-/** Sets the power state of the panel connected to the ivch */
+/* Sets the power state of the panel connected to the ivch */
 static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
 {
 	int i;
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 2fb7b34ef561..9a471b0afb15 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -162,8 +162,8 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 			info->size << PAGE_SHIFT);
 	i915_gem_object_init(obj, &intel_vgpu_gem_ops);
 
-	obj->base.read_domains = I915_GEM_DOMAIN_GTT;
-	obj->base.write_domain = 0;
+	obj->read_domains = I915_GEM_DOMAIN_GTT;
+	obj->write_domain = 0;
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 		unsigned int tiling_mode = 0;
 		unsigned int stride = 0;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3849ded354e3..05b41045b8f9 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -49,6 +49,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
 
 	intel_device_info_dump_flags(info, &p);
 	intel_device_info_dump_runtime(info, &p);
+	intel_driver_caps_print(&dev_priv->caps, &p);
 
 	kernel_param_lock(THIS_MODULE);
 	i915_params_dump(&i915_modparams, &p);
@@ -149,8 +150,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   get_global_flag(obj),
 		   get_pin_mapped_flag(obj),
 		   obj->base.size / 1024,
-		   obj->base.read_domains,
-		   obj->base.write_domain,
+		   obj->read_domains,
+		   obj->write_domain,
 		   i915_cache_level_str(dev_priv, obj->cache_level),
 		   obj->mm.dirty ? " dirty" : "",
 		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
@@ -1460,19 +1461,6 @@ static int gen6_drpc_info(struct seq_file *m)
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	u32 gt_core_status, rcctl1, rc6vids = 0;
 	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
-	unsigned forcewake_count;
-	int count = 0;
-
-	forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count);
-	if (forcewake_count) {
-		seq_puts(m, "RC information inaccurate because somebody "
-			    "holds a forcewake reference \n");
-	} else {
-		/* NB: we cannot use forcewake, else we read the wrong values */
-		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
-			udelay(10);
-		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
-	}
 
 	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
 	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
@@ -1483,9 +1471,12 @@ static int gen6_drpc_info(struct seq_file *m)
 		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
 	}
 
-	mutex_lock(&dev_priv->pcu_lock);
-	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
-	mutex_unlock(&dev_priv->pcu_lock);
+	if (INTEL_GEN(dev_priv) <= 7) {
+		mutex_lock(&dev_priv->pcu_lock);
+		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
+				       &rc6vids);
+		mutex_unlock(&dev_priv->pcu_lock);
+	}
 
 	seq_printf(m, "RC1e Enabled: %s\n",
 		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
@@ -1541,12 +1532,15 @@ static int gen6_drpc_info(struct seq_file *m)
 	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
 	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
 
-	seq_printf(m, "RC6 voltage: %dmV\n",
-		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
-	seq_printf(m, "RC6+ voltage: %dmV\n",
-		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
-	seq_printf(m, "RC6++ voltage: %dmV\n",
-		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
+	if (INTEL_GEN(dev_priv) <= 7) {
+		seq_printf(m, "RC6 voltage: %dmV\n",
+			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
+		seq_printf(m, "RC6+ voltage: %dmV\n",
+			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
+		seq_printf(m, "RC6++ voltage: %dmV\n",
+			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
+	}
+
 	return i915_forcewake_domains(m, NULL);
 }
 
@@ -1599,7 +1593,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
 
 	if (fbc->work.scheduled)
-		seq_printf(m, "FBC worker scheduled on vblank %u, now %llu\n",
+		seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n",
 			   fbc->work.scheduled_vblank,
 			   drm_crtc_vblank_count(&fbc->crtc->base));
 
@@ -2338,7 +2332,6 @@ static int i915_guc_info(struct seq_file *m, void *data)
 		return -ENODEV;
 
 	GEM_BUG_ON(!guc->execbuf_client);
-	GEM_BUG_ON(!guc->preempt_client);
 
 	seq_printf(m, "Doorbell map:\n");
 	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
@@ -2346,8 +2339,11 @@ static int i915_guc_info(struct seq_file *m, void *data)
 
 	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
 	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
-	seq_printf(m, "\nGuC preempt client @ %p:\n", guc->preempt_client);
-	i915_guc_client_info(m, dev_priv, guc->preempt_client);
+	if (guc->preempt_client) {
+		seq_printf(m, "\nGuC preempt client @ %p:\n",
+			   guc->preempt_client);
+		i915_guc_client_info(m, dev_priv, guc->preempt_client);
+	}
 
 	i915_guc_log_info(m, dev_priv);
 
@@ -4083,10 +4079,8 @@ i915_drop_caches_set(void *data, u64 val)
 	if (val & DROP_IDLE)
 		drain_delayed_work(&dev_priv->gt.idle_work);
 
-	if (val & DROP_FREED) {
-		synchronize_rcu();
+	if (val & DROP_FREED)
 		i915_gem_drain_freed_objects(dev_priv);
-	}
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e9f1daf258fe..d09f8e661fbd 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -122,10 +122,90 @@ static bool i915_error_injected(struct drm_i915_private *dev_priv)
 		      i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
 		      fmt, ##__VA_ARGS__)
 
+/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
+static enum intel_pch
+intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
+{
+	switch (id) {
+	case INTEL_PCH_IBX_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+		WARN_ON(!IS_GEN5(dev_priv));
+		return PCH_IBX;
+	case INTEL_PCH_CPT_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+		WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+		return PCH_CPT;
+	case INTEL_PCH_PPT_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found PantherPoint PCH\n");
+		WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+		/* PantherPoint is CPT compatible */
+		return PCH_CPT;
+	case INTEL_PCH_LPT_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+		return PCH_LPT;
+	case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+		return PCH_LPT;
+	case INTEL_PCH_WPT_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
+		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+		/* WildcatPoint is LPT compatible */
+		return PCH_LPT;
+	case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
+		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+		/* WildcatPoint is LPT compatible */
+		return PCH_LPT;
+	case INTEL_PCH_SPT_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+		return PCH_SPT;
+	case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+		return PCH_SPT;
+	case INTEL_PCH_KBP_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
+		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
+			!IS_COFFEELAKE(dev_priv));
+		return PCH_KBP;
+	case INTEL_PCH_CNP_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
+		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+		return PCH_CNP;
+	case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
+		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+		return PCH_CNP;
+	case INTEL_PCH_ICP_DEVICE_ID_TYPE:
+		DRM_DEBUG_KMS("Found Ice Lake PCH\n");
+		WARN_ON(!IS_ICELAKE(dev_priv));
+		return PCH_ICP;
+	default:
+		return PCH_NONE;
+	}
+}
 
-static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
+static bool intel_is_virt_pch(unsigned short id,
+			      unsigned short svendor, unsigned short sdevice)
 {
-	enum intel_pch ret = PCH_NOP;
+	return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
+		id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
+		(id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
+		 svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
+		 sdevice == PCI_SUBDEVICE_ID_QEMU));
+}
+
+static unsigned short
+intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
+{
+	unsigned short id = 0;
 
 	/*
 	 * In a virtualized passthrough environment we can be in a
@@ -134,28 +214,25 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
 	 * make an educated guess as to which PCH is really there.
 	 */
 
-	if (IS_GEN5(dev_priv)) {
-		ret = PCH_IBX;
-		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
-	} else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
-		ret = PCH_CPT;
-		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
-	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		ret = PCH_LPT;
-		if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
-			dev_priv->pch_id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
-		else
-			dev_priv->pch_id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
-		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
-	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-		ret = PCH_SPT;
-		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
-	} else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
-		ret = PCH_CNP;
-		DRM_DEBUG_KMS("Assuming CannonPoint PCH\n");
-	}
+	if (IS_GEN5(dev_priv))
+		id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
+	else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+		id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
+	else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+		id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
+	else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+		id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
+	else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv))
+		id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+
+	if (id)
+		DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
+	else
+		DRM_DEBUG_KMS("Assuming no PCH\n");
 
-	return ret;
+	return id;
 }
 
 static void intel_detect_pch(struct drm_i915_private *dev_priv)
@@ -183,102 +260,32 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
 	 */
 	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
 		unsigned short id;
+		enum intel_pch pch_type;
 
 		if (pch->vendor != PCI_VENDOR_ID_INTEL)
 			continue;
 
 		id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
 
-		dev_priv->pch_id = id;
-
-		if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
-			dev_priv->pch_type = PCH_IBX;
-			DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
-			WARN_ON(!IS_GEN5(dev_priv));
-		} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
-			dev_priv->pch_type = PCH_CPT;
-			DRM_DEBUG_KMS("Found CougarPoint PCH\n");
-			WARN_ON(!IS_GEN6(dev_priv) &&
-				!IS_IVYBRIDGE(dev_priv));
-		} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
-			/* PantherPoint is CPT compatible */
-			dev_priv->pch_type = PCH_CPT;
-			DRM_DEBUG_KMS("Found PantherPoint PCH\n");
-			WARN_ON(!IS_GEN6(dev_priv) &&
-				!IS_IVYBRIDGE(dev_priv));
-		} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
-			dev_priv->pch_type = PCH_LPT;
-			DRM_DEBUG_KMS("Found LynxPoint PCH\n");
-			WARN_ON(!IS_HASWELL(dev_priv) &&
-				!IS_BROADWELL(dev_priv));
-			WARN_ON(IS_HSW_ULT(dev_priv) ||
-				IS_BDW_ULT(dev_priv));
-		} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
-			dev_priv->pch_type = PCH_LPT;
-			DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
-			WARN_ON(!IS_HASWELL(dev_priv) &&
-				!IS_BROADWELL(dev_priv));
-			WARN_ON(!IS_HSW_ULT(dev_priv) &&
-				!IS_BDW_ULT(dev_priv));
-		} else if (id == INTEL_PCH_WPT_DEVICE_ID_TYPE) {
-			/* WildcatPoint is LPT compatible */
-			dev_priv->pch_type = PCH_LPT;
-			DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
-			WARN_ON(!IS_HASWELL(dev_priv) &&
-				!IS_BROADWELL(dev_priv));
-			WARN_ON(IS_HSW_ULT(dev_priv) ||
-				IS_BDW_ULT(dev_priv));
-		} else if (id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) {
-			/* WildcatPoint is LPT compatible */
-			dev_priv->pch_type = PCH_LPT;
-			DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
-			WARN_ON(!IS_HASWELL(dev_priv) &&
-				!IS_BROADWELL(dev_priv));
-			WARN_ON(!IS_HSW_ULT(dev_priv) &&
-				!IS_BDW_ULT(dev_priv));
-		} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
-			dev_priv->pch_type = PCH_SPT;
-			DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
-			WARN_ON(!IS_SKYLAKE(dev_priv) &&
-				!IS_KABYLAKE(dev_priv));
-		} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
-			dev_priv->pch_type = PCH_SPT;
-			DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
-			WARN_ON(!IS_SKYLAKE(dev_priv) &&
-				!IS_KABYLAKE(dev_priv));
-		} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
-			dev_priv->pch_type = PCH_KBP;
-			DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
-			WARN_ON(!IS_SKYLAKE(dev_priv) &&
-				!IS_KABYLAKE(dev_priv) &&
-				!IS_COFFEELAKE(dev_priv));
-		} else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
-			dev_priv->pch_type = PCH_CNP;
-			DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
-			WARN_ON(!IS_CANNONLAKE(dev_priv) &&
-				!IS_COFFEELAKE(dev_priv));
-		} else if (id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
-			dev_priv->pch_type = PCH_CNP;
-			DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
-			WARN_ON(!IS_CANNONLAKE(dev_priv) &&
-				!IS_COFFEELAKE(dev_priv));
-		} else if (id == INTEL_PCH_ICP_DEVICE_ID_TYPE) {
-			dev_priv->pch_type = PCH_ICP;
-			DRM_DEBUG_KMS("Found Ice Lake PCH\n");
-			WARN_ON(!IS_ICELAKE(dev_priv));
-		} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
-			   id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
-			   (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
-			    pch->subsystem_vendor ==
-				    PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
-			    pch->subsystem_device ==
-				    PCI_SUBDEVICE_ID_QEMU)) {
-			dev_priv->pch_type = intel_virt_detect_pch(dev_priv);
-		} else {
-			continue;
+		pch_type = intel_pch_type(dev_priv, id);
+		if (pch_type != PCH_NONE) {
+			dev_priv->pch_type = pch_type;
+			dev_priv->pch_id = id;
+			break;
+		} else if (intel_is_virt_pch(id, pch->subsystem_vendor,
+					     pch->subsystem_device)) {
+			id = intel_virt_detect_pch(dev_priv);
+			if (id) {
+				pch_type = intel_pch_type(dev_priv, id);
+				if (WARN_ON(pch_type == PCH_NONE))
+					pch_type = PCH_NOP;
+			} else {
+				pch_type = PCH_NOP;
+			}
+			dev_priv->pch_type = pch_type;
+			dev_priv->pch_id = id;
+			break;
 		}
-
-		break;
 	}
 	if (!pch)
 		DRM_DEBUG_KMS("No PCH found.\n");
@@ -286,8 +293,8 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
 	pci_dev_put(pch);
 }
 
-static int i915_getparam(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv)
+static int i915_getparam_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct pci_dev *pdev = dev_priv->drm.pdev;
@@ -381,13 +388,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = i915_gem_mmap_gtt_version();
 		break;
 	case I915_PARAM_HAS_SCHEDULER:
-		value = 0;
-		if (dev_priv->engine[RCS] && dev_priv->engine[RCS]->schedule) {
-			value |= I915_SCHEDULER_CAP_ENABLED;
-			value |= I915_SCHEDULER_CAP_PRIORITY;
-			if (HAS_LOGICAL_RING_PREEMPTION(dev_priv))
-				value |= I915_SCHEDULER_CAP_PREEMPTION;
-		}
+		value = dev_priv->caps.scheduler;
 		break;
 
 	case I915_PARAM_MMAP_VERSION:
@@ -879,6 +880,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
 /**
  * i915_driver_init_early - setup state not requiring device access
  * @dev_priv: device private
+ * @ent: the matching pci_device_id
  *
  * Initialize everything that is a "SW-only" state, that is state not
  * requiring accessing the device or exposing the driver via kernel internal
@@ -904,11 +906,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 
 	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
 		     sizeof(device_info->platform_mask) * BITS_PER_BYTE);
-	device_info->platform_mask = BIT(device_info->platform);
-
 	BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
-	device_info->gen_mask = BIT(device_info->gen - 1);
-
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->gpu_error.lock);
 	mutex_init(&dev_priv->backlight_lock);
@@ -1446,19 +1444,7 @@ void i915_driver_unload(struct drm_device *dev)
 
 	intel_modeset_cleanup(dev);
 
-	/*
-	 * free the memory space allocated for the child device
-	 * config parsed from VBT
-	 */
-	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
-		kfree(dev_priv->vbt.child_dev);
-		dev_priv->vbt.child_dev = NULL;
-		dev_priv->vbt.child_dev_num = 0;
-	}
-	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
-	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
-	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
-	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+	intel_bios_cleanup(dev_priv);
 
 	vga_switcheroo_unregister_client(pdev);
 	vga_client_register(pdev, NULL, NULL, NULL);
@@ -1925,7 +1911,6 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
 	ret = i915_gem_reset_prepare(i915);
 	if (ret) {
 		dev_err(i915->drm.dev, "GPU recovery failed\n");
-		intel_gpu_reset(i915, ALL_ENGINES);
 		goto taint;
 	}
 
@@ -1957,7 +1942,8 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
 	 */
 	ret = i915_ggtt_enable_hw(i915);
 	if (ret) {
-		DRM_ERROR("Failed to re-enable GGTT following reset %d\n", ret);
+		DRM_ERROR("Failed to re-enable GGTT following reset (%d)\n",
+			  ret);
 		goto error;
 	}
 
@@ -1974,7 +1960,8 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
 	 */
 	ret = i915_gem_init_hw(i915);
 	if (ret) {
-		DRM_ERROR("Failed hw init on reset %d\n", ret);
+		DRM_ERROR("Failed to initialise HW following reset (%d)\n",
+			  ret);
 		goto error;
 	}
 
@@ -2006,6 +1993,7 @@ taint:
 error:
 	i915_gem_set_wedged(i915);
 	i915_gem_retire_requests(i915);
+	intel_gpu_reset(i915, ALL_ENGINES);
 	goto finish;
 }
 
@@ -2795,7 +2783,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
@@ -2807,8 +2795,8 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -2827,11 +2815,11 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
+	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
 	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ad1fc845cd1b..92883a40bdd5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -83,8 +83,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20180207"
-#define DRIVER_TIMESTAMP	1517988364
+#define DRIVER_DATE		"20180221"
+#define DRIVER_TIMESTAMP	1519219289
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -472,6 +472,7 @@ struct i915_gpu_state {
 	u32 reset_count;
 	u32 suspend_count;
 	struct intel_device_info device_info;
+	struct intel_driver_caps driver_caps;
 	struct i915_params params;
 
 	struct i915_error_uc {
@@ -666,6 +667,7 @@ struct intel_fbc {
 	 */
 	struct intel_fbc_state_cache {
 		struct i915_vma *vma;
+		unsigned long flags;
 
 		struct {
 			unsigned int mode_flags;
@@ -704,6 +706,7 @@ struct intel_fbc {
 	 */
 	struct intel_fbc_reg_params {
 		struct i915_vma *vma;
+		unsigned long flags;
 
 		struct {
 			enum pipe pipe;
@@ -722,7 +725,7 @@ struct intel_fbc {
 
 	struct intel_fbc_work {
 		bool scheduled;
-		u32 scheduled_vblank;
+		u64 scheduled_vblank;
 		struct work_struct work;
 	} work;
 
@@ -946,6 +949,8 @@ struct intel_rps {
 
 struct intel_rc6 {
 	bool enabled;
+	u64 prev_hw_residency[4];
+	u64 cur_residency[4];
 };
 
 struct intel_llc_pstate {
@@ -1092,6 +1097,11 @@ struct i915_gem_mm {
 	struct llist_head free_list;
 	struct work_struct free_work;
 	spinlock_t free_lock;
+	/**
+	 * Count of objects pending destructions. Used to skip needlessly
+	 * waiting on an RCU barrier if no objects are waiting to be freed.
+	 */
+	atomic_t free_count;
 
 	/**
 	 * Small stash of WC pages
@@ -1356,6 +1366,7 @@ struct intel_vbt_data {
 		u32 size;
 		u8 *data;
 		const u8 *sequence[MIPI_SEQ_MAX];
+		u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
 	} dsi;
 
 	int crt_ddc_pin;
@@ -1815,6 +1826,7 @@ struct drm_i915_private {
 	struct kmem_cache *priorities;
 
 	const struct intel_device_info info;
+	struct intel_driver_caps caps;
 
 	/**
 	 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
@@ -2419,12 +2431,16 @@ enum hdmi_force_audio {
  * We have one bit per pipe and per scanout plane type.
  */
 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
-#define INTEL_FRONTBUFFER(pipe, plane_id) \
-	(1 << ((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
+	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
+	BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
+	BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
+})
 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \
-	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+	BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
-	(0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+	GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
+		INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
 
 /*
  * Optimised SGL iterator for GEM objects
@@ -2799,7 +2815,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 
 #define HAS_FW_BLC(dev_priv)	(INTEL_GEN(dev_priv) > 2)
 #define HAS_FBC(dev_priv)	((dev_priv)->info.has_fbc)
-#define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH_DISPLAY(dev_priv) && INTEL_INFO(dev_priv)->gen >= 7)
+#define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
 
 #define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
 
@@ -2862,19 +2878,20 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */
 
 #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
+#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
 #define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
 #define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
 #define HAS_PCH_CNP_LP(dev_priv) \
-	((dev_priv)->pch_id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
+	(INTEL_PCH_ID(dev_priv) == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
 #define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
 #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
 #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
 #define HAS_PCH_LPT_LP(dev_priv) \
-	((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
-	 (dev_priv)->pch_id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
+	(INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
+	 INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
 #define HAS_PCH_LPT_H(dev_priv) \
-	((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
-	 (dev_priv)->pch_id == INTEL_PCH_WPT_DEVICE_ID_TYPE)
+	(INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
+	 INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_DEVICE_ID_TYPE)
 #define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
 #define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
@@ -3081,10 +3098,10 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
-int i915_gem_execbuffer(struct drm_device *dev, void *data,
-			struct drm_file *file_priv);
-int i915_gem_execbuffer2(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv);
+int i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv);
 int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
@@ -3128,6 +3145,9 @@ void i915_gem_free_object(struct drm_gem_object *obj);
 
 static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
 {
+	if (!atomic_read(&i915->mm.free_count))
+		return;
+
 	/* A single pass should suffice to release all the freed objects (along
 	 * most call paths) , but be a little more paranoid in that freeing
 	 * the objects does take a little amount of time, during which the rcu
@@ -3399,7 +3419,8 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
 struct i915_vma * __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
-				     const struct i915_ggtt_view *view);
+				     const struct i915_ggtt_view *view,
+				     unsigned int flags);
 void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 				int align);
@@ -3675,6 +3696,7 @@ extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
3675 3696
3676/* intel_bios.c */ 3697/* intel_bios.c */
3677void intel_bios_init(struct drm_i915_private *dev_priv); 3698void intel_bios_init(struct drm_i915_private *dev_priv);
3699void intel_bios_cleanup(struct drm_i915_private *dev_priv);
3678bool intel_bios_is_valid_vbt(const void *buf, size_t size); 3700bool intel_bios_is_valid_vbt(const void *buf, size_t size);
3679bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); 3701bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
3680bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); 3702bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1632f18e6a64..43afa1c1b14f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -240,8 +240,8 @@ err_phys:
240 240
241static void __start_cpu_write(struct drm_i915_gem_object *obj) 241static void __start_cpu_write(struct drm_i915_gem_object *obj)
242{ 242{
243 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 243 obj->read_domains = I915_GEM_DOMAIN_CPU;
244 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 244 obj->write_domain = I915_GEM_DOMAIN_CPU;
245 if (cpu_write_needs_clflush(obj)) 245 if (cpu_write_needs_clflush(obj))
246 obj->cache_dirty = true; 246 obj->cache_dirty = true;
247} 247}
@@ -257,7 +257,7 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
257 obj->mm.dirty = false; 257 obj->mm.dirty = false;
258 258
259 if (needs_clflush && 259 if (needs_clflush &&
260 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 && 260 (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
261 !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) 261 !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
262 drm_clflush_sg(pages); 262 drm_clflush_sg(pages);
263 263
@@ -703,10 +703,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
703 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 703 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
704 struct i915_vma *vma; 704 struct i915_vma *vma;
705 705
706 if (!(obj->base.write_domain & flush_domains)) 706 if (!(obj->write_domain & flush_domains))
707 return; 707 return;
708 708
709 switch (obj->base.write_domain) { 709 switch (obj->write_domain) {
710 case I915_GEM_DOMAIN_GTT: 710 case I915_GEM_DOMAIN_GTT:
711 i915_gem_flush_ggtt_writes(dev_priv); 711 i915_gem_flush_ggtt_writes(dev_priv);
712 712
@@ -731,7 +731,7 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
731 break; 731 break;
732 } 732 }
733 733
734 obj->base.write_domain = 0; 734 obj->write_domain = 0;
735} 735}
736 736
737static inline int 737static inline int
@@ -831,7 +831,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
831 * anyway again before the next pread happens. 831 * anyway again before the next pread happens.
832 */ 832 */
833 if (!obj->cache_dirty && 833 if (!obj->cache_dirty &&
834 !(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) 834 !(obj->read_domains & I915_GEM_DOMAIN_CPU))
835 *needs_clflush = CLFLUSH_BEFORE; 835 *needs_clflush = CLFLUSH_BEFORE;
836 836
837out: 837out:
@@ -890,7 +890,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
890 * Same trick applies to invalidate partially written 890 * Same trick applies to invalidate partially written
891 * cachelines read before writing. 891 * cachelines read before writing.
892 */ 892 */
893 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) 893 if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
894 *needs_clflush |= CLFLUSH_BEFORE; 894 *needs_clflush |= CLFLUSH_BEFORE;
895 } 895 }
896 896
@@ -2391,8 +2391,8 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2391 * wasn't in the GTT, there shouldn't be any way it could have been in 2391 * wasn't in the GTT, there shouldn't be any way it could have been in
2392 * a GPU cache 2392 * a GPU cache
2393 */ 2393 */
2394 GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); 2394 GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2395 GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); 2395 GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
2396 2396
2397 st = kmalloc(sizeof(*st), GFP_KERNEL); 2397 st = kmalloc(sizeof(*st), GFP_KERNEL);
2398 if (st == NULL) 2398 if (st == NULL)
@@ -3205,6 +3205,9 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
3205 intel_engine_dump(engine, &p, "%s\n", engine->name); 3205 intel_engine_dump(engine, &p, "%s\n", engine->name);
3206 } 3206 }
3207 3207
3208 set_bit(I915_WEDGED, &i915->gpu_error.flags);
3209 smp_mb__after_atomic();
3210
3208 /* 3211 /*
3209 * First, stop submission to hw, but do not yet complete requests by 3212 * First, stop submission to hw, but do not yet complete requests by
3210 * rolling the global seqno forward (since this would complete requests 3213 * rolling the global seqno forward (since this would complete requests
@@ -3229,8 +3232,11 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
3229 * start to complete all requests. 3232 * start to complete all requests.
3230 */ 3233 */
3231 engine->submit_request = nop_complete_submit_request; 3234 engine->submit_request = nop_complete_submit_request;
3235 engine->schedule = NULL;
3232 } 3236 }
3233 3237
3238 i915->caps.scheduler = 0;
3239
3234 /* 3240 /*
3235 * Make sure no request can slip through without getting completed by 3241 * Make sure no request can slip through without getting completed by
3236 * either this call here to intel_engine_init_global_seqno, or the one 3242 * either this call here to intel_engine_init_global_seqno, or the one
@@ -3241,7 +3247,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
3241 for_each_engine(engine, i915, id) { 3247 for_each_engine(engine, i915, id) {
3242 unsigned long flags; 3248 unsigned long flags;
3243 3249
3244 /* Mark all pending requests as complete so that any concurrent 3250 /*
3251 * Mark all pending requests as complete so that any concurrent
3245 * (lockless) lookup doesn't try and wait upon the request as we 3252 * (lockless) lookup doesn't try and wait upon the request as we
3246 * reset it. 3253 * reset it.
3247 */ 3254 */
@@ -3251,7 +3258,6 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
3251 spin_unlock_irqrestore(&engine->timeline->lock, flags); 3258 spin_unlock_irqrestore(&engine->timeline->lock, flags);
3252 } 3259 }
3253 3260
3254 set_bit(I915_WEDGED, &i915->gpu_error.flags);
3255 wake_up_all(&i915->gpu_error.reset_queue); 3261 wake_up_all(&i915->gpu_error.reset_queue);
3256} 3262}
3257 3263
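Taken together, the i915_gem_set_wedged() hunks above publish the wedged state before the submission backends are torn down, so a concurrent submitter that races into the old path can still observe the flag. A minimal sketch of the resulting ordering, assuming the names used in the diff (illustrative only, not the full function body):

	set_bit(I915_WEDGED, &i915->gpu_error.flags);
	smp_mb__after_atomic();	/* flag visible before the hooks are swapped */

	for_each_engine(engine, i915, id) {
		/* new submissions now complete immediately as no-ops */
		engine->submit_request = nop_complete_submit_request;
		engine->schedule = NULL;	/* stop priority adjustment */
	}
	i915->caps.scheduler = 0;	/* withdraw scheduler caps from userspace */

	/* ... mark all pending requests complete, then wake waiters ... */
	wake_up_all(&i915->gpu_error.reset_queue);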
@@ -3697,7 +3703,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
3697 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 3703 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
3698 if (obj->cache_dirty) 3704 if (obj->cache_dirty)
3699 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE); 3705 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
3700 obj->base.write_domain = 0; 3706 obj->write_domain = 0;
3701} 3707}
3702 3708
3703void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj) 3709void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
@@ -3734,7 +3740,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
3734 if (ret) 3740 if (ret)
3735 return ret; 3741 return ret;
3736 3742
3737 if (obj->base.write_domain == I915_GEM_DOMAIN_WC) 3743 if (obj->write_domain == I915_GEM_DOMAIN_WC)
3738 return 0; 3744 return 0;
3739 3745
3740 /* Flush and acquire obj->pages so that we are coherent through 3746 /* Flush and acquire obj->pages so that we are coherent through
@@ -3755,17 +3761,17 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
3755 * coherent writes from the GPU, by effectively invalidating the 3761 * coherent writes from the GPU, by effectively invalidating the
3756 * WC domain upon first access. 3762 * WC domain upon first access.
3757 */ 3763 */
3758 if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0) 3764 if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
3759 mb(); 3765 mb();
3760 3766
3761 /* It should now be out of any other write domains, and we can update 3767 /* It should now be out of any other write domains, and we can update
3762 * the domain values for our changes. 3768 * the domain values for our changes.
3763 */ 3769 */
3764 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0); 3770 GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
3765 obj->base.read_domains |= I915_GEM_DOMAIN_WC; 3771 obj->read_domains |= I915_GEM_DOMAIN_WC;
3766 if (write) { 3772 if (write) {
3767 obj->base.read_domains = I915_GEM_DOMAIN_WC; 3773 obj->read_domains = I915_GEM_DOMAIN_WC;
3768 obj->base.write_domain = I915_GEM_DOMAIN_WC; 3774 obj->write_domain = I915_GEM_DOMAIN_WC;
3769 obj->mm.dirty = true; 3775 obj->mm.dirty = true;
3770 } 3776 }
3771 3777
@@ -3797,7 +3803,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3797 if (ret) 3803 if (ret)
3798 return ret; 3804 return ret;
3799 3805
3800 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) 3806 if (obj->write_domain == I915_GEM_DOMAIN_GTT)
3801 return 0; 3807 return 0;
3802 3808
3803 /* Flush and acquire obj->pages so that we are coherent through 3809 /* Flush and acquire obj->pages so that we are coherent through
@@ -3818,17 +3824,17 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3818 * coherent writes from the GPU, by effectively invalidating the 3824 * coherent writes from the GPU, by effectively invalidating the
3819 * GTT domain upon first access. 3825 * GTT domain upon first access.
3820 */ 3826 */
3821 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) 3827 if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
3822 mb(); 3828 mb();
3823 3829
3824 /* It should now be out of any other write domains, and we can update 3830 /* It should now be out of any other write domains, and we can update
3825 * the domain values for our changes. 3831 * the domain values for our changes.
3826 */ 3832 */
3827 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 3833 GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3828 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3834 obj->read_domains |= I915_GEM_DOMAIN_GTT;
3829 if (write) { 3835 if (write) {
3830 obj->base.read_domains = I915_GEM_DOMAIN_GTT; 3836 obj->read_domains = I915_GEM_DOMAIN_GTT;
3831 obj->base.write_domain = I915_GEM_DOMAIN_GTT; 3837 obj->write_domain = I915_GEM_DOMAIN_GTT;
3832 obj->mm.dirty = true; 3838 obj->mm.dirty = true;
3833 } 3839 }
3834 3840
@@ -4072,7 +4078,8 @@ out:
4072struct i915_vma * 4078struct i915_vma *
4073i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 4079i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
4074 u32 alignment, 4080 u32 alignment,
4075 const struct i915_ggtt_view *view) 4081 const struct i915_ggtt_view *view,
4082 unsigned int flags)
4076{ 4083{
4077 struct i915_vma *vma; 4084 struct i915_vma *vma;
4078 int ret; 4085 int ret;
@@ -4109,25 +4116,14 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
4109 * try to preserve the existing ABI). 4116 * try to preserve the existing ABI).
4110 */ 4117 */
4111 vma = ERR_PTR(-ENOSPC); 4118 vma = ERR_PTR(-ENOSPC);
4112 if (!view || view->type == I915_GGTT_VIEW_NORMAL) 4119 if ((flags & PIN_MAPPABLE) == 0 &&
4120 (!view || view->type == I915_GGTT_VIEW_NORMAL))
4113 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 4121 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
4114 PIN_MAPPABLE | PIN_NONBLOCK); 4122 flags |
4115 if (IS_ERR(vma)) { 4123 PIN_MAPPABLE |
4116 struct drm_i915_private *i915 = to_i915(obj->base.dev); 4124 PIN_NONBLOCK);
4117 unsigned int flags; 4125 if (IS_ERR(vma))
4118
4119 /* Valleyview is definitely limited to scanning out the first
4120 * 512MiB. Let's presume this behaviour was inherited from the
4121 * g4x display engine and that all earlier gen are similarly
4122 * limited. Testing suggests that it is a little more
4123 * complicated than this. For example, Cherryview appears quite
4124 * happy to scanout from anywhere within its global aperture.
4125 */
4126 flags = 0;
4127 if (HAS_GMCH_DISPLAY(i915))
4128 flags = PIN_MAPPABLE;
4129 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags); 4126 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
4130 }
4131 if (IS_ERR(vma)) 4127 if (IS_ERR(vma))
4132 goto err_unpin_global; 4128 goto err_unpin_global;
4133 4129
@@ -4140,7 +4136,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
4140 /* It should now be out of any other write domains, and we can update 4136 /* It should now be out of any other write domains, and we can update
4141 * the domain values for our changes. 4137 * the domain values for our changes.
4142 */ 4138 */
4143 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 4139 obj->read_domains |= I915_GEM_DOMAIN_GTT;
4144 4140
4145 return vma; 4141 return vma;
4146 4142
@@ -4193,15 +4189,15 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
4193 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 4189 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
4194 4190
4195 /* Flush the CPU cache if it's still invalid. */ 4191 /* Flush the CPU cache if it's still invalid. */
4196 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { 4192 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
4197 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); 4193 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
4198 obj->base.read_domains |= I915_GEM_DOMAIN_CPU; 4194 obj->read_domains |= I915_GEM_DOMAIN_CPU;
4199 } 4195 }
4200 4196
4201 /* It should now be out of any other write domains, and we can update 4197 /* It should now be out of any other write domains, and we can update
4202 * the domain values for our changes. 4198 * the domain values for our changes.
4203 */ 4199 */
4204 GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU); 4200 GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
4205 4201
4206 /* If we're writing through the CPU, then the GPU read domains will 4202 /* If we're writing through the CPU, then the GPU read domains will
4207 * need to be invalidated at next use. 4203 * need to be invalidated at next use.
@@ -4276,7 +4272,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
4276 4272
4277 lockdep_assert_held(&obj->base.dev->struct_mutex); 4273 lockdep_assert_held(&obj->base.dev->struct_mutex);
4278 4274
4279 if (!view && flags & PIN_MAPPABLE) { 4275 if (flags & PIN_MAPPABLE &&
4276 (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
4280 /* If the required space is larger than the available 4277 /* If the required space is larger than the available
4281 * aperture, we will not be able to find a slot for the 4278 * aperture, we will not be able to find a slot for the
4282 * object and unbinding the object now will be in 4279 * object and unbinding the object now will be in
@@ -4637,8 +4634,8 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
4637 4634
4638 i915_gem_object_init(obj, &i915_gem_object_ops); 4635 i915_gem_object_init(obj, &i915_gem_object_ops);
4639 4636
4640 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 4637 obj->write_domain = I915_GEM_DOMAIN_CPU;
4641 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 4638 obj->read_domains = I915_GEM_DOMAIN_CPU;
4642 4639
4643 if (HAS_LLC(dev_priv)) 4640 if (HAS_LLC(dev_priv))
4644 /* On some devices, we can have the GPU use the LLC (the CPU 4641 /* On some devices, we can have the GPU use the LLC (the CPU
@@ -4752,6 +4749,9 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
4752 kfree(obj->bit_17); 4749 kfree(obj->bit_17);
4753 i915_gem_object_free(obj); 4750 i915_gem_object_free(obj);
4754 4751
4752 GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
4753 atomic_dec(&i915->mm.free_count);
4754
4755 if (on) 4755 if (on)
4756 cond_resched(); 4756 cond_resched();
4757 } 4757 }
@@ -4840,6 +4840,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
4840 * i915_gem_busy_ioctl(). For the corresponding synchronized 4840 * i915_gem_busy_ioctl(). For the corresponding synchronized
4841 * lookup see i915_gem_object_lookup_rcu(). 4841 * lookup see i915_gem_object_lookup_rcu().
4842 */ 4842 */
4843 atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
4843 call_rcu(&obj->rcu, __i915_gem_free_object_rcu); 4844 call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
4844} 4845}
4845 4846
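The mm.free_count changes in this file pair with the early-out added to i915_gem_drain_freed_objects() in i915_drv.h above: objects queued for deferred RCU freeing are counted, so the drain path can skip its expensive barriers when nothing is pending. A rough sketch of the pattern, assuming the names from the diff:

	/* queueing side: account for the object before handing it to RCU */
	atomic_inc(&i915->mm.free_count);
	call_rcu(&obj->rcu, __i915_gem_free_object_rcu);

	/* worker side, per object actually freed */
	GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
	atomic_dec(&i915->mm.free_count);

	/* drain side: a cheap read avoids rcu_barrier() when idle */
	if (!atomic_read(&i915->mm.free_count))
		return;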
@@ -4882,10 +4883,8 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
4882 * it may impact the display and we are uncertain about the stability 4883 * it may impact the display and we are uncertain about the stability
4883 * of the reset, so this could be applied to even earlier gen. 4884 * of the reset, so this could be applied to even earlier gen.
4884 */ 4885 */
4885 if (INTEL_GEN(i915) >= 5) { 4886 if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
4886 int reset = intel_gpu_reset(i915, ALL_ENGINES); 4887 WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
4887 WARN_ON(reset && reset != -ENODEV);
4888 }
4889} 4888}
4890 4889
4891int i915_gem_suspend(struct drm_i915_private *dev_priv) 4890int i915_gem_suspend(struct drm_i915_private *dev_priv)
@@ -5065,8 +5064,11 @@ static int __i915_gem_restart_engines(void *data)
5065 5064
5066 for_each_engine(engine, i915, id) { 5065 for_each_engine(engine, i915, id) {
5067 err = engine->init_hw(engine); 5066 err = engine->init_hw(engine);
5068 if (err) 5067 if (err) {
5068 DRM_ERROR("Failed to restart %s (%d)\n",
5069 engine->name, err);
5069 return err; 5070 return err;
5071 }
5070 } 5072 }
5071 5073
5072 return 0; 5074 return 0;
@@ -5118,14 +5120,16 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
5118 5120
5119 ret = i915_ppgtt_init_hw(dev_priv); 5121 ret = i915_ppgtt_init_hw(dev_priv);
5120 if (ret) { 5122 if (ret) {
5121 DRM_ERROR("PPGTT enable HW failed %d\n", ret); 5123 DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
5122 goto out; 5124 goto out;
5123 } 5125 }
5124 5126
5125 /* We can't enable contexts until all firmware is loaded */ 5127 /* We can't enable contexts until all firmware is loaded */
5126 ret = intel_uc_init_hw(dev_priv); 5128 ret = intel_uc_init_hw(dev_priv);
5127 if (ret) 5129 if (ret) {
5130 DRM_ERROR("Enabling uc failed (%d)\n", ret);
5128 goto out; 5131 goto out;
5132 }
5129 5133
5130 intel_mocs_init_l3cc_table(dev_priv); 5134 intel_mocs_init_l3cc_table(dev_priv);
5131 5135
@@ -5415,10 +5419,10 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
5415{ 5419{
5416 int i; 5420 int i;
5417 5421
5418 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && 5422 if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
5419 !IS_CHERRYVIEW(dev_priv)) 5423 !IS_CHERRYVIEW(dev_priv))
5420 dev_priv->num_fence_regs = 32; 5424 dev_priv->num_fence_regs = 32;
5421 else if (INTEL_INFO(dev_priv)->gen >= 4 || 5425 else if (INTEL_GEN(dev_priv) >= 4 ||
5422 IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 5426 IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
5423 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) 5427 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
5424 dev_priv->num_fence_regs = 16; 5428 dev_priv->num_fence_regs = 16;
@@ -5537,7 +5541,8 @@ err_out:
5537void i915_gem_load_cleanup(struct drm_i915_private *dev_priv) 5541void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
5538{ 5542{
5539 i915_gem_drain_freed_objects(dev_priv); 5543 i915_gem_drain_freed_objects(dev_priv);
5540 WARN_ON(!llist_empty(&dev_priv->mm.free_list)); 5544 GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
5545 GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
5541 WARN_ON(dev_priv->mm.object_count); 5546 WARN_ON(dev_priv->mm.object_count);
5542 5547
5543 mutex_lock(&dev_priv->drm.struct_mutex); 5548 mutex_lock(&dev_priv->drm.struct_mutex);
@@ -5693,7 +5698,7 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
5693 if (IS_ERR(obj)) 5698 if (IS_ERR(obj))
5694 return obj; 5699 return obj;
5695 5700
5696 GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU); 5701 GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
5697 5702
5698 file = obj->base.filp; 5703 file = obj->base.filp;
5699 offset = 0; 5704 offset = 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index b9b53ac14176..f5c570d35b2a 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -177,7 +177,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
177 } else if (obj->mm.pages) { 177 } else if (obj->mm.pages) {
178 __i915_do_clflush(obj); 178 __i915_do_clflush(obj);
179 } else { 179 } else {
180 GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU); 180 GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
181 } 181 }
182 182
183 obj->cache_dirty = false; 183 obj->cache_dirty = false;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 648e7536ff51..3d75f484f6e5 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -338,11 +338,6 @@ static void __destroy_hw_context(struct i915_gem_context *ctx,
338 context_close(ctx); 338 context_close(ctx);
339} 339}
340 340
341/**
342 * The default context needs to exist per ring that uses contexts. It stores the
343 * context state of the GPU for applications that don't utilize HW contexts, as
344 * well as an idle case.
345 */
346static struct i915_gem_context * 341static struct i915_gem_context *
347i915_gem_create_context(struct drm_i915_private *dev_priv, 342i915_gem_create_context(struct drm_i915_private *dev_priv,
348 struct drm_i915_file_private *file_priv) 343 struct drm_i915_file_private *file_priv)
@@ -449,12 +444,18 @@ destroy_kernel_context(struct i915_gem_context **ctxp)
449 i915_gem_context_free(ctx); 444 i915_gem_context_free(ctx);
450} 445}
451 446
447static bool needs_preempt_context(struct drm_i915_private *i915)
448{
449 return HAS_LOGICAL_RING_PREEMPTION(i915);
450}
451
452int i915_gem_contexts_init(struct drm_i915_private *dev_priv) 452int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
453{ 453{
454 struct i915_gem_context *ctx; 454 struct i915_gem_context *ctx;
455 int err;
456 455
456 /* Reassure ourselves we are only called once */
457 GEM_BUG_ON(dev_priv->kernel_context); 457 GEM_BUG_ON(dev_priv->kernel_context);
458 GEM_BUG_ON(dev_priv->preempt_context);
458 459
459 INIT_LIST_HEAD(&dev_priv->contexts.list); 460 INIT_LIST_HEAD(&dev_priv->contexts.list);
460 INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker); 461 INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
@@ -468,8 +469,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
468 ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN); 469 ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
469 if (IS_ERR(ctx)) { 470 if (IS_ERR(ctx)) {
470 DRM_ERROR("Failed to create default global context\n"); 471 DRM_ERROR("Failed to create default global context\n");
471 err = PTR_ERR(ctx); 472 return PTR_ERR(ctx);
472 goto err;
473 } 473 }
474 /* 474 /*
475 * For easy recognisability, we want the kernel context to be 0 and then 475 * For easy recognisability, we want the kernel context to be 0 and then
@@ -479,23 +479,18 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
479 dev_priv->kernel_context = ctx; 479 dev_priv->kernel_context = ctx;
480 480
481 /* highest priority; preempting task */ 481 /* highest priority; preempting task */
482 ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX); 482 if (needs_preempt_context(dev_priv)) {
483 if (IS_ERR(ctx)) { 483 ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
484 DRM_ERROR("Failed to create default preempt context\n"); 484 if (!IS_ERR(ctx))
485 err = PTR_ERR(ctx); 485 dev_priv->preempt_context = ctx;
486 goto err_kernel_context; 486 else
487 DRM_ERROR("Failed to create preempt context; disabling preemption\n");
487 } 488 }
488 dev_priv->preempt_context = ctx;
489 489
490 DRM_DEBUG_DRIVER("%s context support initialized\n", 490 DRM_DEBUG_DRIVER("%s context support initialized\n",
491 dev_priv->engine[RCS]->context_size ? "logical" : 491 dev_priv->engine[RCS]->context_size ? "logical" :
492 "fake"); 492 "fake");
493 return 0; 493 return 0;
494
495err_kernel_context:
496 destroy_kernel_context(&dev_priv->kernel_context);
497err:
498 return err;
499} 494}
500 495
501void i915_gem_contexts_lost(struct drm_i915_private *dev_priv) 496void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
@@ -521,7 +516,8 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
521{ 516{
522 lockdep_assert_held(&i915->drm.struct_mutex); 517 lockdep_assert_held(&i915->drm.struct_mutex);
523 518
524 destroy_kernel_context(&i915->preempt_context); 519 if (i915->preempt_context)
520 destroy_kernel_context(&i915->preempt_context);
525 destroy_kernel_context(&i915->kernel_context); 521 destroy_kernel_context(&i915->kernel_context);
526 522
527 /* Must free all deferred contexts (via flush_workqueue) first */ 523 /* Must free all deferred contexts (via flush_workqueue) first */
@@ -803,11 +799,11 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
803 799
804 case I915_CONTEXT_PARAM_PRIORITY: 800 case I915_CONTEXT_PARAM_PRIORITY:
805 { 801 {
806 int priority = args->value; 802 s64 priority = args->value;
807 803
808 if (args->size) 804 if (args->size)
809 ret = -EINVAL; 805 ret = -EINVAL;
810 else if (!to_i915(dev)->engine[RCS]->schedule) 806 else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
811 ret = -ENODEV; 807 ret = -ENODEV;
812 else if (priority > I915_CONTEXT_MAX_USER_PRIORITY || 808 else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
813 priority < I915_CONTEXT_MIN_USER_PRIORITY) 809 priority < I915_CONTEXT_MIN_USER_PRIORITY)
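For reference, a hypothetical userspace caller of the setparam path fixed above. The switch to s64 matters because value is an unsigned 64-bit field carrying a signed priority. This sketch assumes the uapi names of this era (the i915_drm.h include path may vary with your libdrm install) and elides error handling:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int set_context_priority(int fd, uint32_t ctx_id, int64_t prio)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.size = 0,	/* must be zero, else errno is EINVAL */
		.param = I915_CONTEXT_PARAM_PRIORITY,
		.value = (uint64_t)prio,	/* signed priority in a u64 slot */
	};

	/* errno is ENODEV if the scheduler lacks priority support */
	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}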
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 4bfb72f8e1cb..a681c5b891ff 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -29,6 +29,8 @@
29#include <linux/list.h> 29#include <linux/list.h>
30#include <linux/radix-tree.h> 30#include <linux/radix-tree.h>
31 31
32#include "i915_gem.h"
33
32struct pid; 34struct pid;
33 35
34struct drm_device; 36struct drm_device;
@@ -36,6 +38,7 @@ struct drm_file;
36 38
37struct drm_i915_private; 39struct drm_i915_private;
38struct drm_i915_file_private; 40struct drm_i915_file_private;
41struct drm_i915_gem_request;
39struct i915_hw_ppgtt; 42struct i915_hw_ppgtt;
40struct i915_vma; 43struct i915_vma;
41struct intel_ring; 44struct intel_ring;
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 864439a214c8..69a7aec49e84 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -330,8 +330,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
330 * write-combined buffer or a delay through the chipset for GTT 330 * write-combined buffer or a delay through the chipset for GTT
331 * writes that do require us to treat GTT as a separate cache domain.) 331 * writes that do require us to treat GTT as a separate cache domain.)
332 */ 332 */
333 obj->base.read_domains = I915_GEM_DOMAIN_GTT; 333 obj->read_domains = I915_GEM_DOMAIN_GTT;
334 obj->base.write_domain = 0; 334 obj->write_domain = 0;
335 335
336 return &obj->base; 336 return &obj->base;
337 337
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 4401068ff468..4eb28e84fda4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -505,6 +505,8 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
505 list_add_tail(&vma->exec_link, &eb->unbound); 505 list_add_tail(&vma->exec_link, &eb->unbound);
506 if (drm_mm_node_allocated(&vma->node)) 506 if (drm_mm_node_allocated(&vma->node))
507 err = i915_vma_unbind(vma); 507 err = i915_vma_unbind(vma);
508 if (unlikely(err))
509 vma->exec_flags = NULL;
508 } 510 }
509 return err; 511 return err;
510} 512}
@@ -1073,7 +1075,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
1073 u32 *cmd; 1075 u32 *cmd;
1074 int err; 1076 int err;
1075 1077
1076 GEM_BUG_ON(vma->obj->base.write_domain & I915_GEM_DOMAIN_CPU); 1078 GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU);
1077 1079
1078 obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE); 1080 obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
1079 if (IS_ERR(obj)) 1081 if (IS_ERR(obj))
@@ -1861,16 +1863,16 @@ void i915_vma_move_to_active(struct i915_vma *vma,
1861 i915_gem_active_set(&vma->last_read[idx], req); 1863 i915_gem_active_set(&vma->last_read[idx], req);
1862 list_move_tail(&vma->vm_link, &vma->vm->active_list); 1864 list_move_tail(&vma->vm_link, &vma->vm->active_list);
1863 1865
1864 obj->base.write_domain = 0; 1866 obj->write_domain = 0;
1865 if (flags & EXEC_OBJECT_WRITE) { 1867 if (flags & EXEC_OBJECT_WRITE) {
1866 obj->base.write_domain = I915_GEM_DOMAIN_RENDER; 1868 obj->write_domain = I915_GEM_DOMAIN_RENDER;
1867 1869
1868 if (intel_fb_obj_invalidate(obj, ORIGIN_CS)) 1870 if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
1869 i915_gem_active_set(&obj->frontbuffer_write, req); 1871 i915_gem_active_set(&obj->frontbuffer_write, req);
1870 1872
1871 obj->base.read_domains = 0; 1873 obj->read_domains = 0;
1872 } 1874 }
1873 obj->base.read_domains |= I915_GEM_GPU_DOMAINS; 1875 obj->read_domains |= I915_GEM_GPU_DOMAINS;
1874 1876
1875 if (flags & EXEC_OBJECT_NEEDS_FENCE) 1877 if (flags & EXEC_OBJECT_NEEDS_FENCE)
1876 i915_gem_active_set(&vma->last_fence, req); 1878 i915_gem_active_set(&vma->last_fence, req);
@@ -1973,7 +1975,7 @@ static int eb_submit(struct i915_execbuffer *eb)
1973 return 0; 1975 return 0;
1974} 1976}
1975 1977
1976/** 1978/*
1977 * Find one BSD ring to dispatch the corresponding BSD command. 1979 * Find one BSD ring to dispatch the corresponding BSD command.
1978 * The engine index is returned. 1980 * The engine index is returned.
1979 */ 1981 */
@@ -2410,7 +2412,7 @@ err_request:
2410 if (out_fence) { 2412 if (out_fence) {
2411 if (err == 0) { 2413 if (err == 0) {
2412 fd_install(out_fence_fd, out_fence->file); 2414 fd_install(out_fence_fd, out_fence->file);
2413 args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */ 2415 args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
2414 args->rsvd2 |= (u64)out_fence_fd << 32; 2416 args->rsvd2 |= (u64)out_fence_fd << 32;
2415 out_fence_fd = -1; 2417 out_fence_fd = -1;
2416 } else { 2418 } else {
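The GENMASK_ULL() fix above is easy to misread: the macro takes the high bit first, so GENMASK_ULL(31, 0) is the low 32-bit mask, whereas the transposed GENMASK_ULL(0, 31) evaluates to 0 and so wiped the caller's in-fence instead of keeping it. rsvd2 multiplexes two sync-file fds; a sketch of the packing as used here:

	args->rsvd2 &= GENMASK_ULL(31, 0);	/* keep in-fence fd, bits 31..0 */
	args->rsvd2 |= (u64)out_fence_fd << 32;	/* out-fence fd, bits 63..32 */
	/* userspace recovers the out-fence as (int)(rsvd2 >> 32) */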
@@ -2463,8 +2465,8 @@ static bool check_buffer_count(size_t count)
2463 * list array and passes it to the real function. 2465 * list array and passes it to the real function.
2464 */ 2466 */
2465int 2467int
2466i915_gem_execbuffer(struct drm_device *dev, void *data, 2468i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
2467 struct drm_file *file) 2469 struct drm_file *file)
2468{ 2470{
2469 struct drm_i915_gem_execbuffer *args = data; 2471 struct drm_i915_gem_execbuffer *args = data;
2470 struct drm_i915_gem_execbuffer2 exec2; 2472 struct drm_i915_gem_execbuffer2 exec2;
@@ -2554,8 +2556,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2554} 2556}
2555 2557
2556int 2558int
2557i915_gem_execbuffer2(struct drm_device *dev, void *data, 2559i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
2558 struct drm_file *file) 2560 struct drm_file *file)
2559{ 2561{
2560 struct drm_i915_gem_execbuffer2 *args = data; 2562 struct drm_i915_gem_execbuffer2 *args = data;
2561 struct drm_i915_gem_exec_object2 *exec2_list; 2563 struct drm_i915_gem_exec_object2 *exec2_list;
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index b8338d75c6f3..d548ac05ccd7 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -64,7 +64,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
64 int fence_pitch_shift; 64 int fence_pitch_shift;
65 u64 val; 65 u64 val;
66 66
67 if (INTEL_INFO(fence->i915)->gen >= 6) { 67 if (INTEL_GEN(fence->i915) >= 6) {
68 fence_reg_lo = FENCE_REG_GEN6_LO(fence->id); 68 fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
69 fence_reg_hi = FENCE_REG_GEN6_HI(fence->id); 69 fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
70 fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT; 70 fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 955ce7bee448..cd5984246bc3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -673,27 +673,22 @@ static void free_pd(struct i915_address_space *vm,
673static void gen8_initialize_pd(struct i915_address_space *vm, 673static void gen8_initialize_pd(struct i915_address_space *vm,
674 struct i915_page_directory *pd) 674 struct i915_page_directory *pd)
675{ 675{
676 unsigned int i;
677
678 fill_px(vm, pd, 676 fill_px(vm, pd,
679 gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC)); 677 gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
680 for (i = 0; i < I915_PDES; i++) 678 memset_p((void **)pd->page_table, vm->scratch_pt, I915_PDES);
681 pd->page_table[i] = vm->scratch_pt;
682} 679}
683 680
684static int __pdp_init(struct i915_address_space *vm, 681static int __pdp_init(struct i915_address_space *vm,
685 struct i915_page_directory_pointer *pdp) 682 struct i915_page_directory_pointer *pdp)
686{ 683{
687 const unsigned int pdpes = i915_pdpes_per_pdp(vm); 684 const unsigned int pdpes = i915_pdpes_per_pdp(vm);
688 unsigned int i;
689 685
690 pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory), 686 pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
691 GFP_KERNEL | __GFP_NOWARN); 687 GFP_KERNEL | __GFP_NOWARN);
692 if (unlikely(!pdp->page_directory)) 688 if (unlikely(!pdp->page_directory))
693 return -ENOMEM; 689 return -ENOMEM;
694 690
695 for (i = 0; i < pdpes; i++) 691 memset_p((void **)pdp->page_directory, vm->scratch_pd, pdpes);
696 pdp->page_directory[i] = vm->scratch_pd;
697 692
698 return 0; 693 return 0;
699} 694}
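These open-coded loops (and the matching one removed from gen8_initialize_pml4() below) collapse into memset_p(), the pointer-array analogue of memset(). A sketch of the contract assumed here, not the library implementation:

/* fill an array of n pointers with the same pointer value */
static void memset_p_sketch(void **p, void *v, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		p[i] = v;
}

/* usage as in the diff:
 *	memset_p((void **)pd->page_table, vm->scratch_pt, I915_PDES);
 */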
@@ -715,7 +710,7 @@ alloc_pdp(struct i915_address_space *vm)
715 struct i915_page_directory_pointer *pdp; 710 struct i915_page_directory_pointer *pdp;
716 int ret = -ENOMEM; 711 int ret = -ENOMEM;
717 712
718 WARN_ON(!use_4lvl(vm)); 713 GEM_BUG_ON(!use_4lvl(vm));
719 714
720 pdp = kzalloc(sizeof(*pdp), GFP_KERNEL); 715 pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
721 if (!pdp) 716 if (!pdp)
@@ -764,12 +759,9 @@ static void gen8_initialize_pdp(struct i915_address_space *vm,
764static void gen8_initialize_pml4(struct i915_address_space *vm, 759static void gen8_initialize_pml4(struct i915_address_space *vm,
765 struct i915_pml4 *pml4) 760 struct i915_pml4 *pml4)
766{ 761{
767 unsigned int i;
768
769 fill_px(vm, pml4, 762 fill_px(vm, pml4,
770 gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC)); 763 gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
771 for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) 764 memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
772 pml4->pdps[i] = vm->scratch_pdp;
773} 765}
774 766
775/* Broadwell Page Directory Pointer Descriptors */ 767/* Broadwell Page Directory Pointer Descriptors */
@@ -2109,7 +2101,7 @@ static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
2109 ppgtt->base.i915 = dev_priv; 2101 ppgtt->base.i915 = dev_priv;
2110 ppgtt->base.dma = &dev_priv->drm.pdev->dev; 2102 ppgtt->base.dma = &dev_priv->drm.pdev->dev;
2111 2103
2112 if (INTEL_INFO(dev_priv)->gen < 8) 2104 if (INTEL_GEN(dev_priv) < 8)
2113 return gen6_ppgtt_init(ppgtt); 2105 return gen6_ppgtt_init(ppgtt);
2114 else 2106 else
2115 return gen8_ppgtt_init(ppgtt); 2107 return gen8_ppgtt_init(ppgtt);
@@ -2257,9 +2249,9 @@ void i915_ppgtt_release(struct kref *kref)
2257 trace_i915_ppgtt_release(&ppgtt->base); 2249 trace_i915_ppgtt_release(&ppgtt->base);
2258 2250
2259 /* vmas should already be unbound and destroyed */ 2251 /* vmas should already be unbound and destroyed */
2260 WARN_ON(!list_empty(&ppgtt->base.active_list)); 2252 GEM_BUG_ON(!list_empty(&ppgtt->base.active_list));
2261 WARN_ON(!list_empty(&ppgtt->base.inactive_list)); 2253 GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list));
2262 WARN_ON(!list_empty(&ppgtt->base.unbound_list)); 2254 GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list));
2263 2255
2264 ppgtt->base.cleanup(&ppgtt->base); 2256 ppgtt->base.cleanup(&ppgtt->base);
2265 i915_address_space_fini(&ppgtt->base); 2257 i915_address_space_fini(&ppgtt->base);
@@ -2822,10 +2814,10 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
2822 2814
2823 i915->mm.aliasing_ppgtt = ppgtt; 2815 i915->mm.aliasing_ppgtt = ppgtt;
2824 2816
2825 WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma); 2817 GEM_BUG_ON(ggtt->base.bind_vma != ggtt_bind_vma);
2826 ggtt->base.bind_vma = aliasing_gtt_bind_vma; 2818 ggtt->base.bind_vma = aliasing_gtt_bind_vma;
2827 2819
2828 WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma); 2820 GEM_BUG_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
2829 ggtt->base.unbind_vma = aliasing_gtt_unbind_vma; 2821 ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
2830 2822
2831 return 0; 2823 return 0;
@@ -2916,7 +2908,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
2916 ggtt->base.closed = true; 2908 ggtt->base.closed = true;
2917 2909
2918 mutex_lock(&dev_priv->drm.struct_mutex); 2910 mutex_lock(&dev_priv->drm.struct_mutex);
2919 WARN_ON(!list_empty(&ggtt->base.active_list)); 2911 GEM_BUG_ON(!list_empty(&ggtt->base.active_list));
2920 list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link) 2912 list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
2921 WARN_ON(i915_vma_unbind(vma)); 2913 WARN_ON(i915_vma_unbind(vma));
2922 mutex_unlock(&dev_priv->drm.struct_mutex); 2914 mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -3809,6 +3801,9 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
3809 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj)); 3801 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3810 3802
3811 switch (vma->ggtt_view.type) { 3803 switch (vma->ggtt_view.type) {
3804 default:
3805 GEM_BUG_ON(vma->ggtt_view.type);
3806 /* fall through */
3812 case I915_GGTT_VIEW_NORMAL: 3807 case I915_GGTT_VIEW_NORMAL:
3813 vma->pages = vma->obj->mm.pages; 3808 vma->pages = vma->obj->mm.pages;
3814 return 0; 3809 return 0;
@@ -3821,11 +3816,6 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
3821 case I915_GGTT_VIEW_PARTIAL: 3816 case I915_GGTT_VIEW_PARTIAL:
3822 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj); 3817 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
3823 break; 3818 break;
3824
3825 default:
3826 WARN_ONCE(1, "GGTT view %u not implemented!\n",
3827 vma->ggtt_view.type);
3828 return -EINVAL;
3829 } 3819 }
3830 3820
3831 ret = 0; 3821 ret = 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index a1d6956734f7..0d0144b2104c 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -167,6 +167,10 @@ static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
167}; 167};
168 168
169/** 169/**
170 * i915_gem_object_create_internal: create an object with volatile pages
171 * @i915: the i915 device
172 * @size: the size in bytes of backing storage to allocate for the object
173 *
170 * Creates a new object that wraps some internal memory for private use. 174 * Creates a new object that wraps some internal memory for private use.
171 * This object is not backed by swappable storage, and as such its contents 175 * This object is not backed by swappable storage, and as such its contents
172 * are volatile and only valid whilst pinned. If the object is reaped by the 176 * are volatile and only valid whilst pinned. If the object is reaped by the
@@ -197,8 +201,8 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
197 drm_gem_private_object_init(&i915->drm, &obj->base, size); 201 drm_gem_private_object_init(&i915->drm, &obj->base, size);
198 i915_gem_object_init(obj, &i915_gem_object_internal_ops); 202 i915_gem_object_init(obj, &i915_gem_object_internal_ops);
199 203
200 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 204 obj->read_domains = I915_GEM_DOMAIN_CPU;
201 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 205 obj->write_domain = I915_GEM_DOMAIN_CPU;
202 206
203 cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE; 207 cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
204 i915_gem_object_set_cache_coherency(obj, cache_level); 208 i915_gem_object_set_cache_coherency(obj, cache_level);
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 05e89e1c0a08..ca2b3b62569d 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -148,6 +148,21 @@ struct drm_i915_gem_object {
148#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1) 148#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
149 unsigned int cache_dirty:1; 149 unsigned int cache_dirty:1;
150 150
151 /**
152 * @read_domains: Read memory domains.
153 *
154 * These monitor which caches contain read/write data related to the
155 * object. When transitioning from one set of domains to another,
156 * the driver is called to ensure that caches are suitably flushed and
157 * invalidated.
158 */
159 u16 read_domains;
160
161 /**
162 * @write_domain: Corresponding unique write memory domain.
163 */
164 u16 write_domain;
165
151 atomic_t frontbuffer_bits; 166 atomic_t frontbuffer_bits;
152 unsigned int frontbuffer_ggtt_origin; /* write once */ 167 unsigned int frontbuffer_ggtt_origin; /* write once */
153 struct i915_gem_active frontbuffer_write; 168 struct i915_gem_active frontbuffer_write;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 8efa9e7a9e46..8bc7c50b8418 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -443,12 +443,14 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
443 engine->last_retired_context = request->ctx; 443 engine->last_retired_context = request->ctx;
444 444
445 spin_lock_irq(&request->lock); 445 spin_lock_irq(&request->lock);
446 if (request->waitboost)
447 atomic_dec(&request->i915->gt_pm.rps.num_waiters);
448 if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags)) 446 if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags))
449 dma_fence_signal_locked(&request->fence); 447 dma_fence_signal_locked(&request->fence);
450 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) 448 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
451 intel_engine_cancel_signaling(request); 449 intel_engine_cancel_signaling(request);
450 if (request->waitboost) {
451 GEM_BUG_ON(!atomic_read(&request->i915->gt_pm.rps.num_waiters));
452 atomic_dec(&request->i915->gt_pm.rps.num_waiters);
453 }
452 spin_unlock_irq(&request->lock); 454 spin_unlock_irq(&request->lock);
453 455
454 i915_priotree_fini(request->i915, &request->priotree); 456 i915_priotree_fini(request->i915, &request->priotree);
@@ -916,9 +918,9 @@ i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
916 918
917/** 919/**
918 * i915_gem_request_await_object - set this request to (async) wait upon a bo 920 * i915_gem_request_await_object - set this request to (async) wait upon a bo
919 *
920 * @to: request we are wishing to use 921 * @to: request we are wishing to use
921 * @obj: object which may be in use on another ring. 922 * @obj: object which may be in use on another ring.
923 * @write: whether the wait is on behalf of a writer
922 * 924 *
923 * This code is meant to abstract object synchronization with the GPU. 925 * This code is meant to abstract object synchronization with the GPU.
924 * Conceptually we serialise writes between engines inside the GPU. 926 * Conceptually we serialise writes between engines inside the GPU.
@@ -993,7 +995,8 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
993 lockdep_assert_held(&request->i915->drm.struct_mutex); 995 lockdep_assert_held(&request->i915->drm.struct_mutex);
994 trace_i915_gem_request_add(request); 996 trace_i915_gem_request_add(request);
995 997
996 /* Make sure that no request gazumped us - if it was allocated after 998 /*
999 * Make sure that no request gazumped us - if it was allocated after
997 * our i915_gem_request_alloc() and called __i915_add_request() before 1000 * our i915_gem_request_alloc() and called __i915_add_request() before
998 * us, the timeline will hold its seqno which is later than ours. 1001 * us, the timeline will hold its seqno which is later than ours.
999 */ 1002 */
@@ -1020,7 +1023,8 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
1020 WARN(err, "engine->emit_flush() failed: %d!\n", err); 1023 WARN(err, "engine->emit_flush() failed: %d!\n", err);
1021 } 1024 }
1022 1025
1023 /* Record the position of the start of the breadcrumb so that 1026 /*
1027 * Record the position of the start of the breadcrumb so that
1024 * should we detect the updated seqno part-way through the 1028 * should we detect the updated seqno part-way through the
1025 * GPU processing the request, we never over-estimate the 1029 * GPU processing the request, we never over-estimate the
1026 * position of the ring's HEAD. 1030 * position of the ring's HEAD.
@@ -1029,7 +1033,8 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
1029 GEM_BUG_ON(IS_ERR(cs)); 1033 GEM_BUG_ON(IS_ERR(cs));
1030 request->postfix = intel_ring_offset(request, cs); 1034 request->postfix = intel_ring_offset(request, cs);
1031 1035
1032 /* Seal the request and mark it as pending execution. Note that 1036 /*
1037 * Seal the request and mark it as pending execution. Note that
1033 * we may inspect this state, without holding any locks, during 1038 * we may inspect this state, without holding any locks, during
1034 * hangcheck. Hence we apply the barrier to ensure that we do not 1039 * hangcheck. Hence we apply the barrier to ensure that we do not
1035 * see a more recent value in the hws than we are tracking. 1040 * see a more recent value in the hws than we are tracking.
@@ -1037,7 +1042,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
1037 1042
1038 prev = i915_gem_active_raw(&timeline->last_request, 1043 prev = i915_gem_active_raw(&timeline->last_request,
1039 &request->i915->drm.struct_mutex); 1044 &request->i915->drm.struct_mutex);
1040 if (prev) { 1045 if (prev && !i915_gem_request_completed(prev)) {
1041 i915_sw_fence_await_sw_fence(&request->submit, &prev->submit, 1046 i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
1042 &request->submitq); 1047 &request->submitq);
1043 if (engine->schedule) 1048 if (engine->schedule)
@@ -1057,7 +1062,8 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
1057 list_add_tail(&request->ring_link, &ring->request_list); 1062 list_add_tail(&request->ring_link, &ring->request_list);
1058 request->emitted_jiffies = jiffies; 1063 request->emitted_jiffies = jiffies;
1059 1064
1060 /* Let the backend know a new request has arrived that may need 1065 /*
1066 * Let the backend know a new request has arrived that may need
1061 * to adjust the existing execution schedule due to a high priority 1067 * to adjust the existing execution schedule due to a high priority
1062 * request - i.e. we may want to preempt the current request in order 1068 * request - i.e. we may want to preempt the current request in order
1063 * to run a high priority dependency chain *before* we can execute this 1069 * to run a high priority dependency chain *before* we can execute this
@@ -1073,6 +1079,26 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
1073 local_bh_disable(); 1079 local_bh_disable();
1074 i915_sw_fence_commit(&request->submit); 1080 i915_sw_fence_commit(&request->submit);
1075 local_bh_enable(); /* Kick the execlists tasklet if just scheduled */ 1081 local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
1082
1083 /*
1084 * In typical scenarios, we do not expect the previous request on
1085 * the timeline to be still tracked by timeline->last_request if it
1086 * has been completed. If the completed request is still here, that
1087 * implies that request retirement is a long way behind submission,
1088 * suggesting that we haven't been retiring frequently enough from
1089 * the combination of retire-before-alloc, waiters and the background
1090 * retirement worker. So if the last request on this timeline was
1091 * already completed, do a catch up pass, flushing the retirement queue
1092 * up to this client. Since we have now moved the heaviest operations
1093 * during retirement onto secondary workers, such as freeing objects
1094 * or contexts, retiring a bunch of requests is mostly list management
1095 * (and cache misses), and so we should not be overly penalizing this
 1096 * client by performing excess work, though we may still be performing
1097 * work on behalf of others -- but instead we should benefit from
1098 * improved resource management. (Well, that's the theory at least.)
1099 */
1100 if (prev && i915_gem_request_completed(prev))
1101 i915_gem_request_retire_upto(prev);
1076} 1102}
1077 1103
1078static unsigned long local_clock_us(unsigned int *cpu) 1104static unsigned long local_clock_us(unsigned int *cpu)
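The long comment added above boils down to a small control-flow change in __i915_add_request(): the previous request on the timeline is only awaited while still busy, and if it has already completed, retirement is flushed up to it after submission. A condensed sketch, assuming the diff's names:

	prev = i915_gem_active_raw(&timeline->last_request,
				   &request->i915->drm.struct_mutex);
	if (prev && !i915_gem_request_completed(prev))
		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
					     &request->submitq);

	/* ... seal, publish and submit the new request ... */

	if (prev && i915_gem_request_completed(prev))
		i915_gem_request_retire_upto(prev);	/* catch-up pass */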
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index d3f222fa6356..62aa67960bf4 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -356,7 +356,7 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
356 reserved_base = 0; 356 reserved_base = 0;
357 reserved_size = 0; 357 reserved_size = 0;
358 358
359 switch (INTEL_INFO(dev_priv)->gen) { 359 switch (INTEL_GEN(dev_priv)) {
360 case 2: 360 case 2:
361 case 3: 361 case 3:
362 break; 362 break;
@@ -516,7 +516,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
516 i915_gem_object_init(obj, &i915_gem_object_stolen_ops); 516 i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
517 517
518 obj->stolen = stolen; 518 obj->stolen = stolen;
519 obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; 519 obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
520 cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE; 520 cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
521 i915_gem_object_set_cache_coherency(obj, cache_level); 521 i915_gem_object_set_cache_coherency(obj, cache_level);
522 522
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 382a77a1097e..d596a8302ca3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -721,7 +721,7 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
721 .release = i915_gem_userptr_release, 721 .release = i915_gem_userptr_release,
722}; 722};
723 723
724/** 724/*
725 * Creates a new mm object that wraps some normal memory from the process 725 * Creates a new mm object that wraps some normal memory from the process
726 * context - user memory. 726 * context - user memory.
727 * 727 *
@@ -757,7 +757,9 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
757 * dma-buf instead. 757 * dma-buf instead.
758 */ 758 */
759int 759int
760i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 760i915_gem_userptr_ioctl(struct drm_device *dev,
761 void *data,
762 struct drm_file *file)
761{ 763{
762 struct drm_i915_private *dev_priv = to_i915(dev); 764 struct drm_i915_private *dev_priv = to_i915(dev);
763 struct drm_i915_gem_userptr *args = data; 765 struct drm_i915_gem_userptr *args = data;
@@ -796,8 +798,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
796 798
797 drm_gem_private_object_init(dev, &obj->base, args->user_size); 799 drm_gem_private_object_init(dev, &obj->base, args->user_size);
798 i915_gem_object_init(obj, &i915_gem_userptr_ops); 800 i915_gem_object_init(obj, &i915_gem_userptr_ops);
799 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 801 obj->read_domains = I915_GEM_DOMAIN_CPU;
800 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 802 obj->write_domain = I915_GEM_DOMAIN_CPU;
801 i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); 803 i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
802 804
803 obj->userptr.ptr = args->user_ptr; 805 obj->userptr.ptr = args->user_ptr;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 67c902412193..65c0bef73ee5 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -579,11 +579,13 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
579} 579}
580 580
581static void err_print_capabilities(struct drm_i915_error_state_buf *m, 581static void err_print_capabilities(struct drm_i915_error_state_buf *m,
582 const struct intel_device_info *info) 582 const struct intel_device_info *info,
583 const struct intel_driver_caps *caps)
583{ 584{
584 struct drm_printer p = i915_error_printer(m); 585 struct drm_printer p = i915_error_printer(m);
585 586
586 intel_device_info_dump_flags(info, &p); 587 intel_device_info_dump_flags(info, &p);
588 intel_driver_caps_print(caps, &p);
587} 589}
588 590
589static void err_print_params(struct drm_i915_error_state_buf *m, 591static void err_print_params(struct drm_i915_error_state_buf *m,
@@ -808,7 +810,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
808 if (error->display) 810 if (error->display)
809 intel_display_print_error_state(m, error->display); 811 intel_display_print_error_state(m, error->display);
810 812
811 err_print_capabilities(m, &error->device_info); 813 err_print_capabilities(m, &error->device_info, &error->driver_caps);
812 err_print_params(m, &error->params); 814 err_print_params(m, &error->params);
813 err_print_uc(m, &error->uc); 815 err_print_uc(m, &error->uc);
814 816
@@ -1019,8 +1021,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
1019 err->engine = __active_get_engine_id(&obj->frontbuffer_write); 1021 err->engine = __active_get_engine_id(&obj->frontbuffer_write);
1020 1022
1021 err->gtt_offset = vma->node.start; 1023 err->gtt_offset = vma->node.start;
1022 err->read_domains = obj->base.read_domains; 1024 err->read_domains = obj->read_domains;
1023 err->write_domain = obj->base.write_domain; 1025 err->write_domain = obj->write_domain;
1024 err->fence_reg = vma->fence ? vma->fence->id : -1; 1026 err->fence_reg = vma->fence ? vma->fence->id : -1;
1025 err->tiling = i915_gem_object_get_tiling(obj); 1027 err->tiling = i915_gem_object_get_tiling(obj);
1026 err->dirty = obj->mm.dirty; 1028 err->dirty = obj->mm.dirty;
@@ -1740,6 +1742,7 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
1740 memcpy(&error->device_info, 1742 memcpy(&error->device_info,
1741 INTEL_INFO(dev_priv), 1743 INTEL_INFO(dev_priv),
1742 sizeof(error->device_info)); 1744 sizeof(error->device_info));
1745 error->driver_caps = dev_priv->caps;
1743} 1746}
1744 1747
1745static __always_inline void dup_param(const char *type, void *x) 1748static __always_inline void dup_param(const char *type, void *x)
@@ -1802,14 +1805,16 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
1802 1805
1803/** 1806/**
1804 * i915_capture_error_state - capture an error record for later analysis 1807 * i915_capture_error_state - capture an error record for later analysis
1805 * @dev: drm device 1808 * @i915: i915 device
1809 * @engine_mask: the mask of engines triggering the hang
1810 * @error_msg: a message to insert into the error capture header
1806 * 1811 *
1807 * Should be called when an error is detected (either a hang or an error 1812 * Should be called when an error is detected (either a hang or an error
1808 * interrupt) to capture error state from the time of the error. Fills 1813 * interrupt) to capture error state from the time of the error. Fills
1809 * out a structure which becomes available in debugfs for user level tools 1814 * out a structure which becomes available in debugfs for user level tools
1810 * to pick up. 1815 * to pick up.
1811 */ 1816 */
1812void i915_capture_error_state(struct drm_i915_private *dev_priv, 1817void i915_capture_error_state(struct drm_i915_private *i915,
1813 u32 engine_mask, 1818 u32 engine_mask,
1814 const char *error_msg) 1819 const char *error_msg)
1815{ 1820{
@@ -1820,25 +1825,25 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
1820 if (!i915_modparams.error_capture) 1825 if (!i915_modparams.error_capture)
1821 return; 1826 return;
1822 1827
1823 if (READ_ONCE(dev_priv->gpu_error.first_error)) 1828 if (READ_ONCE(i915->gpu_error.first_error))
1824 return; 1829 return;
1825 1830
1826 error = i915_capture_gpu_state(dev_priv); 1831 error = i915_capture_gpu_state(i915);
1827 if (!error) { 1832 if (!error) {
1828 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); 1833 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1829 return; 1834 return;
1830 } 1835 }
1831 1836
1832 i915_error_capture_msg(dev_priv, error, engine_mask, error_msg); 1837 i915_error_capture_msg(i915, error, engine_mask, error_msg);
1833 DRM_INFO("%s\n", error->error_msg); 1838 DRM_INFO("%s\n", error->error_msg);
1834 1839
1835 if (!error->simulated) { 1840 if (!error->simulated) {
1836 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1841 spin_lock_irqsave(&i915->gpu_error.lock, flags);
1837 if (!dev_priv->gpu_error.first_error) { 1842 if (!i915->gpu_error.first_error) {
1838 dev_priv->gpu_error.first_error = error; 1843 i915->gpu_error.first_error = error;
1839 error = NULL; 1844 error = NULL;
1840 } 1845 }
1841 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 1846 spin_unlock_irqrestore(&i915->gpu_error.lock, flags);
1842 } 1847 }
1843 1848
1844 if (error) { 1849 if (error) {
@@ -1853,7 +1858,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
1853 DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); 1858 DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
1854 DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); 1859 DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
1855 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", 1860 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
1856 dev_priv->drm.primary->index); 1861 i915->drm.primary->index);
1857 warned = true; 1862 warned = true;
1858 } 1863 }
1859} 1864}
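
Three independent cleanups meet in the i915_gpu_error.c hunks above: the new intel_driver_caps snapshot is threaded through capture (i915_capture_gen_state()) and printout (err_print_capabilities()), capture_bo() follows read_domains/write_domain as they move from the GEM base object onto the i915 object, and i915_capture_error_state() renames dev_priv to i915 while repairing its kernel-doc (@dev was stale; @engine_mask and @error_msg were undocumented). The caps are copied by value so the dump reflects hang-time state. A minimal sketch of that snapshot pattern, with an illustrative struct layout rather than the real i915 one:

    /* Sketch: snapshot capability flags into the error record by value,
     * so readout later cannot race with the live, mutable structure. */
    struct caps_snapshot {
            unsigned int scheduler;         /* illustrative field */
    };

    struct error_record {
            struct caps_snapshot driver_caps;
    };

    static void capture_caps(struct error_record *error,
                             const struct caps_snapshot *live)
    {
            error->driver_caps = *live;     /* copy, never alias */
    }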
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 97f3a5640289..0e5c580d117c 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -1,11 +1,6 @@
1/** 1/*
2 * \file i915_ioc32.c
3 *
4 * 32-bit ioctl compatibility routines for the i915 DRM. 2 * 32-bit ioctl compatibility routines for the i915 DRM.
5 * 3 *
6 * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
7 *
8 *
9 * Copyright (C) Paul Mackerras 2005 4 * Copyright (C) Paul Mackerras 2005
10 * Copyright (C) Alan Hourihane 2005 5 * Copyright (C) Alan Hourihane 2005
11 * All Rights Reserved. 6 * All Rights Reserved.
@@ -28,6 +23,8 @@
28 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
29 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 24 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE. 25 * IN THE SOFTWARE.
26 *
27 * Author: Alan Hourihane <alanh@fairlite.demon.co.uk>
31 */ 28 */
32#include <linux/compat.h> 29#include <linux/compat.h>
33 30
@@ -55,10 +52,10 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
55 return -EFAULT; 52 return -EFAULT;
56 53
57 request = compat_alloc_user_space(sizeof(*request)); 54 request = compat_alloc_user_space(sizeof(*request));
58 if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) 55 if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
59 || __put_user(req32.param, &request->param) 56 __put_user(req32.param, &request->param) ||
60 || __put_user((void __user *)(unsigned long)req32.value, 57 __put_user((void __user *)(unsigned long)req32.value,
61 &request->value)) 58 &request->value))
62 return -EFAULT; 59 return -EFAULT;
63 60
64 return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM, 61 return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
@@ -70,13 +67,13 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
70}; 67};
71 68
72/** 69/**
70 * i915_compat_ioctl - handle the mistakes of the past
71 * @filp: the file pointer
72 * @cmd: the ioctl command (and encoded flags)
73 * @arg: the ioctl argument (from userspace)
74 *
73 * Called whenever a 32-bit process running under a 64-bit kernel 75 * Called whenever a 32-bit process running under a 64-bit kernel
74 * performs an ioctl on /dev/dri/card<n>. 76 * performs an ioctl on /dev/dri/card<n>.
75 *
76 * \param filp file pointer.
77 * \param cmd command.
78 * \param arg user argument.
79 * \return zero on success or negative number on failure.
80 */ 77 */
81long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 78long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
82{ 79{
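
The i915_ioc32.c change is documentation hygiene — DocBook markers (\file, \param, \author) become a kernel-doc block, and the '||' operators move to line ends per kernel style — but the touched function is also a compact example of the 32-bit compat thunk this file exists for: copy the 32-bit layout in, build the native layout in userspace scratch memory, forward to drm_ioctl(). A simplified sketch (struct comment shortened; compat_alloc_user_space() and the VERIFY_WRITE flavour of access_ok() were still current API at this kernel vintage):

    struct drm_i915_getparam32 {
            s32 param;
            u32 value;      /* 32-bit user pointer, kept as u32 */
    };

    static int compat_i915_getparam(struct file *file, unsigned long arg)
    {
            struct drm_i915_getparam32 req32;
            drm_i915_getparam_t __user *request;

            if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
                    return -EFAULT;

            request = compat_alloc_user_space(sizeof(*request));
            if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
                __put_user(req32.param, &request->param) ||
                __put_user((void __user *)(unsigned long)req32.value,
                           &request->value))
                    return -EFAULT;

            return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
                             (unsigned long)request);
    }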
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b886bd459acc..17de6cef2a30 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1413,64 +1413,73 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
1413 tasklet_hi_schedule(&execlists->tasklet); 1413 tasklet_hi_schedule(&execlists->tasklet);
1414} 1414}
1415 1415
1416static void gen8_gt_irq_ack(struct drm_i915_private *dev_priv, 1416static void gen8_gt_irq_ack(struct drm_i915_private *i915,
1417 u32 master_ctl, u32 gt_iir[4]) 1417 u32 master_ctl, u32 gt_iir[4])
1418{ 1418{
1419 void __iomem * const regs = i915->regs;
1420
1421#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
1422 GEN8_GT_BCS_IRQ | \
1423 GEN8_GT_VCS1_IRQ | \
1424 GEN8_GT_VCS2_IRQ | \
1425 GEN8_GT_VECS_IRQ | \
1426 GEN8_GT_PM_IRQ | \
1427 GEN8_GT_GUC_IRQ)
1428
1419 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1429 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1420 gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0)); 1430 gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
1421 if (gt_iir[0]) 1431 if (likely(gt_iir[0]))
1422 I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]); 1432 raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
1423 } 1433 }
1424 1434
1425 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1435 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1426 gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1)); 1436 gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
1427 if (gt_iir[1]) 1437 if (likely(gt_iir[1]))
1428 I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]); 1438 raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
1429 } 1439 }
1430 1440
1431 if (master_ctl & GEN8_GT_VECS_IRQ) { 1441 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1432 gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3)); 1442 gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
1433 if (gt_iir[3]) 1443 if (likely(gt_iir[2] & (i915->pm_rps_events |
1434 I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]); 1444 i915->pm_guc_events)))
1445 raw_reg_write(regs, GEN8_GT_IIR(2),
1446 gt_iir[2] & (i915->pm_rps_events |
1447 i915->pm_guc_events));
1435 } 1448 }
1436 1449
1437 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1450 if (master_ctl & GEN8_GT_VECS_IRQ) {
1438 gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2)); 1451 gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
1439 if (gt_iir[2] & (dev_priv->pm_rps_events | 1452 if (likely(gt_iir[3]))
1440 dev_priv->pm_guc_events)) { 1453 raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
1441 I915_WRITE_FW(GEN8_GT_IIR(2),
1442 gt_iir[2] & (dev_priv->pm_rps_events |
1443 dev_priv->pm_guc_events));
1444 }
1445 } 1454 }
1446} 1455}
1447 1456
1448static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv, 1457static void gen8_gt_irq_handler(struct drm_i915_private *i915,
1449 u32 gt_iir[4]) 1458 u32 master_ctl, u32 gt_iir[4])
1450{ 1459{
1451 if (gt_iir[0]) { 1460 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1452 gen8_cs_irq_handler(dev_priv->engine[RCS], 1461 gen8_cs_irq_handler(i915->engine[RCS],
1453 gt_iir[0], GEN8_RCS_IRQ_SHIFT); 1462 gt_iir[0], GEN8_RCS_IRQ_SHIFT);
1454 gen8_cs_irq_handler(dev_priv->engine[BCS], 1463 gen8_cs_irq_handler(i915->engine[BCS],
1455 gt_iir[0], GEN8_BCS_IRQ_SHIFT); 1464 gt_iir[0], GEN8_BCS_IRQ_SHIFT);
1456 } 1465 }
1457 1466
1458 if (gt_iir[1]) { 1467 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1459 gen8_cs_irq_handler(dev_priv->engine[VCS], 1468 gen8_cs_irq_handler(i915->engine[VCS],
1460 gt_iir[1], GEN8_VCS1_IRQ_SHIFT); 1469 gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
1461 gen8_cs_irq_handler(dev_priv->engine[VCS2], 1470 gen8_cs_irq_handler(i915->engine[VCS2],
1462 gt_iir[1], GEN8_VCS2_IRQ_SHIFT); 1471 gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
1463 } 1472 }
1464 1473
1465 if (gt_iir[3]) 1474 if (master_ctl & GEN8_GT_VECS_IRQ) {
1466 gen8_cs_irq_handler(dev_priv->engine[VECS], 1475 gen8_cs_irq_handler(i915->engine[VECS],
1467 gt_iir[3], GEN8_VECS_IRQ_SHIFT); 1476 gt_iir[3], GEN8_VECS_IRQ_SHIFT);
1477 }
1468 1478
1469 if (gt_iir[2] & dev_priv->pm_rps_events) 1479 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1470 gen6_rps_irq_handler(dev_priv, gt_iir[2]); 1480 gen6_rps_irq_handler(i915, gt_iir[2]);
1471 1481 gen9_guc_irq_handler(i915, gt_iir[2]);
1472 if (gt_iir[2] & dev_priv->pm_guc_events) 1482 }
1473 gen9_guc_irq_handler(dev_priv, gt_iir[2]);
1474} 1483}
1475 1484
1476static bool bxt_port_hotplug_long_detect(enum port port, u32 val) 1485static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
@@ -2085,9 +2094,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
2085 2094
2086 do { 2095 do {
2087 u32 master_ctl, iir; 2096 u32 master_ctl, iir;
2088 u32 gt_iir[4] = {};
2089 u32 pipe_stats[I915_MAX_PIPES] = {}; 2097 u32 pipe_stats[I915_MAX_PIPES] = {};
2090 u32 hotplug_status = 0; 2098 u32 hotplug_status = 0;
2099 u32 gt_iir[4];
2091 u32 ier = 0; 2100 u32 ier = 0;
2092 2101
2093 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 2102 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
@@ -2140,7 +2149,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
2140 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2149 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2141 POSTING_READ(GEN8_MASTER_IRQ); 2150 POSTING_READ(GEN8_MASTER_IRQ);
2142 2151
2143 gen8_gt_irq_handler(dev_priv, gt_iir); 2152 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2144 2153
2145 if (hotplug_status) 2154 if (hotplug_status)
2146 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2155 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -2675,10 +2684,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2675 2684
2676static irqreturn_t gen8_irq_handler(int irq, void *arg) 2685static irqreturn_t gen8_irq_handler(int irq, void *arg)
2677{ 2686{
2678 struct drm_device *dev = arg; 2687 struct drm_i915_private *dev_priv = to_i915(arg);
2679 struct drm_i915_private *dev_priv = to_i915(dev);
2680 u32 master_ctl; 2688 u32 master_ctl;
2681 u32 gt_iir[4] = {}; 2689 u32 gt_iir[4];
2682 2690
2683 if (!intel_irqs_enabled(dev_priv)) 2691 if (!intel_irqs_enabled(dev_priv))
2684 return IRQ_NONE; 2692 return IRQ_NONE;
@@ -2690,18 +2698,19 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2690 2698
2691 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2699 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2692 2700
2693 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2694 disable_rpm_wakeref_asserts(dev_priv);
2695
2696 /* Find, clear, then process each source of interrupt */ 2701 /* Find, clear, then process each source of interrupt */
2697 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2702 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2698 gen8_gt_irq_handler(dev_priv, gt_iir); 2703
2699 gen8_de_irq_handler(dev_priv, master_ctl); 2704 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2705 if (master_ctl & ~GEN8_GT_IRQS) {
2706 disable_rpm_wakeref_asserts(dev_priv);
2707 gen8_de_irq_handler(dev_priv, master_ctl);
2708 enable_rpm_wakeref_asserts(dev_priv);
2709 }
2700 2710
2701 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2711 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2702 POSTING_READ_FW(GEN8_MASTER_IRQ);
2703 2712
2704 enable_rpm_wakeref_asserts(dev_priv); 2713 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2705 2714
2706 return IRQ_HANDLED; 2715 return IRQ_HANDLED;
2707} 2716}
@@ -2951,6 +2960,12 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2951 ilk_enable_display_irq(dev_priv, bit); 2960 ilk_enable_display_irq(dev_priv, bit);
2952 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2961 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2953 2962
2963 /* Even though there is no DMC, frame counter can get stuck when
2964 * PSR is active as no frames are generated.
2965 */
2966 if (HAS_PSR(dev_priv))
2967 drm_vblank_restore(dev, pipe);
2968
2954 return 0; 2969 return 0;
2955} 2970}
2956 2971
@@ -2963,6 +2978,12 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2963 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2978 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2964 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2979 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2965 2980
2981 /* Even if there is no DMC, frame counter can get stuck when
2982 * PSR is active as no frames are generated, so check only for PSR.
2983 */
2984 if (HAS_PSR(dev_priv))
2985 drm_vblank_restore(dev, pipe);
2986
2966 return 0; 2987 return 0;
2967} 2988}
2968 2989
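
The i915_irq.c rework separates acking from handling. gen8_gt_irq_ack() runs first and clears the IIR registers via raw_reg_read()/raw_reg_write() — plain MMIO on i915->regs with no forcewake or wakeref bookkeeping — which is why gen8_irq_handler() can confine the rpm-wakeref-assert dance to the display branch (master_ctl & ~GEN8_GT_IRQS) and run the GT handler after re-enabling the master interrupt. Because gen8_gt_irq_handler() now takes master_ctl too, both phases gate on the same GEN8_GT_*_IRQ bits: a gt_iir[] slot is only ever read where the ack phase wrote it, so the on-stack array no longer needs its '= {}' zero-initialisation (dropped in both gen8_irq_handler() and cherryview_irq_handler()). The invariant, sketched with hypothetical helpers:

    u32 master_ctl = read_master_irq();     /* hypothetical helper */
    u32 gt_iir[4];                          /* deliberately not zeroed */

    if (master_ctl & GT_RCS_IRQ)            /* ack: write slot 0 ... */
            gt_iir[0] = ack_and_clear_iir(0);

    /* ... handle: read slot 0 under the very same master_ctl bit,
     * so an uninitialised slot can never be observed. */
    if (master_ctl & GT_RCS_IRQ)
            handle_cs_irq(gt_iir[0]);

The two *_enable_vblank() hunks are a separate fix: with PSR active no frames are generated and the hardware frame counter stalls, so drm_vblank_restore() re-synchronises the software counter when vblanks are re-enabled.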
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
index 42ff06fe54a3..792facdb6702 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
84void 84void
85i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv) 85i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
86{ 86{
87 strncpy(dev_priv->perf.oa.test_config.uuid, 87 strlcpy(dev_priv->perf.oa.test_config.uuid,
88 "577e8e2c-3fa0-4875-8743-3538d585e3b0", 88 "577e8e2c-3fa0-4875-8743-3538d585e3b0",
89 UUID_STRING_LEN); 89 sizeof(dev_priv->perf.oa.test_config.uuid));
90 dev_priv->perf.oa.test_config.id = 1; 90 dev_priv->perf.oa.test_config.id = 1;
91 91
92 dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; 92 dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c
index ff0ac3627cc4..ba9140c87cc0 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.c
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.c
@@ -96,9 +96,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
96void 96void
97i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv) 97i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
98{ 98{
99 strncpy(dev_priv->perf.oa.test_config.uuid, 99 strlcpy(dev_priv->perf.oa.test_config.uuid,
100 "db41edd4-d8e7-4730-ad11-b9a2d6833503", 100 "db41edd4-d8e7-4730-ad11-b9a2d6833503",
101 UUID_STRING_LEN); 101 sizeof(dev_priv->perf.oa.test_config.uuid));
102 dev_priv->perf.oa.test_config.id = 1; 102 dev_priv->perf.oa.test_config.id = 1;
103 103
104 dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; 104 dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
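
Both OA test-config hunks swap strncpy() bounded by UUID_STRING_LEN for strlcpy() bounded by the destination size. UUID_STRING_LEN is 36 and excludes the terminator, so strncpy() copying a 36-character UUID under a 36-byte bound writes no NUL at all; strlcpy() always terminates. The difference in isolation, assuming the UUID_STRING_LEN + 1 sized field the driver uses:

    char uuid[36 + 1];
    const char *src = "577e8e2c-3fa0-4875-8743-3538d585e3b0"; /* 36 chars */

    strncpy(uuid, src, 36);           /* copies 36 bytes, no terminator */
    strlcpy(uuid, src, sizeof(uuid)); /* always NUL-terminates in bounds */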
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 4e7a10c89782..1eaabf28d7b7 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -29,6 +29,9 @@
29#include "i915_drv.h" 29#include "i915_drv.h"
30#include "i915_selftest.h" 30#include "i915_selftest.h"
31 31
32#define PLATFORM(x) .platform = (x), .platform_mask = BIT(x)
33#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
34
32#define GEN_DEFAULT_PIPEOFFSETS \ 35#define GEN_DEFAULT_PIPEOFFSETS \
33 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ 36 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
34 PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \ 37 PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
@@ -63,7 +66,8 @@
63 .page_sizes = I915_GTT_PAGE_SIZE_4K 66 .page_sizes = I915_GTT_PAGE_SIZE_4K
64 67
65#define GEN2_FEATURES \ 68#define GEN2_FEATURES \
66 .gen = 2, .num_pipes = 1, \ 69 GEN(2), \
70 .num_pipes = 1, \
67 .has_overlay = 1, .overlay_needs_physical = 1, \ 71 .has_overlay = 1, .overlay_needs_physical = 1, \
68 .has_gmch_display = 1, \ 72 .has_gmch_display = 1, \
69 .hws_needs_physical = 1, \ 73 .hws_needs_physical = 1, \
@@ -76,19 +80,20 @@
76 80
77static const struct intel_device_info intel_i830_info = { 81static const struct intel_device_info intel_i830_info = {
78 GEN2_FEATURES, 82 GEN2_FEATURES,
79 .platform = INTEL_I830, 83 PLATFORM(INTEL_I830),
80 .is_mobile = 1, .cursor_needs_physical = 1, 84 .is_mobile = 1, .cursor_needs_physical = 1,
81 .num_pipes = 2, /* legal, last one wins */ 85 .num_pipes = 2, /* legal, last one wins */
82}; 86};
83 87
84static const struct intel_device_info intel_i845g_info = { 88static const struct intel_device_info intel_i845g_info = {
85 GEN2_FEATURES, 89 GEN2_FEATURES,
86 .platform = INTEL_I845G, 90 PLATFORM(INTEL_I845G),
87}; 91};
88 92
89static const struct intel_device_info intel_i85x_info = { 93static const struct intel_device_info intel_i85x_info = {
90 GEN2_FEATURES, 94 GEN2_FEATURES,
91 .platform = INTEL_I85X, .is_mobile = 1, 95 PLATFORM(INTEL_I85X),
96 .is_mobile = 1,
92 .num_pipes = 2, /* legal, last one wins */ 97 .num_pipes = 2, /* legal, last one wins */
93 .cursor_needs_physical = 1, 98 .cursor_needs_physical = 1,
94 .has_fbc = 1, 99 .has_fbc = 1,
@@ -96,11 +101,12 @@ static const struct intel_device_info intel_i85x_info = {
96 101
97static const struct intel_device_info intel_i865g_info = { 102static const struct intel_device_info intel_i865g_info = {
98 GEN2_FEATURES, 103 GEN2_FEATURES,
99 .platform = INTEL_I865G, 104 PLATFORM(INTEL_I865G),
100}; 105};
101 106
102#define GEN3_FEATURES \ 107#define GEN3_FEATURES \
103 .gen = 3, .num_pipes = 2, \ 108 GEN(3), \
109 .num_pipes = 2, \
104 .has_gmch_display = 1, \ 110 .has_gmch_display = 1, \
105 .ring_mask = RENDER_RING, \ 111 .ring_mask = RENDER_RING, \
106 .has_snoop = true, \ 112 .has_snoop = true, \
@@ -110,7 +116,8 @@ static const struct intel_device_info intel_i865g_info = {
110 116
111static const struct intel_device_info intel_i915g_info = { 117static const struct intel_device_info intel_i915g_info = {
112 GEN3_FEATURES, 118 GEN3_FEATURES,
113 .platform = INTEL_I915G, .cursor_needs_physical = 1, 119 PLATFORM(INTEL_I915G),
120 .cursor_needs_physical = 1,
114 .has_overlay = 1, .overlay_needs_physical = 1, 121 .has_overlay = 1, .overlay_needs_physical = 1,
115 .hws_needs_physical = 1, 122 .hws_needs_physical = 1,
116 .unfenced_needs_alignment = 1, 123 .unfenced_needs_alignment = 1,
@@ -118,7 +125,7 @@ static const struct intel_device_info intel_i915g_info = {
118 125
119static const struct intel_device_info intel_i915gm_info = { 126static const struct intel_device_info intel_i915gm_info = {
120 GEN3_FEATURES, 127 GEN3_FEATURES,
121 .platform = INTEL_I915GM, 128 PLATFORM(INTEL_I915GM),
122 .is_mobile = 1, 129 .is_mobile = 1,
123 .cursor_needs_physical = 1, 130 .cursor_needs_physical = 1,
124 .has_overlay = 1, .overlay_needs_physical = 1, 131 .has_overlay = 1, .overlay_needs_physical = 1,
@@ -130,7 +137,7 @@ static const struct intel_device_info intel_i915gm_info = {
130 137
131static const struct intel_device_info intel_i945g_info = { 138static const struct intel_device_info intel_i945g_info = {
132 GEN3_FEATURES, 139 GEN3_FEATURES,
133 .platform = INTEL_I945G, 140 PLATFORM(INTEL_I945G),
134 .has_hotplug = 1, .cursor_needs_physical = 1, 141 .has_hotplug = 1, .cursor_needs_physical = 1,
135 .has_overlay = 1, .overlay_needs_physical = 1, 142 .has_overlay = 1, .overlay_needs_physical = 1,
136 .hws_needs_physical = 1, 143 .hws_needs_physical = 1,
@@ -139,7 +146,8 @@ static const struct intel_device_info intel_i945g_info = {
139 146
140static const struct intel_device_info intel_i945gm_info = { 147static const struct intel_device_info intel_i945gm_info = {
141 GEN3_FEATURES, 148 GEN3_FEATURES,
142 .platform = INTEL_I945GM, .is_mobile = 1, 149 PLATFORM(INTEL_I945GM),
150 .is_mobile = 1,
143 .has_hotplug = 1, .cursor_needs_physical = 1, 151 .has_hotplug = 1, .cursor_needs_physical = 1,
144 .has_overlay = 1, .overlay_needs_physical = 1, 152 .has_overlay = 1, .overlay_needs_physical = 1,
145 .supports_tv = 1, 153 .supports_tv = 1,
@@ -150,20 +158,22 @@ static const struct intel_device_info intel_i945gm_info = {
150 158
151static const struct intel_device_info intel_g33_info = { 159static const struct intel_device_info intel_g33_info = {
152 GEN3_FEATURES, 160 GEN3_FEATURES,
153 .platform = INTEL_G33, 161 PLATFORM(INTEL_G33),
154 .has_hotplug = 1, 162 .has_hotplug = 1,
155 .has_overlay = 1, 163 .has_overlay = 1,
156}; 164};
157 165
158static const struct intel_device_info intel_pineview_info = { 166static const struct intel_device_info intel_pineview_info = {
159 GEN3_FEATURES, 167 GEN3_FEATURES,
160 .platform = INTEL_PINEVIEW, .is_mobile = 1, 168 PLATFORM(INTEL_PINEVIEW),
169 .is_mobile = 1,
161 .has_hotplug = 1, 170 .has_hotplug = 1,
162 .has_overlay = 1, 171 .has_overlay = 1,
163}; 172};
164 173
165#define GEN4_FEATURES \ 174#define GEN4_FEATURES \
166 .gen = 4, .num_pipes = 2, \ 175 GEN(4), \
176 .num_pipes = 2, \
167 .has_hotplug = 1, \ 177 .has_hotplug = 1, \
168 .has_gmch_display = 1, \ 178 .has_gmch_display = 1, \
169 .ring_mask = RENDER_RING, \ 179 .ring_mask = RENDER_RING, \
@@ -174,7 +184,7 @@ static const struct intel_device_info intel_pineview_info = {
174 184
175static const struct intel_device_info intel_i965g_info = { 185static const struct intel_device_info intel_i965g_info = {
176 GEN4_FEATURES, 186 GEN4_FEATURES,
177 .platform = INTEL_I965G, 187 PLATFORM(INTEL_I965G),
178 .has_overlay = 1, 188 .has_overlay = 1,
179 .hws_needs_physical = 1, 189 .hws_needs_physical = 1,
180 .has_snoop = false, 190 .has_snoop = false,
@@ -182,7 +192,7 @@ static const struct intel_device_info intel_i965g_info = {
182 192
183static const struct intel_device_info intel_i965gm_info = { 193static const struct intel_device_info intel_i965gm_info = {
184 GEN4_FEATURES, 194 GEN4_FEATURES,
185 .platform = INTEL_I965GM, 195 PLATFORM(INTEL_I965GM),
186 .is_mobile = 1, .has_fbc = 1, 196 .is_mobile = 1, .has_fbc = 1,
187 .has_overlay = 1, 197 .has_overlay = 1,
188 .supports_tv = 1, 198 .supports_tv = 1,
@@ -192,20 +202,21 @@ static const struct intel_device_info intel_i965gm_info = {
192 202
193static const struct intel_device_info intel_g45_info = { 203static const struct intel_device_info intel_g45_info = {
194 GEN4_FEATURES, 204 GEN4_FEATURES,
195 .platform = INTEL_G45, 205 PLATFORM(INTEL_G45),
196 .ring_mask = RENDER_RING | BSD_RING, 206 .ring_mask = RENDER_RING | BSD_RING,
197}; 207};
198 208
199static const struct intel_device_info intel_gm45_info = { 209static const struct intel_device_info intel_gm45_info = {
200 GEN4_FEATURES, 210 GEN4_FEATURES,
201 .platform = INTEL_GM45, 211 PLATFORM(INTEL_GM45),
202 .is_mobile = 1, .has_fbc = 1, 212 .is_mobile = 1, .has_fbc = 1,
203 .supports_tv = 1, 213 .supports_tv = 1,
204 .ring_mask = RENDER_RING | BSD_RING, 214 .ring_mask = RENDER_RING | BSD_RING,
205}; 215};
206 216
207#define GEN5_FEATURES \ 217#define GEN5_FEATURES \
208 .gen = 5, .num_pipes = 2, \ 218 GEN(5), \
219 .num_pipes = 2, \
209 .has_hotplug = 1, \ 220 .has_hotplug = 1, \
210 .ring_mask = RENDER_RING | BSD_RING, \ 221 .ring_mask = RENDER_RING | BSD_RING, \
211 .has_snoop = true, \ 222 .has_snoop = true, \
@@ -217,17 +228,18 @@ static const struct intel_device_info intel_gm45_info = {
217 228
218static const struct intel_device_info intel_ironlake_d_info = { 229static const struct intel_device_info intel_ironlake_d_info = {
219 GEN5_FEATURES, 230 GEN5_FEATURES,
220 .platform = INTEL_IRONLAKE, 231 PLATFORM(INTEL_IRONLAKE),
221}; 232};
222 233
223static const struct intel_device_info intel_ironlake_m_info = { 234static const struct intel_device_info intel_ironlake_m_info = {
224 GEN5_FEATURES, 235 GEN5_FEATURES,
225 .platform = INTEL_IRONLAKE, 236 PLATFORM(INTEL_IRONLAKE),
226 .is_mobile = 1, .has_fbc = 1, 237 .is_mobile = 1, .has_fbc = 1,
227}; 238};
228 239
229#define GEN6_FEATURES \ 240#define GEN6_FEATURES \
230 .gen = 6, .num_pipes = 2, \ 241 GEN(6), \
242 .num_pipes = 2, \
231 .has_hotplug = 1, \ 243 .has_hotplug = 1, \
232 .has_fbc = 1, \ 244 .has_fbc = 1, \
233 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ 245 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
@@ -241,7 +253,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
241 253
242#define SNB_D_PLATFORM \ 254#define SNB_D_PLATFORM \
243 GEN6_FEATURES, \ 255 GEN6_FEATURES, \
244 .platform = INTEL_SANDYBRIDGE 256 PLATFORM(INTEL_SANDYBRIDGE)
245 257
246static const struct intel_device_info intel_sandybridge_d_gt1_info = { 258static const struct intel_device_info intel_sandybridge_d_gt1_info = {
247 SNB_D_PLATFORM, 259 SNB_D_PLATFORM,
@@ -255,7 +267,7 @@ static const struct intel_device_info intel_sandybridge_d_gt2_info = {
255 267
256#define SNB_M_PLATFORM \ 268#define SNB_M_PLATFORM \
257 GEN6_FEATURES, \ 269 GEN6_FEATURES, \
258 .platform = INTEL_SANDYBRIDGE, \ 270 PLATFORM(INTEL_SANDYBRIDGE), \
259 .is_mobile = 1 271 .is_mobile = 1
260 272
261 273
@@ -270,7 +282,8 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
270}; 282};
271 283
272#define GEN7_FEATURES \ 284#define GEN7_FEATURES \
273 .gen = 7, .num_pipes = 3, \ 285 GEN(7), \
286 .num_pipes = 3, \
274 .has_hotplug = 1, \ 287 .has_hotplug = 1, \
275 .has_fbc = 1, \ 288 .has_fbc = 1, \
276 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ 289 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
@@ -285,7 +298,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
285 298
286#define IVB_D_PLATFORM \ 299#define IVB_D_PLATFORM \
287 GEN7_FEATURES, \ 300 GEN7_FEATURES, \
288 .platform = INTEL_IVYBRIDGE, \ 301 PLATFORM(INTEL_IVYBRIDGE), \
289 .has_l3_dpf = 1 302 .has_l3_dpf = 1
290 303
291static const struct intel_device_info intel_ivybridge_d_gt1_info = { 304static const struct intel_device_info intel_ivybridge_d_gt1_info = {
@@ -300,7 +313,7 @@ static const struct intel_device_info intel_ivybridge_d_gt2_info = {
300 313
301#define IVB_M_PLATFORM \ 314#define IVB_M_PLATFORM \
302 GEN7_FEATURES, \ 315 GEN7_FEATURES, \
303 .platform = INTEL_IVYBRIDGE, \ 316 PLATFORM(INTEL_IVYBRIDGE), \
304 .is_mobile = 1, \ 317 .is_mobile = 1, \
305 .has_l3_dpf = 1 318 .has_l3_dpf = 1
306 319
@@ -316,15 +329,15 @@ static const struct intel_device_info intel_ivybridge_m_gt2_info = {
316 329
317static const struct intel_device_info intel_ivybridge_q_info = { 330static const struct intel_device_info intel_ivybridge_q_info = {
318 GEN7_FEATURES, 331 GEN7_FEATURES,
319 .platform = INTEL_IVYBRIDGE, 332 PLATFORM(INTEL_IVYBRIDGE),
320 .gt = 2, 333 .gt = 2,
321 .num_pipes = 0, /* legal, last one wins */ 334 .num_pipes = 0, /* legal, last one wins */
322 .has_l3_dpf = 1, 335 .has_l3_dpf = 1,
323}; 336};
324 337
325static const struct intel_device_info intel_valleyview_info = { 338static const struct intel_device_info intel_valleyview_info = {
326 .platform = INTEL_VALLEYVIEW, 339 PLATFORM(INTEL_VALLEYVIEW),
327 .gen = 7, 340 GEN(7),
328 .is_lp = 1, 341 .is_lp = 1,
329 .num_pipes = 2, 342 .num_pipes = 2,
330 .has_psr = 1, 343 .has_psr = 1,
@@ -355,7 +368,7 @@ static const struct intel_device_info intel_valleyview_info = {
355 368
356#define HSW_PLATFORM \ 369#define HSW_PLATFORM \
357 G75_FEATURES, \ 370 G75_FEATURES, \
358 .platform = INTEL_HASWELL, \ 371 PLATFORM(INTEL_HASWELL), \
359 .has_l3_dpf = 1 372 .has_l3_dpf = 1
360 373
361static const struct intel_device_info intel_haswell_gt1_info = { 374static const struct intel_device_info intel_haswell_gt1_info = {
@@ -375,6 +388,7 @@ static const struct intel_device_info intel_haswell_gt3_info = {
375 388
376#define GEN8_FEATURES \ 389#define GEN8_FEATURES \
377 G75_FEATURES, \ 390 G75_FEATURES, \
391 GEN(8), \
378 BDW_COLORS, \ 392 BDW_COLORS, \
379 .page_sizes = I915_GTT_PAGE_SIZE_4K | \ 393 .page_sizes = I915_GTT_PAGE_SIZE_4K | \
380 I915_GTT_PAGE_SIZE_2M, \ 394 I915_GTT_PAGE_SIZE_2M, \
@@ -385,8 +399,7 @@ static const struct intel_device_info intel_haswell_gt3_info = {
385 399
386#define BDW_PLATFORM \ 400#define BDW_PLATFORM \
387 GEN8_FEATURES, \ 401 GEN8_FEATURES, \
388 .gen = 8, \ 402 PLATFORM(INTEL_BROADWELL)
389 .platform = INTEL_BROADWELL
390 403
391static const struct intel_device_info intel_broadwell_gt1_info = { 404static const struct intel_device_info intel_broadwell_gt1_info = {
392 BDW_PLATFORM, 405 BDW_PLATFORM,
@@ -413,11 +426,12 @@ static const struct intel_device_info intel_broadwell_gt3_info = {
413}; 426};
414 427
415static const struct intel_device_info intel_cherryview_info = { 428static const struct intel_device_info intel_cherryview_info = {
416 .gen = 8, .num_pipes = 3, 429 PLATFORM(INTEL_CHERRYVIEW),
430 GEN(8),
431 .num_pipes = 3,
417 .has_hotplug = 1, 432 .has_hotplug = 1,
418 .is_lp = 1, 433 .is_lp = 1,
419 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, 434 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
420 .platform = INTEL_CHERRYVIEW,
421 .has_64bit_reloc = 1, 435 .has_64bit_reloc = 1,
422 .has_psr = 1, 436 .has_psr = 1,
423 .has_runtime_pm = 1, 437 .has_runtime_pm = 1,
@@ -443,6 +457,7 @@ static const struct intel_device_info intel_cherryview_info = {
443 457
444#define GEN9_FEATURES \ 458#define GEN9_FEATURES \
445 GEN8_FEATURES, \ 459 GEN8_FEATURES, \
460 GEN(9), \
446 GEN9_DEFAULT_PAGE_SIZES, \ 461 GEN9_DEFAULT_PAGE_SIZES, \
447 .has_logical_ring_preemption = 1, \ 462 .has_logical_ring_preemption = 1, \
448 .has_csr = 1, \ 463 .has_csr = 1, \
@@ -452,8 +467,7 @@ static const struct intel_device_info intel_cherryview_info = {
452 467
453#define SKL_PLATFORM \ 468#define SKL_PLATFORM \
454 GEN9_FEATURES, \ 469 GEN9_FEATURES, \
455 .gen = 9, \ 470 PLATFORM(INTEL_SKYLAKE)
456 .platform = INTEL_SKYLAKE
457 471
458static const struct intel_device_info intel_skylake_gt1_info = { 472static const struct intel_device_info intel_skylake_gt1_info = {
459 SKL_PLATFORM, 473 SKL_PLATFORM,
@@ -481,7 +495,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
481}; 495};
482 496
483#define GEN9_LP_FEATURES \ 497#define GEN9_LP_FEATURES \
484 .gen = 9, \ 498 GEN(9), \
485 .is_lp = 1, \ 499 .is_lp = 1, \
486 .has_hotplug = 1, \ 500 .has_hotplug = 1, \
487 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ 501 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
@@ -513,21 +527,20 @@ static const struct intel_device_info intel_skylake_gt4_info = {
513 527
514static const struct intel_device_info intel_broxton_info = { 528static const struct intel_device_info intel_broxton_info = {
515 GEN9_LP_FEATURES, 529 GEN9_LP_FEATURES,
516 .platform = INTEL_BROXTON, 530 PLATFORM(INTEL_BROXTON),
517 .ddb_size = 512, 531 .ddb_size = 512,
518}; 532};
519 533
520static const struct intel_device_info intel_geminilake_info = { 534static const struct intel_device_info intel_geminilake_info = {
521 GEN9_LP_FEATURES, 535 GEN9_LP_FEATURES,
522 .platform = INTEL_GEMINILAKE, 536 PLATFORM(INTEL_GEMINILAKE),
523 .ddb_size = 1024, 537 .ddb_size = 1024,
524 GLK_COLORS, 538 GLK_COLORS,
525}; 539};
526 540
527#define KBL_PLATFORM \ 541#define KBL_PLATFORM \
528 GEN9_FEATURES, \ 542 GEN9_FEATURES, \
529 .gen = 9, \ 543 PLATFORM(INTEL_KABYLAKE)
530 .platform = INTEL_KABYLAKE
531 544
532static const struct intel_device_info intel_kabylake_gt1_info = { 545static const struct intel_device_info intel_kabylake_gt1_info = {
533 KBL_PLATFORM, 546 KBL_PLATFORM,
@@ -547,8 +560,7 @@ static const struct intel_device_info intel_kabylake_gt3_info = {
547 560
548#define CFL_PLATFORM \ 561#define CFL_PLATFORM \
549 GEN9_FEATURES, \ 562 GEN9_FEATURES, \
550 .gen = 9, \ 563 PLATFORM(INTEL_COFFEELAKE)
551 .platform = INTEL_COFFEELAKE
552 564
553static const struct intel_device_info intel_coffeelake_gt1_info = { 565static const struct intel_device_info intel_coffeelake_gt1_info = {
554 CFL_PLATFORM, 566 CFL_PLATFORM,
@@ -568,30 +580,32 @@ static const struct intel_device_info intel_coffeelake_gt3_info = {
568 580
569#define GEN10_FEATURES \ 581#define GEN10_FEATURES \
570 GEN9_FEATURES, \ 582 GEN9_FEATURES, \
583 GEN(10), \
571 .ddb_size = 1024, \ 584 .ddb_size = 1024, \
572 GLK_COLORS 585 GLK_COLORS
573 586
574static const struct intel_device_info intel_cannonlake_info = { 587static const struct intel_device_info intel_cannonlake_info = {
575 GEN10_FEATURES, 588 GEN10_FEATURES,
576 .is_alpha_support = 1, 589 PLATFORM(INTEL_CANNONLAKE),
577 .platform = INTEL_CANNONLAKE,
578 .gen = 10,
579 .gt = 2, 590 .gt = 2,
580}; 591};
581 592
582#define GEN11_FEATURES \ 593#define GEN11_FEATURES \
583 GEN10_FEATURES, \ 594 GEN10_FEATURES, \
584 .gen = 11, \ 595 GEN(11), \
585 .ddb_size = 2048, \ 596 .ddb_size = 2048, \
586 .has_csr = 0 597 .has_csr = 0
587 598
588static const struct intel_device_info intel_icelake_11_info = { 599static const struct intel_device_info intel_icelake_11_info = {
589 GEN11_FEATURES, 600 GEN11_FEATURES,
590 .platform = INTEL_ICELAKE, 601 PLATFORM(INTEL_ICELAKE),
591 .is_alpha_support = 1, 602 .is_alpha_support = 1,
592 .has_resource_streamer = 0, 603 .has_resource_streamer = 0,
593}; 604};
594 605
606#undef GEN
607#undef PLATFORM
608
595/* 609/*
596 * Make sure any device matches here are from most specific to most 610 * Make sure any device matches here are from most specific to most
597 * general. For example, since the Quanta match is based on the subsystem 611 * general. For example, since the Quanta match is based on the subsystem
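
The long i915_pci.c hunk series is mechanical: every device-table entry now goes through the PLATFORM() and GEN() helpers introduced at the top and #undef'd at the bottom, which set the scalar field and the corresponding mask bit in one step. Keeping .platform/.platform_mask and .gen/.gen_mask consistent by construction lets platform and generation checks become single mask tests. What the expansion buys, sketched (the IS_PLATFORM() form is illustrative, not quoted from the driver):

    #define PLATFORM(x) .platform = (x), .platform_mask = BIT(x)
    #define GEN(x)      .gen = (x),      .gen_mask = BIT((x) - 1)

    /* PLATFORM(INTEL_I830) expands to
     *     .platform = INTEL_I830, .platform_mask = BIT(INTEL_I830)
     * so a platform test can be a single AND instead of a compare: */
    #define IS_PLATFORM(info, p)    ((info)->platform_mask & BIT(p))

The cannonlake entry also quietly drops .is_alpha_support, leaving icelake as the only alpha-quality platform in the table.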
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 1c440460255d..964467b03e4d 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -415,7 +415,94 @@ static int i915_pmu_event_init(struct perf_event *event)
415 return 0; 415 return 0;
416} 416}
417 417
418static u64 __i915_pmu_event_read(struct perf_event *event) 418static u64 __get_rc6(struct drm_i915_private *i915)
419{
420 u64 val;
421
422 val = intel_rc6_residency_ns(i915,
423 IS_VALLEYVIEW(i915) ?
424 VLV_GT_RENDER_RC6 :
425 GEN6_GT_GFX_RC6);
426
427 if (HAS_RC6p(i915))
428 val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
429
430 if (HAS_RC6pp(i915))
431 val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
432
433 return val;
434}
435
436static u64 get_rc6(struct drm_i915_private *i915, bool locked)
437{
438#if IS_ENABLED(CONFIG_PM)
439 unsigned long flags;
440 u64 val;
441
442 if (intel_runtime_pm_get_if_in_use(i915)) {
443 val = __get_rc6(i915);
444 intel_runtime_pm_put(i915);
445
446 /*
447 * If we are coming back from being runtime suspended we must
448 * be careful not to report a larger value than returned
449 * previously.
450 */
451
452 if (!locked)
453 spin_lock_irqsave(&i915->pmu.lock, flags);
454
455 if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
456 i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
457 i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
458 } else {
459 val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
460 }
461
462 if (!locked)
463 spin_unlock_irqrestore(&i915->pmu.lock, flags);
464 } else {
465 struct pci_dev *pdev = i915->drm.pdev;
466 struct device *kdev = &pdev->dev;
467 unsigned long flags2;
468
469 /*
470 * We are runtime suspended.
471 *
472 * Report the delta from when the device was suspended to now,
473 * on top of the last known real value, as the approximated RC6
474 * counter value.
475 */
476 if (!locked)
477 spin_lock_irqsave(&i915->pmu.lock, flags);
478
479 spin_lock_irqsave(&kdev->power.lock, flags2);
480
481 if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
482 i915->pmu.suspended_jiffies_last =
483 kdev->power.suspended_jiffies;
484
485 val = kdev->power.suspended_jiffies -
486 i915->pmu.suspended_jiffies_last;
487 val += jiffies - kdev->power.accounting_timestamp;
488
489 spin_unlock_irqrestore(&kdev->power.lock, flags2);
490
491 val = jiffies_to_nsecs(val);
492 val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
493 i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
494
495 if (!locked)
496 spin_unlock_irqrestore(&i915->pmu.lock, flags);
497 }
498
499 return val;
500#else
501 return __get_rc6(i915);
502#endif
503}
504
505static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
419{ 506{
420 struct drm_i915_private *i915 = 507 struct drm_i915_private *i915 =
421 container_of(event->pmu, typeof(*i915), pmu.base); 508 container_of(event->pmu, typeof(*i915), pmu.base);
@@ -453,18 +540,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
453 val = count_interrupts(i915); 540 val = count_interrupts(i915);
454 break; 541 break;
455 case I915_PMU_RC6_RESIDENCY: 542 case I915_PMU_RC6_RESIDENCY:
456 intel_runtime_pm_get(i915); 543 val = get_rc6(i915, locked);
457 val = intel_rc6_residency_ns(i915,
458 IS_VALLEYVIEW(i915) ?
459 VLV_GT_RENDER_RC6 :
460 GEN6_GT_GFX_RC6);
461 if (HAS_RC6p(i915))
462 val += intel_rc6_residency_ns(i915,
463 GEN6_GT_GFX_RC6p);
464 if (HAS_RC6pp(i915))
465 val += intel_rc6_residency_ns(i915,
466 GEN6_GT_GFX_RC6pp);
467 intel_runtime_pm_put(i915);
468 break; 544 break;
469 } 545 }
470 } 546 }
@@ -479,7 +555,7 @@ static void i915_pmu_event_read(struct perf_event *event)
479 555
480again: 556again:
481 prev = local64_read(&hwc->prev_count); 557 prev = local64_read(&hwc->prev_count);
482 new = __i915_pmu_event_read(event); 558 new = __i915_pmu_event_read(event, false);
483 559
484 if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev) 560 if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
485 goto again; 561 goto again;
@@ -534,7 +610,7 @@ static void i915_pmu_enable(struct perf_event *event)
534 * for all listeners. Even when the event was already enabled and has 610 * for all listeners. Even when the event was already enabled and has
535 * an existing non-zero value. 611 * an existing non-zero value.
536 */ 612 */
537 local64_set(&event->hw.prev_count, __i915_pmu_event_read(event)); 613 local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true));
538 614
539 spin_unlock_irqrestore(&i915->pmu.lock, flags); 615 spin_unlock_irqrestore(&i915->pmu.lock, flags);
540} 616}
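
The i915_pmu.c change makes RC6 residency readable even while the GPU is runtime-suspended. __get_rc6() keeps the raw counter sum (RC6, plus RC6p/RC6pp where they exist); get_rc6() then picks one of two paths. If intel_runtime_pm_get_if_in_use() succeeds, it reads the real counters and clamps against the last estimate so the value handed to perf never moves backwards. Otherwise it approximates residency as the time spent suspended — taken from the PM core's suspended_jiffies accounting under kdev->power.lock — stacked on the last real reading, and caches that as the new estimate. The 'locked' flag only records whether the caller (i915_pmu_enable()) already holds pmu.lock. The monotonic clamp in isolation:

    /* Sketch: a counter that must never decrease across runtime
     * suspend. *estimated is the last value reported while asleep,
     * *real the last true hardware reading. */
    static u64 clamp_monotonic(u64 hw_val, u64 *estimated, u64 *real)
    {
            if (hw_val >= *estimated) {     /* hardware caught up */
                    *estimated = 0;
                    *real = hw_val;
                    return hw_val;
            }
            return *estimated;              /* still behind: hold the line */
    }

The i915_pmu.h hunks that follow add the two sample slots (__I915_SAMPLE_RC6, __I915_SAMPLE_RC6_ESTIMATED) and the cached suspended_jiffies_last this scheme depends on.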
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 5a2e013a56bb..aa1b1a987ea1 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -27,6 +27,8 @@
27enum { 27enum {
28 __I915_SAMPLE_FREQ_ACT = 0, 28 __I915_SAMPLE_FREQ_ACT = 0,
29 __I915_SAMPLE_FREQ_REQ, 29 __I915_SAMPLE_FREQ_REQ,
30 __I915_SAMPLE_RC6,
31 __I915_SAMPLE_RC6_ESTIMATED,
30 __I915_NUM_PMU_SAMPLERS 32 __I915_NUM_PMU_SAMPLERS
31}; 33};
32 34
@@ -95,6 +97,10 @@ struct i915_pmu {
95 */ 97 */
96 struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS]; 98 struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
97 /** 99 /**
100 * @suspended_jiffies_last: Cached suspend time from PM core.
101 */
102 unsigned long suspended_jiffies_last;
103 /**
98 * @i915_attr: Memory block holding device attributes. 104 * @i915_attr: Memory block holding device attributes.
99 */ 105 */
100 void *i915_attr; 106 void *i915_attr;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e9c79b560823..1412abcb27d4 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1906,6 +1906,11 @@ enum i915_power_well_id {
1906#define CL_POWER_DOWN_ENABLE (1 << 4) 1906#define CL_POWER_DOWN_ENABLE (1 << 4)
1907#define SUS_CLOCK_CONFIG (3 << 0) 1907#define SUS_CLOCK_CONFIG (3 << 0)
1908 1908
1909#define _ICL_PORT_CL_DW5_A 0x162014
1910#define _ICL_PORT_CL_DW5_B 0x6C014
1911#define ICL_PORT_CL_DW5(port) _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \
1912 _ICL_PORT_CL_DW5_B)
1913
1909#define _PORT_CL1CM_DW9_A 0x162024 1914#define _PORT_CL1CM_DW9_A 0x162024
1910#define _PORT_CL1CM_DW9_BC 0x6C024 1915#define _PORT_CL1CM_DW9_BC 0x6C024
1911#define IREF0RC_OFFSET_SHIFT 8 1916#define IREF0RC_OFFSET_SHIFT 8
@@ -2029,7 +2034,7 @@ enum i915_power_well_id {
2029#define _CNL_PORT_TX_DW5_LN0_AE 0x162454 2034#define _CNL_PORT_TX_DW5_LN0_AE 0x162454
2030#define _CNL_PORT_TX_DW5_LN0_B 0x162654 2035#define _CNL_PORT_TX_DW5_LN0_B 0x162654
2031#define _CNL_PORT_TX_DW5_LN0_C 0x162C54 2036#define _CNL_PORT_TX_DW5_LN0_C 0x162C54
2032#define _CNL_PORT_TX_DW5_LN0_D 0x162ED4 2037#define _CNL_PORT_TX_DW5_LN0_D 0x162E54
2033#define _CNL_PORT_TX_DW5_LN0_F 0x162854 2038#define _CNL_PORT_TX_DW5_LN0_F 0x162854
2034#define CNL_PORT_TX_DW5_GRP(port) _MMIO_PORT6(port, \ 2039#define CNL_PORT_TX_DW5_GRP(port) _MMIO_PORT6(port, \
2035 _CNL_PORT_TX_DW5_GRP_AE, \ 2040 _CNL_PORT_TX_DW5_GRP_AE, \
@@ -2060,7 +2065,7 @@ enum i915_power_well_id {
2060#define _CNL_PORT_TX_DW7_LN0_AE 0x16245C 2065#define _CNL_PORT_TX_DW7_LN0_AE 0x16245C
2061#define _CNL_PORT_TX_DW7_LN0_B 0x16265C 2066#define _CNL_PORT_TX_DW7_LN0_B 0x16265C
2062#define _CNL_PORT_TX_DW7_LN0_C 0x162C5C 2067#define _CNL_PORT_TX_DW7_LN0_C 0x162C5C
2063#define _CNL_PORT_TX_DW7_LN0_D 0x162EDC 2068#define _CNL_PORT_TX_DW7_LN0_D 0x162E5C
2064#define _CNL_PORT_TX_DW7_LN0_F 0x16285C 2069#define _CNL_PORT_TX_DW7_LN0_F 0x16285C
2065#define CNL_PORT_TX_DW7_GRP(port) _MMIO_PORT6(port, \ 2070#define CNL_PORT_TX_DW7_GRP(port) _MMIO_PORT6(port, \
2066 _CNL_PORT_TX_DW7_GRP_AE, \ 2071 _CNL_PORT_TX_DW7_GRP_AE, \
@@ -2104,6 +2109,28 @@ enum i915_power_well_id {
2104#define CNL_PORT_COMP_DW9 _MMIO(0x162124) 2109#define CNL_PORT_COMP_DW9 _MMIO(0x162124)
2105#define CNL_PORT_COMP_DW10 _MMIO(0x162128) 2110#define CNL_PORT_COMP_DW10 _MMIO(0x162128)
2106 2111
2112#define _ICL_PORT_COMP_DW0_A 0x162100
2113#define _ICL_PORT_COMP_DW0_B 0x6C100
2114#define ICL_PORT_COMP_DW0(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW0_A, \
2115 _ICL_PORT_COMP_DW0_B)
2116#define _ICL_PORT_COMP_DW1_A 0x162104
2117#define _ICL_PORT_COMP_DW1_B 0x6C104
2118#define ICL_PORT_COMP_DW1(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW1_A, \
2119 _ICL_PORT_COMP_DW1_B)
2120#define _ICL_PORT_COMP_DW3_A 0x16210C
2121#define _ICL_PORT_COMP_DW3_B 0x6C10C
2122#define ICL_PORT_COMP_DW3(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW3_A, \
2123 _ICL_PORT_COMP_DW3_B)
2124#define _ICL_PORT_COMP_DW9_A 0x162124
2125#define _ICL_PORT_COMP_DW9_B 0x6C124
2126#define ICL_PORT_COMP_DW9(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW9_A, \
2127 _ICL_PORT_COMP_DW9_B)
2128#define _ICL_PORT_COMP_DW10_A 0x162128
2129#define _ICL_PORT_COMP_DW10_B 0x6C128
2130#define ICL_PORT_COMP_DW10(port) _MMIO_PORT(port, \
2131 _ICL_PORT_COMP_DW10_A, \
2132 _ICL_PORT_COMP_DW10_B)
2133
2107/* BXT PHY Ref registers */ 2134/* BXT PHY Ref registers */
2108#define _PORT_REF_DW3_A 0x16218C 2135#define _PORT_REF_DW3_A 0x16218C
2109#define _PORT_REF_DW3_BC 0x6C18C 2136#define _PORT_REF_DW3_BC 0x6C18C
@@ -7138,6 +7165,8 @@ enum {
7138#define DISP_DATA_PARTITION_5_6 (1<<6) 7165#define DISP_DATA_PARTITION_5_6 (1<<6)
7139#define DISP_IPC_ENABLE (1<<3) 7166#define DISP_IPC_ENABLE (1<<3)
7140#define DBUF_CTL _MMIO(0x45008) 7167#define DBUF_CTL _MMIO(0x45008)
7168#define DBUF_CTL_S1 _MMIO(0x45008)
7169#define DBUF_CTL_S2 _MMIO(0x44FE8)
7141#define DBUF_POWER_REQUEST (1<<31) 7170#define DBUF_POWER_REQUEST (1<<31)
7142#define DBUF_POWER_STATE (1<<30) 7171#define DBUF_POWER_STATE (1<<30)
7143#define GEN7_MSG_CTL _MMIO(0x45010) 7172#define GEN7_MSG_CTL _MMIO(0x45010)
@@ -7147,8 +7176,9 @@ enum {
7147#define RESET_PCH_HANDSHAKE_ENABLE (1<<4) 7176#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
7148 7177
7149#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) 7178#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
7150#define SKL_SELECT_ALTERNATE_DC_EXIT (1<<30) 7179#define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30)
7151#define MASK_WAKEMEM (1<<13) 7180#define MASK_WAKEMEM (1 << 13)
7181#define CNL_DDI_CLOCK_REG_ACCESS_ON (1 << 7)
7152 7182
7153#define SKL_DFSM _MMIO(0x51000) 7183#define SKL_DFSM _MMIO(0x51000)
7154#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23) 7184#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
@@ -7160,8 +7190,12 @@ enum {
7160#define SKL_DFSM_PIPE_B_DISABLE (1 << 21) 7190#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
7161#define SKL_DFSM_PIPE_C_DISABLE (1 << 28) 7191#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
7162 7192
7163#define SKL_DSSM _MMIO(0x51004) 7193#define SKL_DSSM _MMIO(0x51004)
7164#define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31) 7194#define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31)
7195#define ICL_DSSM_CDCLK_PLL_REFCLK_MASK (7 << 29)
7196#define ICL_DSSM_CDCLK_PLL_REFCLK_24MHz (0 << 29)
7197#define ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz (1 << 29)
7198#define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz (2 << 29)
7165 7199
7166#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0) 7200#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0)
7167#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1<<14) 7201#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1<<14)
@@ -8794,20 +8828,21 @@ enum skl_power_gate {
8794 8828
8795/* CDCLK_CTL */ 8829/* CDCLK_CTL */
8796#define CDCLK_CTL _MMIO(0x46000) 8830#define CDCLK_CTL _MMIO(0x46000)
8797#define CDCLK_FREQ_SEL_MASK (3<<26) 8831#define CDCLK_FREQ_SEL_MASK (3 << 26)
8798#define CDCLK_FREQ_450_432 (0<<26) 8832#define CDCLK_FREQ_450_432 (0 << 26)
8799#define CDCLK_FREQ_540 (1<<26) 8833#define CDCLK_FREQ_540 (1 << 26)
8800#define CDCLK_FREQ_337_308 (2<<26) 8834#define CDCLK_FREQ_337_308 (2 << 26)
8801#define CDCLK_FREQ_675_617 (3<<26) 8835#define CDCLK_FREQ_675_617 (3 << 26)
8802#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22) 8836#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3 << 22)
8803#define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22) 8837#define BXT_CDCLK_CD2X_DIV_SEL_1 (0 << 22)
8804#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22) 8838#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1 << 22)
8805#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22) 8839#define BXT_CDCLK_CD2X_DIV_SEL_2 (2 << 22)
8806#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22) 8840#define BXT_CDCLK_CD2X_DIV_SEL_4 (3 << 22)
8807#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20) 8841#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20)
8808#define CDCLK_DIVMUX_CD_OVERRIDE (1<<19) 8842#define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19)
8809#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3) 8843#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
8810#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16) 8844#define ICL_CDCLK_CD2X_PIPE_NONE (7 << 19)
8845#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16)
8811#define CDCLK_FREQ_DECIMAL_MASK (0x7ff) 8846#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
8812 8847
8813/* LCPLL_CTL */ 8848/* LCPLL_CTL */
@@ -9716,4 +9751,10 @@ enum skl_power_gate {
9716#define MMCD_PCLA (1 << 31) 9751#define MMCD_PCLA (1 << 31)
9717#define MMCD_HOTSPOT_EN (1 << 27) 9752#define MMCD_HOTSPOT_EN (1 << 27)
9718 9753
9754#define _ICL_PHY_MISC_A 0x64C00
9755#define _ICL_PHY_MISC_B 0x64C04
9756#define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, \
9757 _ICL_PHY_MISC_B)
9758#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
9759
9719#endif /* _I915_REG_H_ */ 9760#endif /* _I915_REG_H_ */
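
The i915_reg.h hunks add the first Icelake PHY, comp and CDCLK definitions, widen DBUF_CTL into per-slice DBUF_CTL_S1/S2, respace several shift expressions to the preferred '(1 << n)' form, and correct the _CNL_PORT_TX_DW5/DW7_LN0_D offsets from 0x162ED4/0x162EDC to 0x162E54/0x162E5C. The new ICL_PORT_* macros follow the file's two-point convention: supply the port A and port B offsets and let _MMIO_PORT() derive the register for any port. The helper interpolates linearly between the two offsets (sketch of the existing i915 pattern):

    /* Port A gives the base, B gives the stride:
     * port 0 -> a, port 1 -> b, port 2 -> b + (b - a), ... */
    #define _PORT(port, a, b)       ((a) + (port) * ((b) - (a)))
    #define _MMIO_PORT(port, a, b)  _MMIO(_PORT(port, a, b))

    /* e.g. ICL_PORT_CL_DW5(PORT_B) resolves to _MMIO(0x6C014). */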
diff --git a/drivers/gpu/drm/i915/i915_syncmap.c b/drivers/gpu/drm/i915/i915_syncmap.c
index 0087acf731a8..58f8d0cc125c 100644
--- a/drivers/gpu/drm/i915/i915_syncmap.c
+++ b/drivers/gpu/drm/i915/i915_syncmap.c
@@ -86,7 +86,7 @@ struct i915_syncmap {
86 86
87/** 87/**
88 * i915_syncmap_init -- initialise the #i915_syncmap 88 * i915_syncmap_init -- initialise the #i915_syncmap
89 * @root - pointer to the #i915_syncmap 89 * @root: pointer to the #i915_syncmap
90 */ 90 */
91void i915_syncmap_init(struct i915_syncmap **root) 91void i915_syncmap_init(struct i915_syncmap **root)
92{ 92{
@@ -139,9 +139,9 @@ static inline bool seqno_later(u32 a, u32 b)
139 139
140/** 140/**
141 * i915_syncmap_is_later -- compare against the last known sync point 141 * i915_syncmap_is_later -- compare against the last known sync point
142 * @root - pointer to the #i915_syncmap 142 * @root: pointer to the #i915_syncmap
143 * @id - the context id (other timeline) we are synchronising to 143 * @id: the context id (other timeline) we are synchronising to
144 * @seqno - the sequence number along the other timeline 144 * @seqno: the sequence number along the other timeline
145 * 145 *
146 * If we have already synchronised this @root timeline with another (@id) then 146 * If we have already synchronised this @root timeline with another (@id) then
147 * we can omit any repeated or earlier synchronisation requests. If the two 147 * we can omit any repeated or earlier synchronisation requests. If the two
@@ -339,9 +339,9 @@ found:
339 339
340/** 340/**
341 * i915_syncmap_set -- mark the most recent syncpoint between contexts 341 * i915_syncmap_set -- mark the most recent syncpoint between contexts
342 * @root - pointer to the #i915_syncmap 342 * @root: pointer to the #i915_syncmap
343 * @id - the context id (other timeline) we have synchronised to 343 * @id: the context id (other timeline) we have synchronised to
344 * @seqno - the sequence number along the other timeline 344 * @seqno: the sequence number along the other timeline
345 * 345 *
346 * When we synchronise this @root timeline with another (@id), we also know 346 * When we synchronise this @root timeline with another (@id), we also know
347 * that we have synchronized with all previous seqno along that timeline. If 347 * that we have synchronized with all previous seqno along that timeline. If
@@ -382,7 +382,7 @@ static void __sync_free(struct i915_syncmap *p)
382 382
383/** 383/**
384 * i915_syncmap_free -- free all memory associated with the syncmap 384 * i915_syncmap_free -- free all memory associated with the syncmap
385 * @root - pointer to the #i915_syncmap 385 * @root: pointer to the #i915_syncmap
386 * 386 *
387 * Either when the timeline is to be freed and we no longer need the sync 387 * Either when the timeline is to be freed and we no longer need the sync
388 * point tracking, or when the fences are all known to be signaled and the 388 * point tracking, or when the fences are all known to be signaled and the
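
The i915_syncmap.c hunks are pure kernel-doc repair: parameter lines move from '@name - description' to '@name: description', the only form the kernel-doc parser recognises (the dash form makes the tool report the parameter as undocumented). The accepted shape, for reference:

    /**
     * my_func - one-line summary
     * @arg: parameter description; kernel-doc needs the colon,
     *       not "@arg - description"
     */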
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index d452c327dc1d..e9fb692076d7 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -188,13 +188,14 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
188/** 188/**
189 * intel_crtc_destroy_state - destroy crtc state 189 * intel_crtc_destroy_state - destroy crtc state
190 * @crtc: drm crtc 190 * @crtc: drm crtc
191 * @state: the state to destroy
191 * 192 *
192 * Destroys the crtc state (both common and Intel-specific) for the 193 * Destroys the crtc state (both common and Intel-specific) for the
193 * specified crtc. 194 * specified crtc.
194 */ 195 */
195void 196void
196intel_crtc_destroy_state(struct drm_crtc *crtc, 197intel_crtc_destroy_state(struct drm_crtc *crtc,
197 struct drm_crtc_state *state) 198 struct drm_crtc_state *state)
198{ 199{
199 drm_atomic_helper_crtc_destroy_state(crtc, state); 200 drm_atomic_helper_crtc_destroy_state(crtc, state);
200} 201}
@@ -202,7 +203,7 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
202/** 203/**
203 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests 204 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
204 * @dev_priv: i915 device 205 * @dev_priv: i915 device
205 * @crtc: intel crtc 206 * @intel_crtc: intel crtc
206 * @crtc_state: incoming crtc_state to validate and setup scalers 207 * @crtc_state: incoming crtc_state to validate and setup scalers
207 * 208 *
208 * This function sets up scalers based on staged scaling requests for 209 * This function sets up scalers based on staged scaling requests for
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 0ee32275994a..7481ce85746b 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -85,6 +85,7 @@ intel_plane_duplicate_state(struct drm_plane *plane)
85 __drm_atomic_helper_plane_duplicate_state(plane, state); 85 __drm_atomic_helper_plane_duplicate_state(plane, state);
86 86
87 intel_state->vma = NULL; 87 intel_state->vma = NULL;
88 intel_state->flags = 0;
88 89
89 return state; 90 return state;
90} 91}
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 522d54fecb53..709d6ca68074 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -704,7 +704,7 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
704 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 704 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
705 dev_priv->display.audio_codec_enable = ilk_audio_codec_enable; 705 dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
706 dev_priv->display.audio_codec_disable = ilk_audio_codec_disable; 706 dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
707 } else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) { 707 } else if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8) {
708 dev_priv->display.audio_codec_enable = hsw_audio_codec_enable; 708 dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
709 dev_priv->display.audio_codec_disable = hsw_audio_codec_disable; 709 dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
710 } else if (HAS_PCH_SPLIT(dev_priv)) { 710 } else if (HAS_PCH_SPLIT(dev_priv)) {
@@ -779,11 +779,11 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
779{ 779{
780 struct intel_encoder *encoder; 780 struct intel_encoder *encoder;
781 781
782 if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
783 return NULL;
784
785 /* MST */ 782 /* MST */
786 if (pipe >= 0) { 783 if (pipe >= 0) {
784 if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
785 return NULL;
786
787 encoder = dev_priv->av_enc_map[pipe]; 787 encoder = dev_priv->av_enc_map[pipe];
788 /* 788 /*
789 * when bootup, audio driver may not know it is 789 * when bootup, audio driver may not know it is
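
The intel_audio.c change looks like shuffling but fixes a real trap: get_saved_enc() is called with pipe == -1 for the non-MST "look up by port" case, and because ARRAY_SIZE() yields a size_t, the old top-level 'pipe >= ARRAY_SIZE(...)' comparison promoted -1 to a huge unsigned value, tripping the WARN and returning NULL before that path could run. Moving the bounds check inside the 'pipe >= 0' branch confines it to indices that will actually be used. The promotion in isolation:

    struct intel_encoder *map[3];
    int pipe = -1;                          /* means: search by port */

    if (pipe >= ARRAY_SIZE(map))            /* int promoted to size_t:
                                             * (size_t)-1 >= 3 is true */
            return NULL;                    /* -1 wrongly rejected */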
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4e74aa2f16bc..c5c7530ba157 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -391,7 +391,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
391static int intel_bios_ssc_frequency(struct drm_i915_private *dev_priv, 391static int intel_bios_ssc_frequency(struct drm_i915_private *dev_priv,
392 bool alternate) 392 bool alternate)
393{ 393{
394 switch (INTEL_INFO(dev_priv)->gen) { 394 switch (INTEL_GEN(dev_priv)) {
395 case 2: 395 case 2:
396 return alternate ? 66667 : 48000; 396 return alternate ? 66667 : 48000;
397 case 3: 397 case 3:
@@ -947,6 +947,86 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
947 return 0; 947 return 0;
948} 948}
949 949
950/*
951 * Get len of pre-fixed deassert fragment from a v1 init OTP sequence,
952 * skip all delay + gpio operands and stop at the first DSI packet op.
953 */
954static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
955{
956 const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
957 int index, len;
958
959 if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1))
960 return 0;
961
962 /* index = 1 to skip sequence byte */
963 for (index = 1; data[index] != MIPI_SEQ_ELEM_END; index += len) {
964 switch (data[index]) {
965 case MIPI_SEQ_ELEM_SEND_PKT:
966 return index == 1 ? 0 : index;
967 case MIPI_SEQ_ELEM_DELAY:
968 len = 5; /* 1 byte for operand + uint32 */
969 break;
970 case MIPI_SEQ_ELEM_GPIO:
971 len = 3; /* 1 byte for op, 1 for gpio_nr, 1 for value */
972 break;
973 default:
974 return 0;
975 }
976 }
977
978 return 0;
979}
980
981/*
982 * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence.
983 * The deassert must be done before calling intel_dsi_device_ready, so for
984 * these devices we split the init OTP sequence into a deassert sequence and
985 * the actual init OTP part.
986 */
987static void fixup_mipi_sequences(struct drm_i915_private *dev_priv)
988{
989 u8 *init_otp;
990 int len;
991
992 /* Limit this to VLV for now. */
993 if (!IS_VALLEYVIEW(dev_priv))
994 return;
995
996 /* Limit this to v1 vid-mode sequences */
997 if (dev_priv->vbt.dsi.config->is_cmd_mode ||
998 dev_priv->vbt.dsi.seq_version != 1)
999 return;
1000
1001 /* Only do this if there are otp and assert seqs and no deassert seq */
1002 if (!dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
1003 !dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
1004 dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
1005 return;
1006
1007 /* The deassert-sequence ends at the first DSI packet */
1008 len = get_init_otp_deassert_fragment_len(dev_priv);
1009 if (!len)
1010 return;
1011
1012 DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n");
1013
1014 /* Copy the fragment, update seq byte and terminate it */
1015 init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
1016 dev_priv->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
1017 if (!dev_priv->vbt.dsi.deassert_seq)
1018 return;
1019 dev_priv->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
1020 dev_priv->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
1021 /* Use the copy for deassert */
1022 dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
1023 dev_priv->vbt.dsi.deassert_seq;
1024 /* Replace the last byte of the fragment with init OTP seq byte */
1025 init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
1026 /* And make MIPI_SEQ_INIT_OTP point to it */
1027 dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
1028}
1029
950static void 1030static void
951parse_mipi_sequence(struct drm_i915_private *dev_priv, 1031parse_mipi_sequence(struct drm_i915_private *dev_priv,
952 const struct bdb_header *bdb) 1032 const struct bdb_header *bdb)
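
To make the framing above concrete, here is a minimal standalone sketch of the walk that get_init_otp_deassert_fragment_len() performs. The element IDs and payload bytes are invented for the demo; only the 3-byte GPIO and 5-byte delay framing follows the v1 layout described in the comments above.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the MIPI_SEQ_ELEM_* values. */
enum { ELEM_END, ELEM_SEND_PKT, ELEM_DELAY, ELEM_GPIO };

static int deassert_fragment_len(const uint8_t *data)
{
	int index, len;

	/* index = 1 skips the sequence id byte, as in the kernel version. */
	for (index = 1; data[index] != ELEM_END; index += len) {
		switch (data[index]) {
		case ELEM_SEND_PKT:
			/* The first DSI packet op ends the fragment. */
			return index == 1 ? 0 : index;
		case ELEM_DELAY:
			len = 5; /* id byte + u32 delay */
			break;
		case ELEM_GPIO:
			len = 3; /* id byte + gpio_nr + value */
			break;
		default:
			return 0; /* unknown op, give up */
		}
	}

	return 0; /* no DSI packet found */
}

int main(void)
{
	static const uint8_t init_otp[] = {
		0x01,				/* sequence id byte */
		ELEM_GPIO, 0x02, 0x01,		/* deassert reset via a gpio */
		ELEM_DELAY, 0x10, 0x27, 0x00, 0x00, /* 10000 us delay */
		ELEM_SEND_PKT,			/* fragment ends here */
		ELEM_END,
	};

	/* Prints 9: the gpio + delay prefix that becomes the deassert seq. */
	printf("fragment len = %d\n", deassert_fragment_len(init_otp));
	return 0;
}
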
@@ -1016,6 +1096,8 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
1016 dev_priv->vbt.dsi.size = seq_size; 1096 dev_priv->vbt.dsi.size = seq_size;
1017 dev_priv->vbt.dsi.seq_version = sequence->version; 1097 dev_priv->vbt.dsi.seq_version = sequence->version;
1018 1098
1099 fixup_mipi_sequences(dev_priv);
1100
1019 DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n"); 1101 DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
1020 return; 1102 return;
1021 1103
@@ -1611,6 +1693,29 @@ out:
1611} 1693}
1612 1694
1613/** 1695/**
1696 * intel_bios_cleanup - Free any resources allocated by intel_bios_init()
1697 * @dev_priv: i915 device instance
1698 */
1699void intel_bios_cleanup(struct drm_i915_private *dev_priv)
1700{
1701 kfree(dev_priv->vbt.child_dev);
1702 dev_priv->vbt.child_dev = NULL;
1703 dev_priv->vbt.child_dev_num = 0;
1704 kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
1705 dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
1706 kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
1707 dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
1708 kfree(dev_priv->vbt.dsi.data);
1709 dev_priv->vbt.dsi.data = NULL;
1710 kfree(dev_priv->vbt.dsi.pps);
1711 dev_priv->vbt.dsi.pps = NULL;
1712 kfree(dev_priv->vbt.dsi.config);
1713 dev_priv->vbt.dsi.config = NULL;
1714 kfree(dev_priv->vbt.dsi.deassert_seq);
1715 dev_priv->vbt.dsi.deassert_seq = NULL;
1716}
1717
1718/**
1614 * intel_bios_is_tv_present - is integrated TV present in VBT 1719 * intel_bios_is_tv_present - is integrated TV present in VBT
1615 * @dev_priv: i915 device instance 1720 * @dev_priv: i915 device instance
1616 * 1721 *
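
The kfree() of dsi.deassert_seq here pairs with the kmemdup() in fixup_mipi_sequences() earlier in this patch; without it the duplicated deassert fragment would leak when the driver unloads.
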
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index efbc627a2a25..b955f7d7bd0f 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -588,29 +588,16 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
588 spin_unlock_irq(&b->rb_lock); 588 spin_unlock_irq(&b->rb_lock);
589} 589}
590 590
591static bool signal_valid(const struct drm_i915_gem_request *request)
592{
593 return intel_wait_check_request(&request->signaling.wait, request);
594}
595
596static bool signal_complete(const struct drm_i915_gem_request *request) 591static bool signal_complete(const struct drm_i915_gem_request *request)
597{ 592{
598 if (!request) 593 if (!request)
599 return false; 594 return false;
600 595
601 /* If another process served as the bottom-half it may have already 596 /*
602 * signalled that this wait is already completed. 597 * Carefully check if the request is complete, giving time for the
603 */
604 if (intel_wait_complete(&request->signaling.wait))
605 return signal_valid(request);
606
607 /* Carefully check if the request is complete, giving time for the
608 * seqno to be visible or if the GPU hung. 598 * seqno to be visible or if the GPU hung.
609 */ 599 */
610 if (__i915_request_irq_complete(request)) 600 return __i915_request_irq_complete(request);
611 return true;
612
613 return false;
614} 601}
615 602
616static struct drm_i915_gem_request *to_signaler(struct rb_node *rb) 603static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
@@ -712,6 +699,7 @@ static int intel_breadcrumbs_signaler(void *arg)
712 &request->fence.flags)) { 699 &request->fence.flags)) {
713 local_bh_disable(); 700 local_bh_disable();
714 dma_fence_signal(&request->fence); 701 dma_fence_signal(&request->fence);
702 GEM_BUG_ON(!i915_gem_request_completed(request));
715 local_bh_enable(); /* kick start the tasklets */ 703 local_bh_enable(); /* kick start the tasklets */
716 } 704 }
717 705
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index ee788d5be5e3..dc7db8a2caf8 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -1778,6 +1778,199 @@ sanitize:
1778 dev_priv->cdclk.hw.vco = -1; 1778 dev_priv->cdclk.hw.vco = -1;
1779} 1779}
1780 1780
1781static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
1782{
1783 int ranges_24[] = { 312000, 552000, 648000 };
1784 int ranges_19_38[] = { 307200, 556800, 652800 };
1785 int *ranges;
1786
1787 switch (ref) {
1788 default:
1789 MISSING_CASE(ref);
1790 case 24000:
1791 ranges = ranges_24;
1792 break;
1793 case 19200:
1794 case 38400:
1795 ranges = ranges_19_38;
1796 break;
1797 }
1798
1799 if (min_cdclk > ranges[1])
1800 return ranges[2];
1801 else if (min_cdclk > ranges[0])
1802 return ranges[1];
1803 else
1804 return ranges[0];
1805}
1806
1807static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
1808{
1809 int ratio;
1810
1811 if (cdclk == dev_priv->cdclk.hw.bypass)
1812 return 0;
1813
1814 switch (cdclk) {
1815 default:
1816 MISSING_CASE(cdclk);
1817 case 307200:
1818 case 556800:
1819 case 652800:
1820 WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
1821 dev_priv->cdclk.hw.ref != 38400);
1822 break;
1823 case 312000:
1824 case 552000:
1825 case 648000:
1826 WARN_ON(dev_priv->cdclk.hw.ref != 24000);
1827 }
1828
1829 ratio = cdclk / (dev_priv->cdclk.hw.ref / 2);
1830
1831 return dev_priv->cdclk.hw.ref * ratio;
1832}
1833
1834static void icl_set_cdclk(struct drm_i915_private *dev_priv,
1835 const struct intel_cdclk_state *cdclk_state)
1836{
1837 unsigned int cdclk = cdclk_state->cdclk;
1838 unsigned int vco = cdclk_state->vco;
1839 int ret;
1840
1841 mutex_lock(&dev_priv->pcu_lock);
1842 ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
1843 SKL_CDCLK_PREPARE_FOR_CHANGE,
1844 SKL_CDCLK_READY_FOR_CHANGE,
1845 SKL_CDCLK_READY_FOR_CHANGE, 3);
1846 mutex_unlock(&dev_priv->pcu_lock);
1847 if (ret) {
1848 DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
1849 ret);
1850 return;
1851 }
1852
1853 if (dev_priv->cdclk.hw.vco != 0 &&
1854 dev_priv->cdclk.hw.vco != vco)
1855 cnl_cdclk_pll_disable(dev_priv);
1856
1857 if (dev_priv->cdclk.hw.vco != vco)
1858 cnl_cdclk_pll_enable(dev_priv, vco);
1859
1860 I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
1861 skl_cdclk_decimal(cdclk));
1862
1863 mutex_lock(&dev_priv->pcu_lock);
1864 /* TODO: add proper DVFS support. */
1865 sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, 2);
1866 mutex_unlock(&dev_priv->pcu_lock);
1867
1868 intel_update_cdclk(dev_priv);
1869}
1870
1871static void icl_get_cdclk(struct drm_i915_private *dev_priv,
1872 struct intel_cdclk_state *cdclk_state)
1873{
1874 u32 val;
1875
1876 cdclk_state->bypass = 50000;
1877
1878 val = I915_READ(SKL_DSSM);
1879 switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) {
1880 default:
1881 MISSING_CASE(val);
1882 case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
1883 cdclk_state->ref = 24000;
1884 break;
1885 case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
1886 cdclk_state->ref = 19200;
1887 break;
1888 case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
1889 cdclk_state->ref = 38400;
1890 break;
1891 }
1892
1893 val = I915_READ(BXT_DE_PLL_ENABLE);
1894 if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
1895 (val & BXT_DE_PLL_LOCK) == 0) {
1896 /*
1897 * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but
1898 * setting it to zero is a way to signal that.
1899 */
1900 cdclk_state->vco = 0;
1901 cdclk_state->cdclk = cdclk_state->bypass;
1902 return;
1903 }
1904
1905 cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
1906
1907 val = I915_READ(CDCLK_CTL);
1908 WARN_ON((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0);
1909
1910 cdclk_state->cdclk = cdclk_state->vco / 2;
1911}
1912
1913/**
1914 * icl_init_cdclk - Initialize CDCLK on ICL
1915 * @dev_priv: i915 device
1916 *
1917 * Initialize CDCLK for ICL. This consists mainly of initializing
1918 * dev_priv->cdclk.hw and sanitizing the state of the hardware if needed. This
1919 * is generally done only during the display core initialization sequence, after
1920 * which the DMC will take care of turning CDCLK off/on as needed.
1921 */
1922void icl_init_cdclk(struct drm_i915_private *dev_priv)
1923{
1924 struct intel_cdclk_state sanitized_state;
1925 u32 val;
1926
1927 /* This sets dev_priv->cdclk.hw. */
1928 intel_update_cdclk(dev_priv);
1929 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
1930
1931 /* This means CDCLK disabled. */
1932 if (dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
1933 goto sanitize;
1934
1935 val = I915_READ(CDCLK_CTL);
1936
1937 if ((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0)
1938 goto sanitize;
1939
1940 if ((val & CDCLK_FREQ_DECIMAL_MASK) !=
1941 skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk))
1942 goto sanitize;
1943
1944 return;
1945
1946sanitize:
1947 DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
1948
1949 sanitized_state.ref = dev_priv->cdclk.hw.ref;
1950 sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref);
1951 sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv,
1952 sanitized_state.cdclk);
1953
1954 icl_set_cdclk(dev_priv, &sanitized_state);
1955}
1956
1957/**
1958 * icl_uninit_cdclk - Uninitialize CDCLK on ICL
1959 * @dev_priv: i915 device
1960 *
1961 * Uninitialize CDCLK for ICL. This is done only during the display core
1962 * uninitialization sequence.
1963 */
1964void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
1965{
1966 struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
1967
1968 cdclk_state.cdclk = cdclk_state.bypass;
1969 cdclk_state.vco = 0;
1970
1971 icl_set_cdclk(dev_priv, &cdclk_state);
1972}
1973
1781/** 1974/**
1782 * cnl_init_cdclk - Initialize CDCLK on CNL 1975 * cnl_init_cdclk - Initialize CDCLK on CNL
1783 * @dev_priv: i915 device 1976 * @dev_priv: i915 device
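
A worked example, using only the tables and arithmetic above (the input numbers are assumed for illustration): with a 19.2 MHz reference, a 400 MHz minimum cdclk exceeds ranges_19_38[0] but not ranges_19_38[1], so icl_calc_cdclk(400000, 19200) returns 556800. The PLL ratio is then 556800 / (19200 / 2) = 58, which gives a VCO of 19200 * 58 = 1113600 kHz. Since icl_get_cdclk() insists the CD2X divider is programmed to divide by one, the frequency reads back as vco / 2 = 556800 kHz.
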
@@ -2216,6 +2409,36 @@ static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
2216 return 0; 2409 return 0;
2217} 2410}
2218 2411
2412static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
2413{
2414 struct drm_i915_private *dev_priv = to_i915(state->dev);
2415 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
2416 unsigned int ref = intel_state->cdclk.logical.ref;
2417 int min_cdclk, cdclk, vco;
2418
2419 min_cdclk = intel_compute_min_cdclk(state);
2420 if (min_cdclk < 0)
2421 return min_cdclk;
2422
2423 cdclk = icl_calc_cdclk(min_cdclk, ref);
2424 vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
2425
2426 intel_state->cdclk.logical.vco = vco;
2427 intel_state->cdclk.logical.cdclk = cdclk;
2428
2429 if (!intel_state->active_crtcs) {
2430 cdclk = icl_calc_cdclk(0, ref);
2431 vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
2432
2433 intel_state->cdclk.actual.vco = vco;
2434 intel_state->cdclk.actual.cdclk = cdclk;
2435 } else {
2436 intel_state->cdclk.actual = intel_state->cdclk.logical;
2437 }
2438
2439 return 0;
2440}
2441
2219static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) 2442static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
2220{ 2443{
2221 int max_cdclk_freq = dev_priv->max_cdclk_freq; 2444 int max_cdclk_freq = dev_priv->max_cdclk_freq;
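
Note the logical/actual split in icl_modeset_calc_cdclk(): the logical state always carries the frequency derived from min_cdclk, while the actual state drops to the lowest bucket, icl_calc_cdclk(0, ref), whenever no CRTCs are active.
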
@@ -2233,7 +2456,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
2233 return max_cdclk_freq; 2456 return max_cdclk_freq;
2234 else if (IS_CHERRYVIEW(dev_priv)) 2457 else if (IS_CHERRYVIEW(dev_priv))
2235 return max_cdclk_freq*95/100; 2458 return max_cdclk_freq*95/100;
2236 else if (INTEL_INFO(dev_priv)->gen < 4) 2459 else if (INTEL_GEN(dev_priv) < 4)
2237 return 2*max_cdclk_freq*90/100; 2460 return 2*max_cdclk_freq*90/100;
2238 else 2461 else
2239 return max_cdclk_freq*90/100; 2462 return max_cdclk_freq*90/100;
@@ -2249,7 +2472,12 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
2249 */ 2472 */
2250void intel_update_max_cdclk(struct drm_i915_private *dev_priv) 2473void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
2251{ 2474{
2252 if (IS_CANNONLAKE(dev_priv)) { 2475 if (IS_ICELAKE(dev_priv)) {
2476 if (dev_priv->cdclk.hw.ref == 24000)
2477 dev_priv->max_cdclk_freq = 648000;
2478 else
2479 dev_priv->max_cdclk_freq = 652800;
2480 } else if (IS_CANNONLAKE(dev_priv)) {
2253 dev_priv->max_cdclk_freq = 528000; 2481 dev_priv->max_cdclk_freq = 528000;
2254 } else if (IS_GEN9_BC(dev_priv)) { 2482 } else if (IS_GEN9_BC(dev_priv)) {
2255 u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; 2483 u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
@@ -2473,9 +2701,14 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
2473 dev_priv->display.set_cdclk = cnl_set_cdclk; 2701 dev_priv->display.set_cdclk = cnl_set_cdclk;
2474 dev_priv->display.modeset_calc_cdclk = 2702 dev_priv->display.modeset_calc_cdclk =
2475 cnl_modeset_calc_cdclk; 2703 cnl_modeset_calc_cdclk;
2704 } else if (IS_ICELAKE(dev_priv)) {
2705 dev_priv->display.set_cdclk = icl_set_cdclk;
2706 dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
2476 } 2707 }
2477 2708
2478 if (IS_CANNONLAKE(dev_priv)) 2709 if (IS_ICELAKE(dev_priv))
2710 dev_priv->display.get_cdclk = icl_get_cdclk;
2711 else if (IS_CANNONLAKE(dev_priv))
2479 dev_priv->display.get_cdclk = cnl_get_cdclk; 2712 dev_priv->display.get_cdclk = cnl_get_cdclk;
2480 else if (IS_GEN9_BC(dev_priv)) 2713 else if (IS_GEN9_BC(dev_priv))
2481 dev_priv->display.get_cdclk = skl_get_cdclk; 2714 dev_priv->display.get_cdclk = skl_get_cdclk;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 1cd4a7c22bd5..391dd69ae0a4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -474,14 +474,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
474 return ret; 474 return ret;
475} 475}
476 476
477/**
478 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
479 *
480 * Not for i915G/i915GM
481 *
482 * \return true if CRT is connected.
483 * \return false if CRT is disconnected.
484 */
485static bool intel_crt_detect_hotplug(struct drm_connector *connector) 477static bool intel_crt_detect_hotplug(struct drm_connector *connector)
486{ 478{
487 struct drm_device *dev = connector->dev; 479 struct drm_device *dev = connector->dev;
@@ -807,10 +799,11 @@ intel_crt_detect(struct drm_connector *connector,
807 else 799 else
808 status = connector_status_unknown; 800 status = connector_status_unknown;
809 intel_release_load_detect_pipe(connector, &tmp, ctx); 801 intel_release_load_detect_pipe(connector, &tmp, ctx);
810 } else if (ret == 0) 802 } else if (ret == 0) {
811 status = connector_status_unknown; 803 status = connector_status_unknown;
812 else if (ret < 0) 804 } else {
813 status = ret; 805 status = ret;
806 }
814 807
815out: 808out:
816 intel_display_power_put(dev_priv, intel_encoder->power_domain); 809 intel_display_power_put(dev_priv, intel_encoder->power_domain);
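
Since ret > 0 is handled by the load-detect branch above and ret == 0 by the second arm, the final else can only be reached with ret < 0, so the dropped ret < 0 test was redundant; the added braces simply keep all three arms styled consistently.
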
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index cfcd9cb37d5d..8ca376aca8bd 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2152,7 +2152,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
2152 2152
2153 I915_WRITE(DPLL_CTRL2, val); 2153 I915_WRITE(DPLL_CTRL2, val);
2154 2154
2155 } else if (INTEL_INFO(dev_priv)->gen < 9) { 2155 } else if (INTEL_GEN(dev_priv) < 9) {
2156 I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll)); 2156 I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
2157 } 2157 }
2158 2158
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index a2c16140169f..298f8996cc54 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -586,3 +586,9 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
586 /* Initialize command stream timestamp frequency */ 586 /* Initialize command stream timestamp frequency */
587 info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv); 587 info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
588} 588}
589
590void intel_driver_caps_print(const struct intel_driver_caps *caps,
591 struct drm_printer *p)
592{
593 drm_printf(p, "scheduler: %x\n", caps->scheduler);
594}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 9542018d11d0..71fdfb0451ef 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -167,6 +167,10 @@ struct intel_device_info {
167 } color; 167 } color;
168}; 168};
169 169
170struct intel_driver_caps {
171 unsigned int scheduler;
172};
173
170static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu) 174static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
171{ 175{
172 return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask); 176 return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
@@ -182,4 +186,7 @@ void intel_device_info_dump_flags(const struct intel_device_info *info,
182void intel_device_info_dump_runtime(const struct intel_device_info *info, 186void intel_device_info_dump_runtime(const struct intel_device_info *info,
183 struct drm_printer *p); 187 struct drm_printer *p);
184 188
189void intel_driver_caps_print(const struct intel_driver_caps *caps,
190 struct drm_printer *p);
191
185#endif 192#endif
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 60ba5bb3f34c..5d46771d58f6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -558,11 +558,11 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
558} 558}
559 559
560#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 560#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
561/** 561
562/*
562 * Returns whether the given set of divisors are valid for a given refclk with 563 * Returns whether the given set of divisors are valid for a given refclk with
563 * the given connectors. 564 * the given connectors.
564 */ 565 */
565
566static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv, 566static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
567 const struct intel_limit *limit, 567 const struct intel_limit *limit,
568 const struct dpll *clock) 568 const struct dpll *clock)
@@ -2029,12 +2029,12 @@ static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_pr
2029 2029
2030static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) 2030static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2031{ 2031{
2032 if (INTEL_INFO(dev_priv)->gen >= 9) 2032 if (INTEL_GEN(dev_priv) >= 9)
2033 return 256 * 1024; 2033 return 256 * 1024;
2034 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) || 2034 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
2035 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2035 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2036 return 128 * 1024; 2036 return 128 * 1024;
2037 else if (INTEL_INFO(dev_priv)->gen >= 4) 2037 else if (INTEL_GEN(dev_priv) >= 4)
2038 return 4 * 1024; 2038 return 4 * 1024;
2039 else 2039 else
2040 return 0; 2040 return 0;
@@ -2068,13 +2068,16 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2068} 2068}
2069 2069
2070struct i915_vma * 2070struct i915_vma *
2071intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) 2071intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2072 unsigned int rotation,
2073 unsigned long *out_flags)
2072{ 2074{
2073 struct drm_device *dev = fb->dev; 2075 struct drm_device *dev = fb->dev;
2074 struct drm_i915_private *dev_priv = to_i915(dev); 2076 struct drm_i915_private *dev_priv = to_i915(dev);
2075 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2077 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2076 struct i915_ggtt_view view; 2078 struct i915_ggtt_view view;
2077 struct i915_vma *vma; 2079 struct i915_vma *vma;
2080 unsigned int pinctl;
2078 u32 alignment; 2081 u32 alignment;
2079 2082
2080 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 2083 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -2102,7 +2105,20 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
2102 2105
2103 atomic_inc(&dev_priv->gpu_error.pending_fb_pin); 2106 atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
2104 2107
2105 vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view); 2108 pinctl = 0;
2109
2110 /* Valleyview is definitely limited to scanning out the first
2111 * 512MiB. Let's presume this behaviour was inherited from the
2112 * g4x display engine and that all earlier gen are similarly
2113 * limited. Testing suggests that it is a little more
2114 * complicated than this. For example, Cherryview appears quite
2115 * happy to scanout from anywhere within its global aperture.
2116 */
2117 if (HAS_GMCH_DISPLAY(dev_priv))
2118 pinctl |= PIN_MAPPABLE;
2119
2120 vma = i915_gem_object_pin_to_display_plane(obj,
2121 alignment, &view, pinctl);
2106 if (IS_ERR(vma)) 2122 if (IS_ERR(vma))
2107 goto err; 2123 goto err;
2108 2124
@@ -2123,7 +2139,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
2123 * something and try to run the system in a "less than optimal" 2139 * something and try to run the system in a "less than optimal"
2124 * mode that matches the user configuration. 2140 * mode that matches the user configuration.
2125 */ 2141 */
2126 i915_vma_pin_fence(vma); 2142 if (i915_vma_pin_fence(vma) == 0 && vma->fence)
2143 *out_flags |= PLANE_HAS_FENCE;
2127 } 2144 }
2128 2145
2129 i915_vma_get(vma); 2146 i915_vma_get(vma);
@@ -2134,11 +2151,12 @@ err:
2134 return vma; 2151 return vma;
2135} 2152}
2136 2153
2137void intel_unpin_fb_vma(struct i915_vma *vma) 2154void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
2138{ 2155{
2139 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 2156 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
2140 2157
2141 i915_vma_unpin_fence(vma); 2158 if (flags & PLANE_HAS_FENCE)
2159 i915_vma_unpin_fence(vma);
2142 i915_gem_object_unpin_from_display_plane(vma); 2160 i915_gem_object_unpin_from_display_plane(vma);
2143 i915_vma_put(vma); 2161 i915_vma_put(vma);
2144} 2162}
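
The new caller contract, sketched below under the same assumptions as the patch (fb and rotation are caller-provided, error handling is elided, and PLANE_HAS_FENCE is the only flag defined so far): pin reports what it actually acquired, and unpin must be told, so a fence is only released when one was pinned.

	unsigned long flags = 0;
	struct i915_vma *vma;

	vma = intel_pin_and_fence_fb_obj(fb, rotation, &flags);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* ... scan out ... */

	/* Drops the fence only if PLANE_HAS_FENCE was set on pin. */
	intel_unpin_fb_vma(vma, flags);
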
@@ -2808,7 +2826,9 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2808valid_fb: 2826valid_fb:
2809 mutex_lock(&dev->struct_mutex); 2827 mutex_lock(&dev->struct_mutex);
2810 intel_state->vma = 2828 intel_state->vma =
2811 intel_pin_and_fence_fb_obj(fb, primary->state->rotation); 2829 intel_pin_and_fence_fb_obj(fb,
2830 primary->state->rotation,
2831 &intel_state->flags);
2812 mutex_unlock(&dev->struct_mutex); 2832 mutex_unlock(&dev->struct_mutex);
2813 if (IS_ERR(intel_state->vma)) { 2833 if (IS_ERR(intel_state->vma)) {
2814 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n", 2834 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
@@ -3163,7 +3183,7 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
3163 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 3183 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3164 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; 3184 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3165 3185
3166 if (INTEL_GEN(dev_priv) < 4) 3186 if (INTEL_GEN(dev_priv) < 5)
3167 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe); 3187 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3168 3188
3169 switch (fb->format->format) { 3189 switch (fb->format->format) {
@@ -4756,8 +4776,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
4756 4776
4757/** 4777/**
4758 * skl_update_scaler_plane - Stages update to scaler state for a given plane. 4778 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4759 * 4779 * @crtc_state: crtc's scaler state
4760 * @state: crtc's scaler state
4761 * @plane_state: atomic plane state to update 4780 * @plane_state: atomic plane state to update
4762 * 4781 *
4763 * Return 4782 * Return
@@ -4954,6 +4973,7 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
4954/** 4973/**
4955 * intel_post_enable_primary - Perform operations after enabling primary plane 4974 * intel_post_enable_primary - Perform operations after enabling primary plane
4956 * @crtc: the CRTC whose primary plane was just enabled 4975 * @crtc: the CRTC whose primary plane was just enabled
4976 * @new_crtc_state: the enabling state
4957 * 4977 *
4958 * Performs potentially sleeping operations that must be done after the primary 4978 * Performs potentially sleeping operations that must be done after the primary
4959 * plane is enabled, such as updating FBC and IPS. Note that this may be 4979 * plane is enabled, such as updating FBC and IPS. Note that this may be
@@ -5418,6 +5438,20 @@ static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
5418 I915_WRITE(CLKGATE_DIS_PSL(pipe), val); 5438 I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
5419} 5439}
5420 5440
5441static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
5442{
5443 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5444 enum pipe pipe = crtc->pipe;
5445 uint32_t val;
5446
5447 val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2);
5448
5449 /* Program B credit equally to all pipes */
5450 val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes);
5451
5452 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
5453}
5454
5421static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, 5455static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
5422 struct drm_atomic_state *old_state) 5456 struct drm_atomic_state *old_state)
5423{ 5457{
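
For the common three-pipe case, icl_pipe_mbus_enable() above programs MBUS_DBOX_B_CREDIT(24 / 3), i.e. 8 B credits per pipe, while the A and BW credits are fixed per pipe regardless of the pipe count.
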
@@ -5495,6 +5529,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
5495 if (dev_priv->display.initial_watermarks != NULL) 5529 if (dev_priv->display.initial_watermarks != NULL)
5496 dev_priv->display.initial_watermarks(old_intel_state, pipe_config); 5530 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
5497 5531
5532 if (INTEL_GEN(dev_priv) >= 11)
5533 icl_pipe_mbus_enable(intel_crtc);
5534
5498 /* XXX: Do the pipe assertions at the right place for BXT DSI. */ 5535 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
5499 if (!transcoder_is_dsi(cpu_transcoder)) 5536 if (!transcoder_is_dsi(cpu_transcoder))
5500 intel_enable_pipe(pipe_config); 5537 intel_enable_pipe(pipe_config);
@@ -6307,7 +6344,7 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6307 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6344 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6308 6345
6309 /* GDG double wide on either pipe, otherwise pipe A only */ 6346 /* GDG double wide on either pipe, otherwise pipe A only */
6310 return INTEL_INFO(dev_priv)->gen < 4 && 6347 return INTEL_GEN(dev_priv) < 4 &&
6311 (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); 6348 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6312} 6349}
6313 6350
@@ -8194,7 +8231,7 @@ static void haswell_set_pipemisc(struct drm_crtc *crtc)
8194 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8231 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8195 struct intel_crtc_state *config = intel_crtc->config; 8232 struct intel_crtc_state *config = intel_crtc->config;
8196 8233
8197 if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) { 8234 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8198 u32 val = 0; 8235 u32 val = 0;
8199 8236
8200 switch (intel_crtc->config->pipe_bpp) { 8237 switch (intel_crtc->config->pipe_bpp) {
@@ -9537,7 +9574,8 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
9537 if (HAS_DDI(dev_priv)) 9574 if (HAS_DDI(dev_priv))
9538 cntl |= CURSOR_PIPE_CSC_ENABLE; 9575 cntl |= CURSOR_PIPE_CSC_ENABLE;
9539 9576
9540 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); 9577 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
9578 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
9541 9579
9542 switch (plane_state->base.crtc_w) { 9580 switch (plane_state->base.crtc_w) {
9543 case 64: 9581 case 64:
@@ -10702,6 +10740,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
10702 struct drm_connector_list_iter conn_iter; 10740 struct drm_connector_list_iter conn_iter;
10703 unsigned int used_ports = 0; 10741 unsigned int used_ports = 0;
10704 unsigned int used_mst_ports = 0; 10742 unsigned int used_mst_ports = 0;
10743 bool ret = true;
10705 10744
10706 /* 10745 /*
10707 * Walk the connector list instead of the encoder 10746 * Walk the connector list instead of the encoder
@@ -10736,7 +10775,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
10736 10775
10737 /* the same port mustn't appear more than once */ 10776 /* the same port mustn't appear more than once */
10738 if (used_ports & port_mask) 10777 if (used_ports & port_mask)
10739 return false; 10778 ret = false;
10740 10779
10741 used_ports |= port_mask; 10780 used_ports |= port_mask;
10742 break; 10781 break;
@@ -10754,7 +10793,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
10754 if (used_ports & used_mst_ports) 10793 if (used_ports & used_mst_ports)
10755 return false; 10794 return false;
10756 10795
10757 return true; 10796 return ret;
10758} 10797}
10759 10798
10760static void 10799static void
@@ -12075,7 +12114,7 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
12075 struct drm_device *dev = crtc->base.dev; 12114 struct drm_device *dev = crtc->base.dev;
12076 12115
12077 if (!dev->max_vblank_count) 12116 if (!dev->max_vblank_count)
12078 return drm_crtc_accurate_vblank_count(&crtc->base); 12117 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
12079 12118
12080 return dev->driver->get_vblank_counter(dev, crtc->pipe); 12119 return dev->driver->get_vblank_counter(dev, crtc->pipe);
12081} 12120}
@@ -12616,7 +12655,7 @@ static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
12616/** 12655/**
12617 * intel_prepare_plane_fb - Prepare fb for usage on plane 12656 * intel_prepare_plane_fb - Prepare fb for usage on plane
12618 * @plane: drm plane to prepare for 12657 * @plane: drm plane to prepare for
12619 * @fb: framebuffer to prepare for presentation 12658 * @new_state: the plane state being prepared
12620 * 12659 *
12621 * Prepares a framebuffer for usage on a display plane. Generally this 12660 * Prepares a framebuffer for usage on a display plane. Generally this
12622 * involves pinning the underlying object and updating the frontbuffer tracking 12661 * involves pinning the underlying object and updating the frontbuffer tracking
@@ -12695,7 +12734,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
12695 } else { 12734 } else {
12696 struct i915_vma *vma; 12735 struct i915_vma *vma;
12697 12736
12698 vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation); 12737 vma = intel_pin_and_fence_fb_obj(fb,
12738 new_state->rotation,
12739 &to_intel_plane_state(new_state)->flags);
12699 if (!IS_ERR(vma)) 12740 if (!IS_ERR(vma))
12700 to_intel_plane_state(new_state)->vma = vma; 12741 to_intel_plane_state(new_state)->vma = vma;
12701 else 12742 else
@@ -12734,7 +12775,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
12734/** 12775/**
12735 * intel_cleanup_plane_fb - Cleans up an fb after plane use 12776 * intel_cleanup_plane_fb - Cleans up an fb after plane use
12736 * @plane: drm plane to clean up for 12777 * @plane: drm plane to clean up for
12737 * @fb: old framebuffer that was on plane 12778 * @old_state: the state from the previous modeset
12738 * 12779 *
12739 * Cleans up a framebuffer that has just been removed from a plane. 12780 * Cleans up a framebuffer that has just been removed from a plane.
12740 * 12781 *
@@ -12750,7 +12791,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
12750 vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma); 12791 vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
12751 if (vma) { 12792 if (vma) {
12752 mutex_lock(&plane->dev->struct_mutex); 12793 mutex_lock(&plane->dev->struct_mutex);
12753 intel_unpin_fb_vma(vma); 12794 intel_unpin_fb_vma(vma, to_intel_plane_state(old_state)->flags);
12754 mutex_unlock(&plane->dev->struct_mutex); 12795 mutex_unlock(&plane->dev->struct_mutex);
12755 } 12796 }
12756} 12797}
@@ -13111,7 +13152,9 @@ intel_legacy_cursor_update(struct drm_plane *plane,
13111 goto out_unlock; 13152 goto out_unlock;
13112 } 13153 }
13113 } else { 13154 } else {
13114 vma = intel_pin_and_fence_fb_obj(fb, new_plane_state->rotation); 13155 vma = intel_pin_and_fence_fb_obj(fb,
13156 new_plane_state->rotation,
13157 &to_intel_plane_state(new_plane_state)->flags);
13115 if (IS_ERR(vma)) { 13158 if (IS_ERR(vma)) {
13116 DRM_DEBUG_KMS("failed to pin object\n"); 13159 DRM_DEBUG_KMS("failed to pin object\n");
13117 13160
@@ -13142,7 +13185,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
13142 13185
13143 old_vma = fetch_and_zero(&to_intel_plane_state(old_plane_state)->vma); 13186 old_vma = fetch_and_zero(&to_intel_plane_state(old_plane_state)->vma);
13144 if (old_vma) 13187 if (old_vma)
13145 intel_unpin_fb_vma(old_vma); 13188 intel_unpin_fb_vma(old_vma,
13189 to_intel_plane_state(old_plane_state)->flags);
13146 13190
13147out_unlock: 13191out_unlock:
13148 mutex_unlock(&dev_priv->drm.struct_mutex); 13192 mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -13498,8 +13542,8 @@ enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
13498 return to_intel_crtc(connector->base.state->crtc)->pipe; 13542 return to_intel_crtc(connector->base.state->crtc)->pipe;
13499} 13543}
13500 13544
13501int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 13545int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
13502 struct drm_file *file) 13546 struct drm_file *file)
13503{ 13547{
13504 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 13548 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
13505 struct drm_crtc *drmmode_crtc; 13549 struct drm_crtc *drmmode_crtc;
@@ -13947,7 +13991,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
13947 * gen2/3 display engine uses the fence if present, 13991 * gen2/3 display engine uses the fence if present,
13948 * so the tiling mode must match the fb modifier exactly. 13992 * so the tiling mode must match the fb modifier exactly.
13949 */ 13993 */
13950 if (INTEL_INFO(dev_priv)->gen < 4 && 13994 if (INTEL_GEN(dev_priv) < 4 &&
13951 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 13995 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
13952 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n"); 13996 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
13953 goto err; 13997 goto err;
@@ -14162,7 +14206,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
14162{ 14206{
14163 intel_init_cdclk_hooks(dev_priv); 14207 intel_init_cdclk_hooks(dev_priv);
14164 14208
14165 if (INTEL_INFO(dev_priv)->gen >= 9) { 14209 if (INTEL_GEN(dev_priv) >= 9) {
14166 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 14210 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14167 dev_priv->display.get_initial_plane_config = 14211 dev_priv->display.get_initial_plane_config =
14168 skylake_get_initial_plane_config; 14212 skylake_get_initial_plane_config;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f10a14330e7c..f20b25f98e5a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -258,7 +258,7 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
258 if (IS_CNL_WITH_PORT_F(dev_priv)) 258 if (IS_CNL_WITH_PORT_F(dev_priv))
259 return 810000; 259 return 810000;
260 260
261 /* For other SKUs, max rate on ports A and B is 5.4G */ 261 /* For other SKUs, max rate on ports A and D is 5.4G */
262 if (port == PORT_A || port == PORT_D) 262 if (port == PORT_A || port == PORT_D)
263 return 540000; 263 return 540000;
264 264
@@ -1467,7 +1467,7 @@ static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1467static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv, 1467static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1468 enum port port) 1468 enum port port)
1469{ 1469{
1470 if (INTEL_INFO(dev_priv)->gen >= 9) 1470 if (INTEL_GEN(dev_priv) >= 9)
1471 return skl_aux_ctl_reg(dev_priv, port); 1471 return skl_aux_ctl_reg(dev_priv, port);
1472 else if (HAS_PCH_SPLIT(dev_priv)) 1472 else if (HAS_PCH_SPLIT(dev_priv))
1473 return ilk_aux_ctl_reg(dev_priv, port); 1473 return ilk_aux_ctl_reg(dev_priv, port);
@@ -1478,7 +1478,7 @@ static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1478static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv, 1478static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1479 enum port port, int index) 1479 enum port port, int index)
1480{ 1480{
1481 if (INTEL_INFO(dev_priv)->gen >= 9) 1481 if (INTEL_GEN(dev_priv) >= 9)
1482 return skl_aux_data_reg(dev_priv, port, index); 1482 return skl_aux_data_reg(dev_priv, port, index);
1483 else if (HAS_PCH_SPLIT(dev_priv)) 1483 else if (HAS_PCH_SPLIT(dev_priv))
1484 return ilk_aux_data_reg(dev_priv, port, index); 1484 return ilk_aux_data_reg(dev_priv, port, index);
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 76473e9836c6..c8e9e44e5981 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -147,7 +147,7 @@ struct bxt_ddi_phy_info {
147 */ 147 */
148 struct { 148 struct {
149 /** 149 /**
150 * @port: which port maps to this channel. 150 * @channel.port: which port maps to this channel.
151 */ 151 */
152 enum port port; 152 enum port port;
153 } channel[2]; 153 } channel[2];
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 468ec1e90e16..50874f4035cf 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -204,6 +204,7 @@ struct intel_fbdev {
204 struct drm_fb_helper helper; 204 struct drm_fb_helper helper;
205 struct intel_framebuffer *fb; 205 struct intel_framebuffer *fb;
206 struct i915_vma *vma; 206 struct i915_vma *vma;
207 unsigned long vma_flags;
207 async_cookie_t cookie; 208 async_cookie_t cookie;
208 int preferred_bpp; 209 int preferred_bpp;
209}; 210};
@@ -490,6 +491,8 @@ struct intel_atomic_state {
490struct intel_plane_state { 491struct intel_plane_state {
491 struct drm_plane_state base; 492 struct drm_plane_state base;
492 struct i915_vma *vma; 493 struct i915_vma *vma;
494 unsigned long flags;
495#define PLANE_HAS_FENCE BIT(0)
493 496
494 struct { 497 struct {
495 u32 offset; 498 u32 offset;
@@ -1407,6 +1410,8 @@ void cnl_init_cdclk(struct drm_i915_private *dev_priv);
1407void cnl_uninit_cdclk(struct drm_i915_private *dev_priv); 1410void cnl_uninit_cdclk(struct drm_i915_private *dev_priv);
1408void bxt_init_cdclk(struct drm_i915_private *dev_priv); 1411void bxt_init_cdclk(struct drm_i915_private *dev_priv);
1409void bxt_uninit_cdclk(struct drm_i915_private *dev_priv); 1412void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
1413void icl_init_cdclk(struct drm_i915_private *dev_priv);
1414void icl_uninit_cdclk(struct drm_i915_private *dev_priv);
1410void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv); 1415void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
1411void intel_update_max_cdclk(struct drm_i915_private *dev_priv); 1416void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
1412void intel_update_cdclk(struct drm_i915_private *dev_priv); 1417void intel_update_cdclk(struct drm_i915_private *dev_priv);
@@ -1455,8 +1460,8 @@ struct drm_display_mode *
1455intel_encoder_current_mode(struct intel_encoder *encoder); 1460intel_encoder_current_mode(struct intel_encoder *encoder);
1456 1461
1457enum pipe intel_get_pipe_from_connector(struct intel_connector *connector); 1462enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
1458int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 1463int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
1459 struct drm_file *file_priv); 1464 struct drm_file *file_priv);
1460enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 1465enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1461 enum pipe pipe); 1466 enum pipe pipe);
1462static inline bool 1467static inline bool
@@ -1501,8 +1506,10 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
1501 struct intel_load_detect_pipe *old, 1506 struct intel_load_detect_pipe *old,
1502 struct drm_modeset_acquire_ctx *ctx); 1507 struct drm_modeset_acquire_ctx *ctx);
1503struct i915_vma * 1508struct i915_vma *
1504intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); 1509intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
1505void intel_unpin_fb_vma(struct i915_vma *vma); 1510 unsigned int rotation,
1511 unsigned long *out_flags);
1512void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
1506struct drm_framebuffer * 1513struct drm_framebuffer *
1507intel_framebuffer_create(struct drm_i915_gem_object *obj, 1514intel_framebuffer_create(struct drm_i915_gem_object *obj,
1508 struct drm_mode_fb_cmd2 *mode_cmd); 1515 struct drm_mode_fb_cmd2 *mode_cmd);
@@ -2018,8 +2025,8 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
2018 int usecs); 2025 int usecs);
2019struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, 2026struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
2020 enum pipe pipe, int plane); 2027 enum pipe pipe, int plane);
2021int intel_sprite_set_colorkey(struct drm_device *dev, void *data, 2028int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
2022 struct drm_file *file_priv); 2029 struct drm_file *file_priv);
2023void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state); 2030void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
2024void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state); 2031void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
2025void skl_update_plane(struct intel_plane *plane, 2032void skl_update_plane(struct intel_plane *plane,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 59c066ca14e5..eb0c559b2715 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -245,7 +245,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
245 intel_dvo->attached_connector->panel.fixed_mode; 245 intel_dvo->attached_connector->panel.fixed_mode;
246 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 246 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
247 247
248 /* If we have timings from the BIOS for the panel, put them in 248 /*
249 * If we have timings from the BIOS for the panel, put them in
249 * to the adjusted mode. The CRTC will be set up for this mode, 250 * to the adjusted mode. The CRTC will be set up for this mode,
250 * with the panel scaling set up to source from the H/VDisplay 251 * with the panel scaling set up to source from the H/VDisplay
251 * of the original mode. 252 * of the original mode.
@@ -293,11 +294,6 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
293 I915_WRITE(dvo_reg, dvo_val); 294 I915_WRITE(dvo_reg, dvo_val);
294} 295}
295 296
296/**
297 * Detect the output connection on our DVO device.
298 *
299 * Unimplemented.
300 */
301static enum drm_connector_status 297static enum drm_connector_status
302intel_dvo_detect(struct drm_connector *connector, bool force) 298intel_dvo_detect(struct drm_connector *connector, bool force)
303{ 299{
@@ -313,7 +309,8 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
313 const struct drm_display_mode *fixed_mode = 309 const struct drm_display_mode *fixed_mode =
314 to_intel_connector(connector)->panel.fixed_mode; 310 to_intel_connector(connector)->panel.fixed_mode;
315 311
316 /* We should probably have an i2c driver get_modes function for those 312 /*
313 * We should probably have an i2c driver get_modes function for those
317 * devices which will have a fixed set of modes determined by the chip 314 * devices which will have a fixed set of modes determined by the chip
318 * (TV-out, for example), but for now with just TMDS and LVDS, 315 * (TV-out, for example), but for now with just TMDS and LVDS,
319 * that's not the case. 316 * that's not the case.
@@ -371,7 +368,7 @@ static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
371 .destroy = intel_dvo_enc_destroy, 368 .destroy = intel_dvo_enc_destroy,
372}; 369};
373 370
374/** 371/*
375 * Attempts to get a fixed panel timing for LVDS (currently only the i830). 372 * Attempts to get a fixed panel timing for LVDS (currently only the i830).
376 * 373 *
377 * Other chips with DVO LVDS will need to extend this to deal with the LVDS 374 * Other chips with DVO LVDS will need to extend this to deal with the LVDS
@@ -443,7 +440,8 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
443 uint32_t dpll[I915_MAX_PIPES]; 440 uint32_t dpll[I915_MAX_PIPES];
444 enum port port; 441 enum port port;
445 442
446 /* Allow the I2C driver info to specify the GPIO to be used in 443 /*
444 * Allow the I2C driver info to specify the GPIO to be used in
447 * special cases, but otherwise default to what's defined 445 * special cases, but otherwise default to what's defined
448 * in the spec. 446 * in the spec.
449 */ 447 */
@@ -454,7 +452,8 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
454 else 452 else
455 gpio = GMBUS_PIN_DPB; 453 gpio = GMBUS_PIN_DPB;
456 454
457 /* Set up the I2C bus necessary for the chip we're probing. 455 /*
456 * Set up the I2C bus necessary for the chip we're probing.
458 * It appears that everything is on GPIOE except for panels 457 * It appears that everything is on GPIOE except for panels
459 * on i830 laptops, which are on GPIOB (DVOA). 458 * on i830 laptops, which are on GPIOB (DVOA).
460 */ 459 */
@@ -462,12 +461,14 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
462 461
463 intel_dvo->dev = *dvo; 462 intel_dvo->dev = *dvo;
464 463
465 /* GMBUS NAK handling seems to be unstable, hence let the 464 /*
465 * GMBUS NAK handling seems to be unstable, hence let the
466 * transmitter detection run in bit banging mode for now. 466 * transmitter detection run in bit banging mode for now.
467 */ 467 */
468 intel_gmbus_force_bit(i2c, true); 468 intel_gmbus_force_bit(i2c, true);
469 469
470 /* ns2501 requires the DVO 2x clock before it will 470 /*
471 * ns2501 requires the DVO 2x clock before it will
471 * respond to i2c accesses, so make sure we have 472 * respond to i2c accesses, so make sure we have
472 * the clock enabled before we attempt to 473 * the clock enabled before we attempt to
473 * initialize the device. 474 * initialize the device.
@@ -525,7 +526,8 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
525 526
526 intel_connector_attach_encoder(intel_connector, intel_encoder); 527 intel_connector_attach_encoder(intel_connector, intel_encoder);
527 if (dvo->type == INTEL_DVO_CHIP_LVDS) { 528 if (dvo->type == INTEL_DVO_CHIP_LVDS) {
528 /* For our LVDS chipsets, we should hopefully be able 529 /*
530 * For our LVDS chipsets, we should hopefully be able
529 * to dig the fixed panel mode out of the BIOS data. 531 * to dig the fixed panel mode out of the BIOS data.
530 * However, it's in a different format from the BIOS 532 * However, it's in a different format from the BIOS
531 * data on chipsets with integrated LVDS (stored in AIM 533 * data on chipsets with integrated LVDS (stored in AIM
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 7eebfbb95e89..f3c5100d629e 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -631,7 +631,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
631 * Similarly the preempt context must always be available so that 631 * Similarly the preempt context must always be available so that
632 * we can interrupt the engine at any time. 632 * we can interrupt the engine at any time.
633 */ 633 */
634 if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) { 634 if (engine->i915->preempt_context) {
635 ring = engine->context_pin(engine, 635 ring = engine->context_pin(engine,
636 engine->i915->preempt_context); 636 engine->i915->preempt_context);
637 if (IS_ERR(ring)) { 637 if (IS_ERR(ring)) {
@@ -656,7 +656,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
656err_breadcrumbs: 656err_breadcrumbs:
657 intel_engine_fini_breadcrumbs(engine); 657 intel_engine_fini_breadcrumbs(engine);
658err_unpin_preempt: 658err_unpin_preempt:
659 if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) 659 if (engine->i915->preempt_context)
660 engine->context_unpin(engine, engine->i915->preempt_context); 660 engine->context_unpin(engine, engine->i915->preempt_context);
661err_unpin_kernel: 661err_unpin_kernel:
662 engine->context_unpin(engine, engine->i915->kernel_context); 662 engine->context_unpin(engine, engine->i915->kernel_context);
@@ -686,12 +686,12 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
686 if (engine->default_state) 686 if (engine->default_state)
687 i915_gem_object_put(engine->default_state); 687 i915_gem_object_put(engine->default_state);
688 688
689 if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) 689 if (engine->i915->preempt_context)
690 engine->context_unpin(engine, engine->i915->preempt_context); 690 engine->context_unpin(engine, engine->i915->preempt_context);
691 engine->context_unpin(engine, engine->i915->kernel_context); 691 engine->context_unpin(engine, engine->i915->kernel_context);
692} 692}
693 693
694u64 intel_engine_get_active_head(struct intel_engine_cs *engine) 694u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
695{ 695{
696 struct drm_i915_private *dev_priv = engine->i915; 696 struct drm_i915_private *dev_priv = engine->i915;
697 u64 acthd; 697 u64 acthd;
@@ -707,7 +707,7 @@ u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
707 return acthd; 707 return acthd;
708} 708}
709 709
710u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine) 710u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
711{ 711{
712 struct drm_i915_private *dev_priv = engine->i915; 712 struct drm_i915_private *dev_priv = engine->i915;
713 u64 bbaddr; 713 u64 bbaddr;
@@ -1464,7 +1464,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
1464 struct drm_i915_private *dev_priv = engine->i915; 1464 struct drm_i915_private *dev_priv = engine->i915;
1465 bool idle = true; 1465 bool idle = true;
1466 1466
1467 intel_runtime_pm_get(dev_priv); 1467 /* If the whole device is asleep, the engine must be idle */
1468 if (!intel_runtime_pm_get_if_in_use(dev_priv))
1469 return true;
1468 1470
1469 /* First check that no commands are left in the ring */ 1471 /* First check that no commands are left in the ring */
1470 if ((I915_READ_HEAD(engine) & HEAD_ADDR) != 1472 if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
@@ -1503,10 +1505,6 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
1503 if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock)) 1505 if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
1504 return true; 1506 return true;
1505 1507
1506 /* Interrupt/tasklet pending? */
1507 if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
1508 return false;
1509
1510 /* Waiting to drain ELSP? */ 1508 /* Waiting to drain ELSP? */
1511 if (READ_ONCE(engine->execlists.active)) 1509 if (READ_ONCE(engine->execlists.active))
1512 return false; 1510 return false;
@@ -1707,73 +1705,20 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
1707 } 1705 }
1708} 1706}
1709 1707
1710void intel_engine_dump(struct intel_engine_cs *engine, 1708static void intel_engine_print_registers(const struct intel_engine_cs *engine,
-			 struct drm_printer *m,
-			 const char *header, ...)
+			 struct drm_printer *m)
 {
-    struct intel_breadcrumbs * const b = &engine->breadcrumbs;
-    const struct intel_engine_execlists * const execlists = &engine->execlists;
-    struct i915_gpu_error * const error = &engine->i915->gpu_error;
     struct drm_i915_private *dev_priv = engine->i915;
-    struct drm_i915_gem_request *rq;
-    struct rb_node *rb;
-    char hdr[80];
+    const struct intel_engine_execlists * const execlists =
+        &engine->execlists;
     u64 addr;
 
-    if (header) {
-        va_list ap;
-
-        va_start(ap, header);
-        drm_vprintf(m, header, &ap);
-        va_end(ap);
-    }
-
-    if (i915_terminally_wedged(&engine->i915->gpu_error))
-        drm_printf(m, "*** WEDGED ***\n");
-
-    drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
-           intel_engine_get_seqno(engine),
-           intel_engine_last_submit(engine),
-           engine->hangcheck.seqno,
-           jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
-           engine->timeline->inflight_seqnos);
-    drm_printf(m, "\tReset count: %d (global %d)\n",
-           i915_reset_engine_count(error, engine),
-           i915_reset_count(error));
-
-    rcu_read_lock();
-
-    drm_printf(m, "\tRequests:\n");
-
-    rq = list_first_entry(&engine->timeline->requests,
-                  struct drm_i915_gem_request, link);
-    if (&rq->link != &engine->timeline->requests)
-        print_request(m, rq, "\t\tfirst ");
-
-    rq = list_last_entry(&engine->timeline->requests,
-                 struct drm_i915_gem_request, link);
-    if (&rq->link != &engine->timeline->requests)
-        print_request(m, rq, "\t\tlast ");
-
-    rq = i915_gem_find_active_request(engine);
-    if (rq) {
-        print_request(m, rq, "\t\tactive ");
-        drm_printf(m,
-               "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
-               rq->head, rq->postfix, rq->tail,
-               rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
-               rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
-    }
-
-    drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
-           I915_READ(RING_START(engine->mmio_base)),
-           rq ? i915_ggtt_offset(rq->ring->vma) : 0);
-    drm_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
-           I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
-           rq ? rq->ring->head : 0);
-    drm_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
-           I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
-           rq ? rq->ring->tail : 0);
+    drm_printf(m, "\tRING_START: 0x%08x\n",
+           I915_READ(RING_START(engine->mmio_base)));
+    drm_printf(m, "\tRING_HEAD: 0x%08x\n",
+           I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
+    drm_printf(m, "\tRING_TAIL: 0x%08x\n",
+           I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
     drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
            I915_READ(RING_CTL(engine->mmio_base)),
            I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
@@ -1782,6 +1727,11 @@ void intel_engine_dump(struct intel_engine_cs *engine,
            I915_READ(RING_MI_MODE(engine->mmio_base)),
            I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
     }
+
+    if (INTEL_GEN(dev_priv) >= 6) {
+        drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
+    }
+
     if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
         drm_printf(m, "\tSYNC_0: 0x%08x\n",
                I915_READ(RING_SYNC_0(engine->mmio_base)));
@@ -1792,8 +1742,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
                I915_READ(RING_SYNC_2(engine->mmio_base)));
     }
 
-    rcu_read_unlock();
-
     addr = intel_engine_get_active_head(engine);
     drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
            upper_32_bits(addr), lower_32_bits(addr));
@@ -1855,10 +1803,13 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 
     rcu_read_lock();
     for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
+        struct drm_i915_gem_request *rq;
         unsigned int count;
 
         rq = port_unpack(&execlists->port[idx], &count);
         if (rq) {
+            char hdr[80];
+
             snprintf(hdr, sizeof(hdr),
                  "\t\tELSP[%d] count=%d, rq: ",
                  idx, count);
@@ -1877,6 +1828,77 @@ void intel_engine_dump(struct intel_engine_cs *engine,
         drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
                I915_READ(RING_PP_DIR_DCLV(engine)));
     }
+}
+
+void intel_engine_dump(struct intel_engine_cs *engine,
+               struct drm_printer *m,
+               const char *header, ...)
+{
+    struct intel_breadcrumbs * const b = &engine->breadcrumbs;
+    const struct intel_engine_execlists * const execlists = &engine->execlists;
+    struct i915_gpu_error * const error = &engine->i915->gpu_error;
+    struct drm_i915_gem_request *rq;
+    struct rb_node *rb;
+
+    if (header) {
+        va_list ap;
+
+        va_start(ap, header);
+        drm_vprintf(m, header, &ap);
+        va_end(ap);
+    }
+
+    if (i915_terminally_wedged(&engine->i915->gpu_error))
+        drm_printf(m, "*** WEDGED ***\n");
+
+    drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
+           intel_engine_get_seqno(engine),
+           intel_engine_last_submit(engine),
+           engine->hangcheck.seqno,
+           jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
+           engine->timeline->inflight_seqnos);
+    drm_printf(m, "\tReset count: %d (global %d)\n",
+           i915_reset_engine_count(error, engine),
+           i915_reset_count(error));
+
+    rcu_read_lock();
+
+    drm_printf(m, "\tRequests:\n");
+
+    rq = list_first_entry(&engine->timeline->requests,
+                  struct drm_i915_gem_request, link);
+    if (&rq->link != &engine->timeline->requests)
+        print_request(m, rq, "\t\tfirst ");
+
+    rq = list_last_entry(&engine->timeline->requests,
+                 struct drm_i915_gem_request, link);
+    if (&rq->link != &engine->timeline->requests)
+        print_request(m, rq, "\t\tlast ");
+
+    rq = i915_gem_find_active_request(engine);
+    if (rq) {
+        print_request(m, rq, "\t\tactive ");
+        drm_printf(m,
+               "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
+               rq->head, rq->postfix, rq->tail,
+               rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+               rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+        drm_printf(m, "\t\tring->start: 0x%08x\n",
+               i915_ggtt_offset(rq->ring->vma));
+        drm_printf(m, "\t\tring->head: 0x%08x\n",
+               rq->ring->head);
+        drm_printf(m, "\t\tring->tail: 0x%08x\n",
+               rq->ring->tail);
+    }
+
+    rcu_read_unlock();
+
+    if (intel_runtime_pm_get_if_in_use(engine->i915)) {
+        intel_engine_print_registers(engine, m);
+        intel_runtime_pm_put(engine->i915);
+    } else {
+        drm_printf(m, "\tDevice is asleep; skipping register dump\n");
+    }
 
     spin_lock_irq(&engine->timeline->lock);
     list_for_each_entry(rq, &engine->timeline->requests, link)
@@ -1899,10 +1921,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
     }
     spin_unlock_irq(&b->rb_lock);
 
-    if (INTEL_GEN(dev_priv) >= 6) {
-        drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
-    }
-
     drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
            engine->irq_posted,
            yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
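
The hunks above split the engine dump in two: intel_engine_print_registers() now holds every MMIO read, while intel_engine_dump() prints software state first and only touches the hardware when it is already awake. A minimal sketch of that pattern, using the names from the hunks (print_software_state() is a hypothetical stand-in for the seqno/request dumping):

    static void dump_engine_state(struct intel_engine_cs *engine,
                                  struct drm_printer *m)
    {
        /* Software state (seqnos, request lists) is always safe to read. */
        print_software_state(engine, m);

        /*
         * intel_runtime_pm_get_if_in_use() only takes a wakeref when the
         * device is already powered; it never wakes the device itself, so
         * a debugfs dump cannot perturb runtime suspend.
         */
        if (intel_runtime_pm_get_if_in_use(engine->i915)) {
            intel_engine_print_registers(engine, m);    /* MMIO reads */
            intel_runtime_pm_put(engine->i915);
        } else {
            drm_printf(m, "\tDevice is asleep; skipping register dump\n");
        }
    }
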
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index d7d1ac79c38a..f66f6fb5743d 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -183,7 +183,7 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
     else
         dpfc_ctl |= DPFC_CTL_LIMIT_1X;
 
-    if (params->vma->fence) {
+    if (params->flags & PLANE_HAS_FENCE) {
         dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
         I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
     } else {
@@ -241,7 +241,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
         break;
     }
 
-    if (params->vma->fence) {
+    if (params->flags & PLANE_HAS_FENCE) {
         dpfc_ctl |= DPFC_CTL_FENCE_EN;
         if (IS_GEN5(dev_priv))
             dpfc_ctl |= params->vma->fence->id;
@@ -324,7 +324,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
         break;
     }
 
-    if (params->vma->fence) {
+    if (params->flags & PLANE_HAS_FENCE) {
         dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
         I915_WRITE(SNB_DPFC_CTL_SA,
                SNB_CPU_FENCE_ENABLE |
@@ -753,6 +753,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
     struct drm_framebuffer *fb = plane_state->base.fb;
 
     cache->vma = NULL;
+    cache->flags = 0;
 
     cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
     if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -778,6 +779,9 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
     cache->fb.stride = fb->pitches[0];
 
     cache->vma = plane_state->vma;
+    cache->flags = plane_state->flags;
+    if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence))
+        cache->flags &= ~PLANE_HAS_FENCE;
 }
 
 static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -816,7 +820,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
      * so have no fence associated with it) due to aperture constraints
      * at the time of pinning.
      */
-    if (!cache->vma->fence) {
+    if (!(cache->flags & PLANE_HAS_FENCE)) {
         fbc->no_fbc_reason = "framebuffer not tiled or fenced";
         return false;
     }
@@ -897,6 +901,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
     memset(params, 0, sizeof(*params));
 
     params->vma = cache->vma;
+    params->flags = cache->flags;
 
     params->crtc.pipe = crtc->pipe;
     params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
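
The FBC change follows one idea throughout: decide once, at state-cache time, whether the framebuffer has a fence, and carry that answer in a flags bitmask instead of dereferencing vma->fence at activation time. A condensed sketch of the pattern (the struct layouts are illustrative, not the driver's full definitions):

    #define PLANE_HAS_FENCE BIT(0)

    /* At pin/state-cache time: latch the fence decision. */
    static void cache_plane_state(struct intel_fbc_state_cache *cache,
                                  const struct intel_plane_state *state)
    {
        cache->vma = state->vma;
        cache->flags = state->flags;
        /* The flag must never claim a fence the vma does not hold. */
        if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence))
            cache->flags &= ~PLANE_HAS_FENCE;
    }

    /* At activation time: test the bit, not the pointer. */
    if (params->flags & PLANE_HAS_FENCE)
        dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
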
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index da48af11eb6b..055f409f8b75 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -48,7 +48,8 @@
 static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
 {
     struct drm_i915_gem_object *obj = ifbdev->fb->obj;
-    unsigned int origin = ifbdev->vma->fence ? ORIGIN_GTT : ORIGIN_CPU;
+    unsigned int origin =
+        ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU;
 
     intel_fb_obj_invalidate(obj, origin);
 }
@@ -177,6 +178,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
     struct fb_info *info;
     struct drm_framebuffer *fb;
     struct i915_vma *vma;
+    unsigned long flags = 0;
     bool prealloc = false;
     void __iomem *vaddr;
     int ret;
@@ -211,7 +213,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
      * This also validates that any existing fb inherited from the
      * BIOS is suitable for own access.
      */
-    vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, DRM_MODE_ROTATE_0);
+    vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base,
+                     DRM_MODE_ROTATE_0,
+                     &flags);
     if (IS_ERR(vma)) {
         ret = PTR_ERR(vma);
         goto out_unlock;
@@ -268,6 +272,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
     DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
               fb->width, fb->height, i915_ggtt_offset(vma));
     ifbdev->vma = vma;
+    ifbdev->vma_flags = flags;
 
     intel_runtime_pm_put(dev_priv);
     mutex_unlock(&dev->struct_mutex);
@@ -275,7 +280,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
     return 0;
 
 out_unpin:
-    intel_unpin_fb_vma(vma);
+    intel_unpin_fb_vma(vma, flags);
 out_unlock:
     intel_runtime_pm_put(dev_priv);
     mutex_unlock(&dev->struct_mutex);
@@ -513,7 +518,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
 
     if (ifbdev->vma) {
         mutex_lock(&ifbdev->helper.dev->struct_mutex);
-        intel_unpin_fb_vma(ifbdev->vma);
+        intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);
         mutex_unlock(&ifbdev->helper.dev->struct_mutex);
     }
 
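
As the fbdev hunks show, intel_pin_and_fence_fb_obj() now reports what it actually acquired through a flags out-parameter, and intel_unpin_fb_vma() takes the same flags so release stays symmetric with acquire. A usage sketch built only from the calls visible above:

    unsigned long flags = 0;
    struct i915_vma *vma;

    vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base,
                                     DRM_MODE_ROTATE_0,
                                     &flags);
    if (IS_ERR(vma))
        return PTR_ERR(vma);

    ifbdev->vma = vma;
    ifbdev->vma_flags = flags;          /* remember for teardown */
    /* ... */
    intel_unpin_fb_vma(vma, flags);     /* release exactly what was pinned */
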
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 1f3a8786bbdc..946766b62459 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -688,7 +688,7 @@ static void guc_dequeue(struct intel_engine_cs *engine)
         goto unlock;
 
     if (port_isset(port)) {
-        if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
+        if (engine->i915->preempt_context) {
             struct guc_preempt_work *preempt_work =
                 &engine->i915->guc.preempt_work[engine->id];
 
@@ -747,6 +747,12 @@ done:
         execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
         guc_submit(engine);
     }
+
+    /* We must always keep the beast fed if we have work piled up */
+    GEM_BUG_ON(port_isset(execlists->port) &&
+           !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
+    GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
+
 unlock:
     spin_unlock_irq(&engine->timeline->lock);
 }
@@ -832,10 +838,12 @@ static int guc_clients_doorbell_init(struct intel_guc *guc)
     if (ret)
         return ret;
 
-    ret = create_doorbell(guc->preempt_client);
-    if (ret) {
-        destroy_doorbell(guc->execbuf_client);
-        return ret;
+    if (guc->preempt_client) {
+        ret = create_doorbell(guc->preempt_client);
+        if (ret) {
+            destroy_doorbell(guc->execbuf_client);
+            return ret;
+        }
     }
 
     return 0;
@@ -848,8 +856,11 @@ static void guc_clients_doorbell_fini(struct intel_guc *guc)
      * Instead of trying (in vain) to communicate with it, let's just
      * cleanup the doorbell HW and our internal state.
      */
-    __destroy_doorbell(guc->preempt_client);
-    __update_doorbell_desc(guc->preempt_client, GUC_DOORBELL_INVALID);
+    if (guc->preempt_client) {
+        __destroy_doorbell(guc->preempt_client);
+        __update_doorbell_desc(guc->preempt_client,
+                       GUC_DOORBELL_INVALID);
+    }
     __destroy_doorbell(guc->execbuf_client);
     __update_doorbell_desc(guc->execbuf_client, GUC_DOORBELL_INVALID);
 }
@@ -979,17 +990,19 @@ static int guc_clients_create(struct intel_guc *guc)
     }
     guc->execbuf_client = client;
 
-    client = guc_client_alloc(dev_priv,
-                  INTEL_INFO(dev_priv)->ring_mask,
-                  GUC_CLIENT_PRIORITY_KMD_HIGH,
-                  dev_priv->preempt_context);
-    if (IS_ERR(client)) {
-        DRM_ERROR("Failed to create GuC client for preemption!\n");
-        guc_client_free(guc->execbuf_client);
-        guc->execbuf_client = NULL;
-        return PTR_ERR(client);
+    if (dev_priv->preempt_context) {
+        client = guc_client_alloc(dev_priv,
+                      INTEL_INFO(dev_priv)->ring_mask,
+                      GUC_CLIENT_PRIORITY_KMD_HIGH,
+                      dev_priv->preempt_context);
+        if (IS_ERR(client)) {
+            DRM_ERROR("Failed to create GuC client for preemption!\n");
+            guc_client_free(guc->execbuf_client);
+            guc->execbuf_client = NULL;
+            return PTR_ERR(client);
+        }
+        guc->preempt_client = client;
     }
-    guc->preempt_client = client;
 
     return 0;
 }
@@ -998,10 +1011,11 @@ static void guc_clients_destroy(struct intel_guc *guc)
 {
     struct intel_guc_client *client;
 
-    client = fetch_and_zero(&guc->execbuf_client);
-    guc_client_free(client);
-
     client = fetch_and_zero(&guc->preempt_client);
+    if (client)
+        guc_client_free(client);
+
+    client = fetch_and_zero(&guc->execbuf_client);
     guc_client_free(client);
 }
 
@@ -1160,7 +1174,8 @@ int intel_guc_submission_enable(struct intel_guc *guc)
     GEM_BUG_ON(!guc->execbuf_client);
 
     guc_reset_wq(guc->execbuf_client);
-    guc_reset_wq(guc->preempt_client);
+    if (guc->preempt_client)
+        guc_reset_wq(guc->preempt_client);
 
     err = intel_guc_sample_forcewake(guc);
     if (err)
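
Every GuC submission path above is converted to the same gating rule: the preemption client is optional, so each consumer tests the pointer (guc->preempt_client, or dev_priv->preempt_context before the client exists) rather than a platform capability macro. A compressed sketch of the rule, with the allocation arguments elided:

    /* Create the optional client only when the preempt context exists... */
    if (dev_priv->preempt_context)
        guc->preempt_client = guc_client_alloc(dev_priv, /* ... */);

    /* ...and every later use site checks before touching it. */
    if (guc->preempt_client)
        guc_reset_wq(guc->preempt_client);
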
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 8ed05182f944..ef9a05d8e5a9 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -118,7 +118,8 @@ void intel_huc_init_early(struct intel_huc *huc)
 
 /**
  * huc_ucode_xfer() - DMAs the firmware
- * @dev_priv: the drm_i915_private device
+ * @huc_fw: the firmware descriptor
+ * @vma: the firmware image (bound into the GGTT)
  *
  * Transfer the firmware image to RAM for execution by the microcontroller.
  *
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 5809b29044fc..6269750e2b54 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -74,7 +74,6 @@
 static struct platform_device *
 lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
 {
-    int ret;
     struct drm_device *dev = &dev_priv->drm;
     struct platform_device_info pinfo = {};
     struct resource *rsc;
@@ -119,24 +118,19 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
     spin_lock_init(&pdata->lpe_audio_slock);
 
     platdev = platform_device_register_full(&pinfo);
+    kfree(rsc);
+    kfree(pdata);
+
     if (IS_ERR(platdev)) {
-        ret = PTR_ERR(platdev);
         DRM_ERROR("Failed to allocate LPE audio platform device\n");
-        goto err;
+        return platdev;
     }
 
-    kfree(rsc);
-
     pm_runtime_forbid(&platdev->dev);
     pm_runtime_set_active(&platdev->dev);
     pm_runtime_enable(&platdev->dev);
 
     return platdev;
-
-err:
-    kfree(rsc);
-    kfree(pdata);
-    return ERR_PTR(ret);
 }
 
 static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
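
The error-path simplification relies on platform_device_register_full() copying the resources and platform data handed in via struct platform_device_info, so the caller may free its own buffers immediately, on success and failure alike (my reading of the platform core; treat it as an assumption). Condensed:

    pinfo.res = rsc;                 /* copied by the platform core */
    pinfo.data = pdata;              /* copied, pinfo.size_data bytes */

    platdev = platform_device_register_full(&pinfo);
    kfree(rsc);                      /* ours to free either way */
    kfree(pdata);
    if (IS_ERR(platdev))
        return platdev;              /* ERR_PTR travels straight up */
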
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 380c0838d8b3..9b6d781b22ec 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -161,7 +161,6 @@
 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */
 #define WA_TAIL_DWORDS 2
 #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
-#define PREEMPT_ID 0x1
 
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
                         struct intel_engine_cs *engine);
@@ -448,7 +447,8 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
         &engine->i915->preempt_context->engine[engine->id];
     unsigned int n;
 
-    GEM_BUG_ON(engine->i915->preempt_context->hw_id != PREEMPT_ID);
+    GEM_BUG_ON(engine->execlists.preempt_complete_status !=
+           upper_32_bits(ce->lrc_desc));
     GEM_BUG_ON(!IS_ALIGNED(ce->ring->size, WA_TAIL_BYTES));
 
     memset(ce->ring->vaddr + ce->ring->tail, 0, WA_TAIL_BYTES);
@@ -528,7 +528,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
         if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
             goto unlock;
 
-        if (HAS_LOGICAL_RING_PREEMPTION(engine->i915) &&
+        if (engine->i915->preempt_context &&
             rb_entry(rb, struct i915_priolist, node)->priority >
             max(last->priotree.priority, 0)) {
             /*
@@ -642,6 +642,10 @@ done:
     execlists->first = rb;
     if (submit)
         port_assign(port, last);
+
+    /* We must always keep the beast fed if we have work piled up */
+    GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
+
 unlock:
     spin_unlock_irq(&engine->timeline->lock);
 
@@ -649,6 +653,9 @@ unlock:
         execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
         execlists_submit_ports(engine);
     }
+
+    GEM_BUG_ON(port_isset(execlists->port) &&
+           !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
 }
 
 void
@@ -844,7 +851,7 @@ static void execlists_submission_tasklet(unsigned long data)
             GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
 
             if (status & GEN8_CTX_STATUS_COMPLETE &&
-                buf[2*head + 1] == PREEMPT_ID) {
+                buf[2*head + 1] == execlists->preempt_complete_status) {
                 GEM_TRACE("%s preempt-idle\n", engine->name);
 
                 execlists_cancel_port_requests(execlists);
@@ -1963,6 +1970,12 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
     engine->unpark = NULL;
 
     engine->flags |= I915_ENGINE_SUPPORTS_STATS;
+
+    engine->i915->caps.scheduler =
+        I915_SCHEDULER_CAP_ENABLED |
+        I915_SCHEDULER_CAP_PRIORITY;
+    if (engine->i915->preempt_context)
+        engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION;
 }
 
 static void
@@ -2039,6 +2052,11 @@ static int logical_ring_init(struct intel_engine_cs *engine)
     engine->execlists.elsp =
         engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
 
+    engine->execlists.preempt_complete_status = ~0u;
+    if (engine->i915->preempt_context)
+        engine->execlists.preempt_complete_status =
+            upper_32_bits(engine->i915->preempt_context->engine[engine->id].lrc_desc);
+
     return 0;
 
 error:
@@ -2301,7 +2319,7 @@ populate_lr_context(struct i915_gem_context *ctx,
     if (!engine->default_state)
         regs[CTX_CONTEXT_CONTROL + 1] |=
             _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
-    if (ctx->hw_id == PREEMPT_ID)
+    if (ctx == ctx->i915->preempt_context)
         regs[CTX_CONTEXT_CONTROL + 1] |=
             _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
                        CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
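
Instead of reserving a magic hw_id (the old PREEMPT_ID), the execlists code now derives the expected context-status value from the preempt context's own descriptor, once, at init; the CSB walk then reduces to a plain compare. A sketch assembled from the hunks above:

    /* logical_ring_init(): precompute the expected CSB status. */
    engine->execlists.preempt_complete_status = ~0u;   /* never matches */
    if (engine->i915->preempt_context)
        engine->execlists.preempt_complete_status =
            upper_32_bits(engine->i915->preempt_context->engine[engine->id].lrc_desc);

    /* execlists_submission_tasklet(): detect preempt-to-idle completion. */
    if (status & GEN8_CTX_STATUS_COMPLETE &&
        buf[2 * head + 1] == execlists->preempt_complete_status)
        execlists_cancel_port_requests(execlists);
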
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index ef80499113ee..d35d2d50f595 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -189,7 +189,7 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
     /* Convert from 100ms to 100us units */
     pps->t4 = val * 1000;
 
-    if (INTEL_INFO(dev_priv)->gen <= 4 &&
+    if (INTEL_GEN(dev_priv) <= 4 &&
         pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
         DRM_DEBUG_KMS("Panel power timings uninitialized, "
                   "setting defaults\n");
@@ -268,7 +268,9 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
     /* set the corresponding LVDS_BORDER bit */
     temp &= ~LVDS_BORDER_ENABLE;
     temp |= pipe_config->gmch_pfit.lvds_border_bits;
-    /* Set the B0-B3 data pairs corresponding to whether we're going to
+
+    /*
+     * Set the B0-B3 data pairs corresponding to whether we're going to
      * set the DPLLs for dual-channel mode or not.
      */
     if (lvds_encoder->is_dual_link)
@@ -276,7 +278,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
     else
         temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
 
-    /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+    /*
+     * It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
      * appropriately here, but we need to look more thoroughly into how
      * panels behave in the two modes. For now, let's just maintain the
      * value we got from the BIOS.
@@ -284,12 +287,16 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
     temp &= ~LVDS_A3_POWER_MASK;
     temp |= lvds_encoder->a3_power;
 
-    /* Set the dithering flag on LVDS as needed, note that there is no
+    /*
+     * Set the dithering flag on LVDS as needed, note that there is no
      * special lvds dither control bit on pch-split platforms, dithering is
-     * only controlled through the PIPECONF reg. */
+     * only controlled through the PIPECONF reg.
+     */
     if (IS_GEN4(dev_priv)) {
-        /* Bspec wording suggests that LVDS port dithering only exists
-         * for 18bpp panels. */
+        /*
+         * Bspec wording suggests that LVDS port dithering only exists
+         * for 18bpp panels.
+         */
         if (pipe_config->dither && pipe_config->pipe_bpp == 18)
             temp |= LVDS_ENABLE_DITHER;
         else
@@ -304,7 +311,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
     I915_WRITE(lvds_encoder->reg, temp);
 }
 
-/**
+/*
  * Sets the power state for the panel.
  */
 static void intel_enable_lvds(struct intel_encoder *encoder,
@@ -441,7 +448,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
     return true;
 }
 
-/**
+/*
  * Detect the LVDS connection.
  *
  * Since LVDS doesn't have hotplug, we use the lid as a proxy. Open means
@@ -464,7 +471,7 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
     return connector_status_connected;
 }
 
-/**
+/*
  * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
 */
 static int intel_lvds_get_modes(struct drm_connector *connector)
@@ -893,7 +900,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
     if (dmi_check_system(intel_dual_link_lvds))
         return true;
 
-    /* BIOS should set the proper LVDS register value at boot, but
+    /*
+     * BIOS should set the proper LVDS register value at boot, but
      * in reality, it doesn't set the value when the lid is closed;
      * we need to check "the value to be set" in VBT when LVDS
      * register is uninitialized.
@@ -907,13 +915,17 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
 
 static bool intel_lvds_supported(struct drm_i915_private *dev_priv)
 {
-    /* With the introduction of the PCH we gained a dedicated
-     * LVDS presence pin, use it. */
+    /*
+     * With the introduction of the PCH we gained a dedicated
+     * LVDS presence pin, use it.
+     */
     if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
         return true;
 
-    /* Otherwise LVDS was only attached to mobile products,
-     * except for the inglorious 830gm */
+    /*
+     * Otherwise LVDS was only attached to mobile products,
+     * except for the inglorious 830gm
+     */
     if (INTEL_GEN(dev_priv) <= 4 &&
         IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
         return true;
@@ -923,7 +935,7 @@ static bool intel_lvds_supported(struct drm_i915_private *dev_priv)
 
 /**
  * intel_lvds_init - setup LVDS connectors on this device
- * @dev: drm device
+ * @dev_priv: i915 device
  *
  * Create the connector, register the LVDS DDC bus, and try to figure out what
  * modes we can display on the LVDS panel (if present).
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index f4c46b0b8f0a..abb7a8c1e340 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -187,7 +187,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
         table->table = broxton_mocs_table;
         result = true;
     } else {
-        WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
+        WARN_ONCE(INTEL_GEN(dev_priv) >= 9,
               "Platform that should have a MOCS table does not.\n");
     }
 
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 41e9465d44a8..89f568e739ee 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -801,7 +801,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 
     atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
 
-    vma = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
+    vma = i915_gem_object_pin_to_display_plane(new_bo,
+                           0, NULL, PIN_MAPPABLE);
     if (IS_ERR(vma)) {
         ret = PTR_ERR(vma);
         goto out_pin_section;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e702a6487aa9..41d00b1603e3 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -397,8 +397,11 @@ intel_panel_detect(struct drm_i915_private *dev_priv)
 
 /**
  * scale - scale values from one range to another
- *
  * @source_val: value in range [@source_min..@source_max]
+ * @source_min: minimum legal value for @source_val
+ * @source_max: maximum legal value for @source_val
+ * @target_min: corresponding target value for @source_min
+ * @target_max: corresponding target value for @source_max
 *
  * Return @source_val in range [@source_min..@source_max] scaled to range
  * [@target_min..@target_max].
@@ -416,8 +419,9 @@ static uint32_t scale(uint32_t source_val,
     source_val = clamp(source_val, source_min, source_max);
 
     /* avoid overflows */
-    target_val = DIV_ROUND_CLOSEST_ULL((uint64_t)(source_val - source_min) *
-               (target_max - target_min), source_max - source_min);
+    target_val = mul_u32_u32(source_val - source_min,
+                 target_max - target_min);
+    target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min);
     target_val += target_min;
 
     return target_val;
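
mul_u32_u32() forms the full 64-bit product of two u32 operands (on 32-bit kernels a single widening multiply rather than a 64x64 one), and DIV_ROUND_CLOSEST_ULL() then divides with rounding, so the whole computation runs in 64 bits:

    /*
     * target = (val - smin) * (tmax - tmin) / (smax - smin) + tmin
     *
     * Worked example: scale(0x8000, 0, 0xffff, 0, 0xffffffff)
     *   0x8000 * 0xffffffff = 0x7fffffff8000   (64-bit product)
     *   0x7fffffff8000 / 0xffff = 0x80008000   (exact, no truncation)
     */
    u64 t = mul_u32_u32(source_val - source_min, target_max - target_min);
    t = DIV_ROUND_CLOSEST_ULL(t, source_max - source_min);
    return t + target_min;
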
@@ -497,7 +501,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
     u32 val;
 
     val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-    if (INTEL_INFO(dev_priv)->gen < 4)
+    if (INTEL_GEN(dev_priv) < 4)
         val >>= 1;
 
     if (panel->backlight.combination_mode) {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index eb68abf6a8e9..abf80e462833 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -729,6 +729,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
  * intel_calculate_wm - calculate watermark level
  * @pixel_rate: pixel clock
  * @wm: chip FIFO params
+ * @fifo_size: size of the FIFO buffer
  * @cpp: bytes per pixel
  * @latency_ns: memory latency for the platform
 *
@@ -2916,10 +2917,6 @@ static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
     /* ILK cursor LP0 latency is 1300 ns */
     if (IS_GEN5(dev_priv))
         wm[0] = 13;
-
-    /* WaDoubleCursorLP3Latency:ivb */
-    if (IS_IVYBRIDGE(dev_priv))
-        wm[3] *= 2;
 }
 
 int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
@@ -4596,7 +4593,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
         min_disp_buf_needed = res_blocks;
     }
 
-    if (res_blocks >= ddb_allocation || res_lines > 31 ||
+    if ((level > 0 && res_lines > 31) ||
+        res_blocks >= ddb_allocation ||
         min_disp_buf_needed >= ddb_allocation) {
         *enabled = false;
 
@@ -4617,8 +4615,9 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
         }
     }
 
+    /* The number of lines is ignored for the level 0 watermark. */
+    *out_lines = level ? res_lines : 0;
     *out_blocks = res_blocks;
-    *out_lines = res_lines;
     *enabled = true;
 
     return 0;
@@ -4710,6 +4709,7 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
     if (!dev_priv->ipc_enabled)
         goto exit;
 
+    trans_min = 0;
     if (INTEL_GEN(dev_priv) >= 10)
         trans_min = 4;
 
@@ -5864,6 +5864,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
 
 /**
  * intel_update_watermarks - update FIFO watermark values based on current modes
+ * @crtc: the #intel_crtc on which to compute the WM
 *
  * Calculate watermark values for the various WM regs based on current mode
  * and plane configuration.
@@ -6372,12 +6373,15 @@ void gen6_rps_boost(struct drm_i915_gem_request *rq,
     if (!rps->enabled)
         return;
 
+    if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+        return;
+
+    /* Serializes with i915_gem_request_retire() */
     boost = false;
     spin_lock_irqsave(&rq->lock, flags);
-    if (!rq->waitboost && !i915_gem_request_completed(rq)) {
-        atomic_inc(&rps->num_waiters);
+    if (!rq->waitboost && !dma_fence_is_signaled_locked(&rq->fence)) {
+        boost = !atomic_fetch_inc(&rps->num_waiters);
         rq->waitboost = true;
-        boost = true;
     }
     spin_unlock_irqrestore(&rq->lock, flags);
     if (!boost)
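
The rewritten boost logic leans on atomic_fetch_inc(), which returns the pre-increment value: only the waiter that takes num_waiters from 0 to 1 sees 0 back and thus requests the boost, while later waiters simply join the one already in flight. The idiom in isolation (the kick at the end is a hypothetical stand-in for whatever the caller does when boost is true):

    /* True only for the first waiter; everyone else piggybacks. */
    boost = !atomic_fetch_inc(&rps->num_waiters);
    rq->waitboost = true;
    /* ... */
    if (boost)
        queue_rps_boost_work(rps);   /* illustrative, not the driver's name */
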
@@ -6938,7 +6942,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
          * No floor required for ring frequency on SKL.
          */
         ring_freq = gpu_freq;
-    } else if (INTEL_INFO(dev_priv)->gen >= 8) {
+    } else if (INTEL_GEN(dev_priv) >= 8) {
         /* max(2 * GT, DDR). NB: GT is 50MHz units */
         ring_freq = max(min_ring_freq, gpu_freq);
     } else if (IS_HASWELL(dev_priv)) {
@@ -7549,7 +7553,7 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 {
     unsigned long val;
 
-    if (INTEL_INFO(dev_priv)->gen != 5)
+    if (!IS_GEN5(dev_priv))
         return 0;
 
     spin_lock_irq(&mchdev_lock);
@@ -7633,7 +7637,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
 
 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
 {
-    if (INTEL_INFO(dev_priv)->gen != 5)
+    if (!IS_GEN5(dev_priv))
         return;
 
     spin_lock_irq(&mchdev_lock);
@@ -7684,7 +7688,7 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
 {
     unsigned long val;
 
-    if (INTEL_INFO(dev_priv)->gen != 5)
+    if (!IS_GEN5(dev_priv))
         return 0;
 
     spin_lock_irq(&mchdev_lock);
@@ -9415,15 +9419,16 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
                  const i915_reg_t reg)
 {
     u32 lower, upper, tmp;
-    unsigned long flags;
     int loop = 2;
 
-    /* The registers accessed do not need forcewake. We borrow
+    /*
+     * The registers accessed do not need forcewake. We borrow the
      * uncore lock to prevent concurrent access to range reg.
      */
-    spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+    lockdep_assert_held(&dev_priv->uncore.lock);
 
-    /* vlv and chv residency counters are 40 bits in width.
+    /*
+     * vlv and chv residency counters are 40 bits in width.
      * With a control bit, we can choose between upper or lower
      * 32bit window into this counter.
     *
@@ -9447,29 +9452,49 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
         upper = I915_READ_FW(reg);
     } while (upper != tmp && --loop);
 
-    /* Everywhere else we always use VLV_COUNTER_CONTROL with the
+    /*
+     * Everywhere else we always use VLV_COUNTER_CONTROL with the
      * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
      * now.
      */
 
-    spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
-
     return lower | (u64)upper << 8;
 }
 
 u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
                const i915_reg_t reg)
 {
-    u64 time_hw;
+    u64 time_hw, prev_hw, overflow_hw;
+    unsigned int fw_domains;
+    unsigned long flags;
+    unsigned int i;
     u32 mul, div;
 
     if (!HAS_RC6(dev_priv))
         return 0;
 
+    /*
+     * Store previous hw counter values for counter wrap-around handling.
+     *
+     * There are only four interesting registers and they live next to each
+     * other so we can use the relative address, compared to the smallest
+     * one as the index into driver storage.
+     */
+    i = (i915_mmio_reg_offset(reg) -
+         i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
+    if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
+        return 0;
+
+    fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+
+    spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+    intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+
     /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
     if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
         mul = 1000000;
         div = dev_priv->czclk_freq;
+        overflow_hw = BIT_ULL(40);
         time_hw = vlv_residency_raw(dev_priv, reg);
     } else {
         /* 833.33ns units on Gen9LP, 1.28us elsewhere. */
@@ -9481,10 +9506,33 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
             div = 1;
         }
 
-        time_hw = I915_READ(reg);
+        overflow_hw = BIT_ULL(32);
+        time_hw = I915_READ_FW(reg);
     }
 
-    return DIV_ROUND_UP_ULL(time_hw * mul, div);
+    /*
+     * Counter wrap handling.
+     *
+     * This relies on a sufficient frequency of queries, otherwise the
+     * counters can still wrap.
+     */
+    prev_hw = dev_priv->gt_pm.rc6.prev_hw_residency[i];
+    dev_priv->gt_pm.rc6.prev_hw_residency[i] = time_hw;
+
+    /* RC6 delta from last sample. */
+    if (time_hw >= prev_hw)
+        time_hw -= prev_hw;
+    else
+        time_hw += overflow_hw - prev_hw;
+
+    /* Add delta to RC6 extended raw driver copy. */
+    time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
+    dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;
+
+    intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
+    spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+
+    return mul_u64_u32_div(time_hw, mul, div);
 }
 
 u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat)
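
intel_rc6_residency_ns() now extends the 32/40-bit hardware counter in software: keep the previous raw sample, add the wrap-corrected delta to a 64-bit accumulator, and scale only the accumulated value. This stays correct as long as the counter wraps at most once between two reads. The core logic in isolation (a hypothetical helper, not a function in the driver):

    /*
     * Extend a narrow free-running counter to 64 bits.
     * 'overflow' is BIT_ULL(40) on VLV/CHV, BIT_ULL(32) elsewhere.
     */
    static u64 extend_counter(u64 raw, u64 *prev, u64 *accum, u64 overflow)
    {
        u64 delta = raw >= *prev ? raw - *prev
                                 : raw + overflow - *prev;   /* one wrap */

        *prev = raw;
        *accum += delta;
        return *accum;     /* caller scales to ns, e.g. mul_u64_u32_div() */
    }
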
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index e9feffdea899..2ef374f936b9 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -126,7 +126,7 @@ static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
 static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
                   enum port port)
 {
-    if (INTEL_INFO(dev_priv)->gen >= 9)
+    if (INTEL_GEN(dev_priv) >= 9)
         return DP_AUX_CH_CTL(port);
     else
         return EDP_PSR_AUX_CTL;
@@ -135,7 +135,7 @@ static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
 static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
                    enum port port, int index)
 {
-    if (INTEL_INFO(dev_priv)->gen >= 9)
+    if (INTEL_GEN(dev_priv) >= 9)
         return DP_AUX_CH_DATA(port, index);
     else
         return EDP_PSR_AUX_DATA(index);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e2085820b586..5718f37160c5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -137,7 +137,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
     return 0;
 }
 
-/**
+/*
  * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
  * implementing two workarounds on gen6. From section 1.4.7.1
  * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
@@ -453,13 +453,13 @@ static int init_ring_common(struct intel_engine_cs *engine)
 
     if (!stop_ring(engine)) {
         /* G45 ring initialization often fails to reset head to zero */
-        DRM_DEBUG_KMS("%s head not reset to zero "
-              "ctl %08x head %08x tail %08x start %08x\n",
-              engine->name,
-              I915_READ_CTL(engine),
-              I915_READ_HEAD(engine),
-              I915_READ_TAIL(engine),
-              I915_READ_START(engine));
+        DRM_DEBUG_DRIVER("%s head not reset to zero "
+                 "ctl %08x head %08x tail %08x start %08x\n",
+                 engine->name,
+                 I915_READ_CTL(engine),
+                 I915_READ_HEAD(engine),
+                 I915_READ_TAIL(engine),
+                 I915_READ_START(engine));
 
         if (!stop_ring(engine)) {
             DRM_ERROR("failed to set %s head to zero "
@@ -492,8 +492,8 @@ static int init_ring_common(struct intel_engine_cs *engine)
 
     /* WaClearRingBufHeadRegAtInit:ctg,elk */
     if (I915_READ_HEAD(engine))
-        DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
-              engine->name, I915_READ_HEAD(engine));
+        DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
+                 engine->name, I915_READ_HEAD(engine));
 
     intel_ring_update_space(ring);
     I915_WRITE_HEAD(engine, ring->head);
@@ -655,7 +655,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
     if (IS_GEN(dev_priv, 6, 7))
         I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-    if (INTEL_INFO(dev_priv)->gen >= 6)
+    if (INTEL_GEN(dev_priv) >= 6)
         I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 
     return init_workarounds_ring(engine);
@@ -729,14 +729,6 @@ static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
 
 static const int i9xx_emit_breadcrumb_sz = 4;
 
-/**
- * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
- *
- * @request - request to write to the ring
- *
- * Update the mailbox registers in the *other* rings with the current seqno.
- * This acts like a signal in the canonical semaphore.
- */
 static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
 {
     return i9xx_emit_breadcrumb(req,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a0e7a6c2a57c..51523ad049de 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -279,6 +279,11 @@ struct intel_engine_execlists {
      * @csb_use_mmio: access csb through mmio, instead of hwsp
      */
     bool csb_use_mmio;
+
+    /**
+     * @preempt_complete_status: expected CSB upon completing preemption
+     */
+    u32 preempt_complete_status;
 };
 
 #define INTEL_ENGINE_CS_MAX_NAME 8
@@ -654,7 +659,7 @@ intel_engine_flag(const struct intel_engine_cs *engine)
 }
 
 static inline u32
-intel_read_status_page(struct intel_engine_cs *engine, int reg)
+intel_read_status_page(const struct intel_engine_cs *engine, int reg)
 {
     /* Ensure that the compiler doesn't optimize away the load. */
     return READ_ONCE(engine->status_page.page_addr[reg]);
@@ -812,8 +817,8 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
 
-u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
-u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
+u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
+u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
 
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
 {
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 70e659772a7a..b7924feb9f27 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -2646,6 +2646,48 @@ static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
2646 DRM_ERROR("DBuf power disable timeout!\n"); 2646 DRM_ERROR("DBuf power disable timeout!\n");
2647} 2647}
2648 2648
2649/*
2650 * TODO: we shouldn't always enable DBUF_CTL_S2, we should only enable it when
2651 * needed and keep it disabled as much as possible.
2652 */
2653static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
2654{
2655 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
2656 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
2657 POSTING_READ(DBUF_CTL_S2);
2658
2659 udelay(10);
2660
2661 if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
2662 !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
2663 DRM_ERROR("DBuf power enable timeout\n");
2664}
2665
2666static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
2667{
2668 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
2669 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
2670 POSTING_READ(DBUF_CTL_S2);
2671
2672 udelay(10);
2673
2674 if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
2675 (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
2676 DRM_ERROR("DBuf power disable timeout!\n");
2677}
2678
2679static void icl_mbus_init(struct drm_i915_private *dev_priv)
2680{
2681 uint32_t val;
2682
2683 val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
2684 MBUS_ABOX_BT_CREDIT_POOL2(16) |
2685 MBUS_ABOX_B_CREDIT(1) |
2686 MBUS_ABOX_BW_CREDIT(1);
2687
2688 I915_WRITE(MBUS_ABOX_CTL, val);
2689}
2690
2649static void skl_display_core_init(struct drm_i915_private *dev_priv, 2691static void skl_display_core_init(struct drm_i915_private *dev_priv,
2650 bool resume) 2692 bool resume)
2651{ 2693{
@@ -2794,12 +2836,19 @@ static const struct cnl_procmon {
2794 { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, }, 2836 { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
2795}; 2837};
2796 2838
2797static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv) 2839/*
2840 * CNL has just one set of registers, while ICL has two sets: one for port A and
2841 * the other for port B. The CNL registers are equivalent to the ICL port A
2842 * registers, that's why we call the ICL macros even though the function has CNL
2843 * on its name.
2844 */
2845static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
2846 enum port port)
2798{ 2847{
2799 const struct cnl_procmon *procmon; 2848 const struct cnl_procmon *procmon;
2800 u32 val; 2849 u32 val;
2801 2850
2802 val = I915_READ(CNL_PORT_COMP_DW3); 2851 val = I915_READ(ICL_PORT_COMP_DW3(port));
2803 switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) { 2852 switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
2804 default: 2853 default:
2805 MISSING_CASE(val); 2854 MISSING_CASE(val);
@@ -2820,13 +2869,13 @@ static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv)
2820 break; 2869 break;
2821 } 2870 }
2822 2871
2823 val = I915_READ(CNL_PORT_COMP_DW1); 2872 val = I915_READ(ICL_PORT_COMP_DW1(port));
2824 val &= ~((0xff << 16) | 0xff); 2873 val &= ~((0xff << 16) | 0xff);
2825 val |= procmon->dw1; 2874 val |= procmon->dw1;
2826 I915_WRITE(CNL_PORT_COMP_DW1, val); 2875 I915_WRITE(ICL_PORT_COMP_DW1(port), val);
2827 2876
2828 I915_WRITE(CNL_PORT_COMP_DW9, procmon->dw9); 2877 I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
2829 I915_WRITE(CNL_PORT_COMP_DW10, procmon->dw10); 2878 I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
2830} 2879}
2831 2880
2832static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume) 2881static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
@@ -2847,7 +2896,8 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
2847 val &= ~CNL_COMP_PWR_DOWN; 2896 val &= ~CNL_COMP_PWR_DOWN;
2848 I915_WRITE(CHICKEN_MISC_2, val); 2897 I915_WRITE(CHICKEN_MISC_2, val);
2849 2898
2850 cnl_set_procmon_ref_values(dev_priv); 2899 /* Dummy PORT_A to get the correct CNL register from the ICL macro */
2900 cnl_set_procmon_ref_values(dev_priv, PORT_A);
2851 2901
2852 val = I915_READ(CNL_PORT_COMP_DW0); 2902 val = I915_READ(CNL_PORT_COMP_DW0);
2853 val |= COMP_INIT; 2903 val |= COMP_INIT;
@@ -2911,6 +2961,80 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
2911 I915_WRITE(CHICKEN_MISC_2, val); 2961 I915_WRITE(CHICKEN_MISC_2, val);
2912} 2962}
2913 2963
2964static void icl_display_core_init(struct drm_i915_private *dev_priv,
2965 bool resume)
2966{
2967 enum port port;
2968 u32 val;
2969
2970 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2971
2972 /* 1. Enable PCH reset handshake. */
2973 val = I915_READ(HSW_NDE_RSTWRN_OPT);
2974 val |= RESET_PCH_HANDSHAKE_ENABLE;
2975 I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
2976
2977 for (port = PORT_A; port <= PORT_B; port++) {
2978 /* 2. Enable DDI combo PHY comp. */
2979 val = I915_READ(ICL_PHY_MISC(port));
2980 val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
2981 I915_WRITE(ICL_PHY_MISC(port), val);
2982
2983 cnl_set_procmon_ref_values(dev_priv, port);
2984
2985 val = I915_READ(ICL_PORT_COMP_DW0(port));
2986 val |= COMP_INIT;
2987 I915_WRITE(ICL_PORT_COMP_DW0(port), val);
2988
2989 /* 3. Set power down enable. */
2990 val = I915_READ(ICL_PORT_CL_DW5(port));
2991 val |= CL_POWER_DOWN_ENABLE;
2992 I915_WRITE(ICL_PORT_CL_DW5(port), val);
2993 }
2994
2995 /* 4. Enable power well 1 (PG1) and aux IO power. */
2996 /* FIXME: ICL power wells code not here yet. */
2997
2998 /* 5. Enable CDCLK. */
2999 icl_init_cdclk(dev_priv);
3000
3001 /* 6. Enable DBUF. */
3002 icl_dbuf_enable(dev_priv);
3003
3004 /* 7. Setup MBUS. */
3005 icl_mbus_init(dev_priv);
3006
3007 /* 8. CHICKEN_DCPR_1 */
3008 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
3009 CNL_DDI_CLOCK_REG_ACCESS_ON);
3010}
3011
3012static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
3013{
3014 enum port port;
3015 u32 val;
3016
3017 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3018
3019 /* 1. Disable all display engine functions -> already done */
3020
3021 /* 2. Disable DBUF */
3022 icl_dbuf_disable(dev_priv);
3023
3024 /* 3. Disable CD clock */
3025 icl_uninit_cdclk(dev_priv);
3026
3027 /* 4. Disable Power Well 1 (PG1) and Aux IO Power */
3028 /* FIXME: ICL power wells code not here yet. */
3029
3030 /* 5. Disable Comp */
3031 for (port = PORT_A; port <= PORT_B; port++) {
3032 val = I915_READ(ICL_PHY_MISC(port));
3033 val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
3034 I915_WRITE(ICL_PHY_MISC(port), val);
3035 }
3036}
3037
2914static void chv_phy_control_init(struct drm_i915_private *dev_priv) 3038static void chv_phy_control_init(struct drm_i915_private *dev_priv)
2915{ 3039{
2916 struct i915_power_well *cmn_bc = 3040 struct i915_power_well *cmn_bc =
@@ -3043,7 +3167,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
3043 3167
3044 power_domains->initializing = true; 3168 power_domains->initializing = true;
3045 3169
3046 if (IS_CANNONLAKE(dev_priv)) { 3170 if (IS_ICELAKE(dev_priv)) {
3171 icl_display_core_init(dev_priv, resume);
3172 } else if (IS_CANNONLAKE(dev_priv)) {
3047 cnl_display_core_init(dev_priv, resume); 3173 cnl_display_core_init(dev_priv, resume);
3048 } else if (IS_GEN9_BC(dev_priv)) { 3174 } else if (IS_GEN9_BC(dev_priv)) {
3049 skl_display_core_init(dev_priv, resume); 3175 skl_display_core_init(dev_priv, resume);
@@ -3084,7 +3210,9 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
3084 if (!i915_modparams.disable_power_well) 3210 if (!i915_modparams.disable_power_well)
3085 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); 3211 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
3086 3212
3087 if (IS_CANNONLAKE(dev_priv)) 3213 if (IS_ICELAKE(dev_priv))
3214 icl_display_core_uninit(dev_priv);
3215 else if (IS_CANNONLAKE(dev_priv))
3088 cnl_display_core_uninit(dev_priv); 3216 cnl_display_core_uninit(dev_priv);
3089 else if (IS_GEN9_BC(dev_priv)) 3217 else if (IS_GEN9_BC(dev_priv))
3090 skl_display_core_uninit(dev_priv); 3218 skl_display_core_uninit(dev_priv);
@@ -3200,18 +3328,19 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
3200 * @dev_priv: i915 device instance 3328 * @dev_priv: i915 device instance
3201 * 3329 *
3202 * This function grabs a device-level runtime pm reference if the device is 3330 * This function grabs a device-level runtime pm reference if the device is
3203 * already in use and ensures that it is powered up. 3331 * already in use and ensures that it is powered up. It is illegal to
3332 * access the HW if intel_runtime_pm_get_if_in_use() reports failure.
3204 * 3333 *
3205 * Any runtime pm reference obtained by this function must have a symmetric 3334 * Any runtime pm reference obtained by this function must have a symmetric
3206 * call to intel_runtime_pm_put() to release the reference again. 3335 * call to intel_runtime_pm_put() to release the reference again.
3336 *
3337 * Returns: True if the wakeref was acquired, False otherwise.
3207 */ 3338 */
3208bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv) 3339bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
3209{ 3340{
3210 struct pci_dev *pdev = dev_priv->drm.pdev;
3211 struct device *kdev = &pdev->dev;
3212
3213 if (IS_ENABLED(CONFIG_PM)) { 3341 if (IS_ENABLED(CONFIG_PM)) {
3214 int ret = pm_runtime_get_if_in_use(kdev); 3342 struct pci_dev *pdev = dev_priv->drm.pdev;
3343 struct device *kdev = &pdev->dev;
3215 3344
3216 /* 3345 /*
3217 * In cases runtime PM is disabled by the RPM core and we get 3346 * In cases runtime PM is disabled by the RPM core and we get
@@ -3219,9 +3348,7 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
3219 * function, since the power state is undefined. This applies 3348 * function, since the power state is undefined. This applies
3220 * atm to the late/early system suspend/resume handlers. 3349 * atm to the late/early system suspend/resume handlers.
3221 */ 3350 */
3222 WARN_ONCE(ret < 0, 3351 if (pm_runtime_get_if_in_use(kdev) <= 0)
3223 "pm_runtime_get_if_in_use() failed: %d\n", ret);
3224 if (ret <= 0)
3225 return false; 3352 return false;
3226 } 3353 }
3227 3354
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 0bf97ed5ffac..0c14d1c04cbd 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -214,7 +214,7 @@ static bool
214intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, 214intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
215 struct intel_sdvo_connector *intel_sdvo_connector); 215 struct intel_sdvo_connector *intel_sdvo_connector);
216 216
217/** 217/*
218 * Writes the SDVOB or SDVOC with the given value, but always writes both 218 * Writes the SDVOB or SDVOC with the given value, but always writes both
219 * SDVOB and SDVOC to work around apparent hardware issues (according to 219 * SDVOB and SDVOC to work around apparent hardware issues (according to
220 * comments in the BIOS). 220 * comments in the BIOS).
@@ -250,10 +250,10 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
250 * writing them only once doesn't appear to 'stick'. 250 * writing them only once doesn't appear to 'stick'.
251 * The BIOS does this too. Yay, magic 251 * The BIOS does this too. Yay, magic
252 */ 252 */
253 for (i = 0; i < 2; i++) 253 for (i = 0; i < 2; i++) {
254 {
255 I915_WRITE(GEN3_SDVOB, bval); 254 I915_WRITE(GEN3_SDVOB, bval);
256 POSTING_READ(GEN3_SDVOB); 255 POSTING_READ(GEN3_SDVOB);
256
257 I915_WRITE(GEN3_SDVOC, cval); 257 I915_WRITE(GEN3_SDVOC, cval);
258 POSTING_READ(GEN3_SDVOC); 258 POSTING_READ(GEN3_SDVOC);
259 } 259 }
@@ -643,7 +643,7 @@ static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
643 &targets, sizeof(targets)); 643 &targets, sizeof(targets));
644} 644}
645 645
646/** 646/*
647 * Return whether each input is trained. 647 * Return whether each input is trained.
648 * 648 *
649 * This function is making an assumption about the layout of the response, 649 * This function is making an assumption about the layout of the response,
@@ -1061,8 +1061,10 @@ intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
1061 return true; 1061 return true;
1062} 1062}
1063 1063
1064/* Asks the sdvo controller for the preferred input mode given the output mode. 1064/*
1065 * Unfortunately we have to set up the full output mode to do that. */ 1065 * Asks the sdvo controller for the preferred input mode given the output mode.
1066 * Unfortunately we have to set up the full output mode to do that.
1067 */
1066static bool 1068static bool
1067intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo, 1069intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
1068 const struct drm_display_mode *mode, 1070 const struct drm_display_mode *mode,
@@ -1095,8 +1097,10 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
1095 unsigned dotclock = pipe_config->port_clock; 1097 unsigned dotclock = pipe_config->port_clock;
1096 struct dpll *clock = &pipe_config->dpll; 1098 struct dpll *clock = &pipe_config->dpll;
1097 1099
1098 /* SDVO TV has fixed PLL values depend on its clock range, 1100 /*
1099 this mirrors vbios setting. */ 1101 * SDVO TV has fixed PLL values that depend on its clock range,
1102 * this mirrors vbios setting.
1103 */
1100 if (dotclock >= 100000 && dotclock < 140500) { 1104 if (dotclock >= 100000 && dotclock < 140500) {
1101 clock->p1 = 2; 1105 clock->p1 = 2;
1102 clock->p2 = 10; 1106 clock->p2 = 10;
@@ -1132,7 +1136,8 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1132 if (HAS_PCH_SPLIT(to_i915(encoder->base.dev))) 1136 if (HAS_PCH_SPLIT(to_i915(encoder->base.dev)))
1133 pipe_config->has_pch_encoder = true; 1137 pipe_config->has_pch_encoder = true;
1134 1138
1135 /* We need to construct preferred input timings based on our 1139 /*
1140 * We need to construct preferred input timings based on our
1136 * output timings. To do that, we have to set the output 1141 * output timings. To do that, we have to set the output
1137 * timings, even though this isn't really the right place in 1142 * timings, even though this isn't really the right place in
1138 * the sequence to do it. Oh well. 1143 * the sequence to do it. Oh well.
@@ -1155,7 +1160,8 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1155 adjusted_mode); 1160 adjusted_mode);
1156 } 1161 }
1157 1162
1158 /* Make the CRTC code factor in the SDVO pixel multiplier. The 1163 /*
1164 * Make the CRTC code factor in the SDVO pixel multiplier. The
1159 * SDVO device will factor out the multiplier during mode_set. 1165 * SDVO device will factor out the multiplier during mode_set.
1160 */ 1166 */
1161 pipe_config->pixel_multiplier = 1167 pipe_config->pixel_multiplier =
@@ -1169,9 +1175,12 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1169 pipe_config->has_audio = true; 1175 pipe_config->has_audio = true;
1170 1176
1171 if (intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { 1177 if (intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
1172 /* See CEA-861-E - 5.1 Default Encoding Parameters */ 1178 /*
1173 /* FIXME: This bit is only valid when using TMDS encoding and 8 1179 * See CEA-861-E - 5.1 Default Encoding Parameters
1174 * bit per color mode. */ 1180 *
1181 * FIXME: This bit is only valid when using TMDS encoding and 8
1182 * bit per color mode.
1183 */
1175 if (pipe_config->has_hdmi_sink && 1184 if (pipe_config->has_hdmi_sink &&
1176 drm_match_cea_mode(adjusted_mode) > 1) 1185 drm_match_cea_mode(adjusted_mode) > 1)
1177 pipe_config->limited_color_range = true; 1186 pipe_config->limited_color_range = true;
@@ -1272,7 +1281,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
1272 1281
1273 intel_sdvo_update_props(intel_sdvo, sdvo_state); 1282 intel_sdvo_update_props(intel_sdvo, sdvo_state);
1274 1283
1275 /* First, set the input mapping for the first input to our controlled 1284 /*
1285 * First, set the input mapping for the first input to our controlled
1276 * output. This is only correct if we're a single-input device, in 1286 * output. This is only correct if we're a single-input device, in
1277 * which case the first input is the output from the appropriate SDVO 1287 * which case the first input is the output from the appropriate SDVO
1278 * channel on the motherboard. In a two-input device, the first input 1288 * channel on the motherboard. In a two-input device, the first input
@@ -1435,8 +1445,10 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
1435 1445
1436 ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd); 1446 ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
1437 if (!ret) { 1447 if (!ret) {
1438 /* Some sdvo encoders are not spec compliant and don't 1448 /*
1439 * implement the mandatory get_timings function. */ 1449 * Some sdvo encoders are not spec compliant and don't
1450 * implement the mandatory get_timings function.
1451 */
1440 DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n"); 1452 DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n");
1441 pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS; 1453 pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS;
1442 } else { 1454 } else {
@@ -1585,7 +1597,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
1585 intel_wait_for_vblank(dev_priv, intel_crtc->pipe); 1597 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
1586 1598
1587 success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); 1599 success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
1588 /* Warn if the device reported failure to sync. 1600 /*
1601 * Warn if the device reported failure to sync.
1602 *
1589 * A lot of SDVO devices fail to notify of sync, but it's 1603 * A lot of SDVO devices fail to notify of sync, but it's
1590 * a given if the status is a success, we succeeded. 1604 * a given if the status is a success, we succeeded.
1591 */ 1605 */
@@ -1672,8 +1686,10 @@ static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
1672 if (!I915_HAS_HOTPLUG(dev_priv)) 1686 if (!I915_HAS_HOTPLUG(dev_priv))
1673 return 0; 1687 return 0;
1674 1688
1675 /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise 1689 /*
1676 * on the line. */ 1690 * HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
1691 * on the line.
1692 */
1677 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) 1693 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1678 return 0; 1694 return 0;
1679 1695
@@ -1957,7 +1973,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1957 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 1973 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1958 connector->base.id, connector->name); 1974 connector->base.id, connector->name);
1959 1975
1960 /* Read the list of supported input resolutions for the selected TV 1976 /*
1977 * Read the list of supported input resolutions for the selected TV
1961 * format. 1978 * format.
1962 */ 1979 */
1963 format_map = 1 << conn_state->tv.mode; 1980 format_map = 1 << conn_state->tv.mode;
@@ -2268,7 +2285,8 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
2268 uint16_t mask = 0; 2285 uint16_t mask = 0;
2269 unsigned int num_bits; 2286 unsigned int num_bits;
2270 2287
2271 /* Make a mask of outputs less than or equal to our own priority in the 2288 /*
2289 * Make a mask of outputs less than or equal to our own priority in the
2272 * list. 2290 * list.
2273 */ 2291 */
2274 switch (sdvo->controlled_output) { 2292 switch (sdvo->controlled_output) {
@@ -2298,7 +2316,7 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
2298 sdvo->ddc_bus = 1 << num_bits; 2316 sdvo->ddc_bus = 1 << num_bits;
2299} 2317}
2300 2318
2301/** 2319/*
2302 * Choose the appropriate DDC bus for the control bus switch command for this 2320 * Choose the appropriate DDC bus for the control bus switch command for this
2303 * SDVO output based on the controlled output. 2321 * SDVO output based on the controlled output.
2304 * 2322 *
@@ -2342,9 +2360,11 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
2342 2360
2343 sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin); 2361 sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
2344 2362
2345 /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow 2363 /*
2364 * With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
2346 * our code totally fails once we start using gmbus. Hence fall back to 2365 * our code totally fails once we start using gmbus. Hence fall back to
2347 * bit banging for now. */ 2366 * bit banging for now.
2367 */
2348 intel_gmbus_force_bit(sdvo->i2c, true); 2368 intel_gmbus_force_bit(sdvo->i2c, true);
2349} 2369}
2350 2370
@@ -2379,7 +2399,8 @@ intel_sdvo_get_slave_addr(struct drm_i915_private *dev_priv,
2379 if (my_mapping->slave_addr) 2399 if (my_mapping->slave_addr)
2380 return my_mapping->slave_addr; 2400 return my_mapping->slave_addr;
2381 2401
2382 /* If the BIOS only described a different SDVO device, use the 2402 /*
2403 * If the BIOS only described a different SDVO device, use the
2383 * address that it isn't using. 2404 * address that it isn't using.
2384 */ 2405 */
2385 if (other_mapping->slave_addr) { 2406 if (other_mapping->slave_addr) {
@@ -2389,7 +2410,8 @@ intel_sdvo_get_slave_addr(struct drm_i915_private *dev_priv,
2389 return 0x70; 2410 return 0x70;
2390 } 2411 }
2391 2412
2392 /* No SDVO device info is found for another DVO port, 2413 /*
2414 * No SDVO device info is found for another DVO port,
2393 * so use the mapping assumption we had before BIOS parsing. 2415 * so use the mapping assumption we had before BIOS parsing.
2394 */ 2416 */
2395 if (sdvo->port == PORT_B) 2417 if (sdvo->port == PORT_B)
@@ -2490,7 +2512,8 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2490 if (intel_sdvo_get_hotplug_support(intel_sdvo) & 2512 if (intel_sdvo_get_hotplug_support(intel_sdvo) &
2491 intel_sdvo_connector->output_flag) { 2513 intel_sdvo_connector->output_flag) {
2492 intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag; 2514 intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
2493 /* Some SDVO devices have one-shot hotplug interrupts. 2515 /*
2516 * Some SDVO devices have one-shot hotplug interrupts.
2494 * Ensure that they get re-enabled when an interrupt happens. 2517 * Ensure that they get re-enabled when an interrupt happens.
2495 */ 2518 */
2496 intel_encoder->hot_plug = intel_sdvo_enable_hotplug; 2519 intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
@@ -2789,7 +2812,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2789 to_intel_sdvo_connector_state(conn_state); 2812 to_intel_sdvo_connector_state(conn_state);
2790 uint16_t response, data_value[2]; 2813 uint16_t response, data_value[2];
2791 2814
2792 /* when horizontal overscan is supported, add the left/right property */ 2815 /* when horizontal overscan is supported, add the left/right property */
2793 if (enhancements.overscan_h) { 2816 if (enhancements.overscan_h) {
2794 if (!intel_sdvo_get_value(intel_sdvo, 2817 if (!intel_sdvo_get_value(intel_sdvo,
2795 SDVO_CMD_GET_MAX_OVERSCAN_H, 2818 SDVO_CMD_GET_MAX_OVERSCAN_H,
@@ -3074,7 +3097,8 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
3074 goto err_output; 3097 goto err_output;
3075 } 3098 }
3076 3099
3077 /* Only enable the hotplug irq if we need it, to work around noisy 3100 /*
3101 * Only enable the hotplug irq if we need it, to work around noisy
3078 * hotplug lines. 3102 * hotplug lines.
3079 */ 3103 */
3080 if (intel_sdvo->hotplug_active) { 3104 if (intel_sdvo->hotplug_active) {
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 3be22c0fcfb5..e098e4b2c85c 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -1063,8 +1063,8 @@ intel_check_sprite_plane(struct intel_plane *plane,
1063 return 0; 1063 return 0;
1064} 1064}
1065 1065
1066int intel_sprite_set_colorkey(struct drm_device *dev, void *data, 1066int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
1067 struct drm_file *file_priv) 1067 struct drm_file *file_priv)
1068{ 1068{
1069 struct drm_i915_private *dev_priv = to_i915(dev); 1069 struct drm_i915_private *dev_priv = to_i915(dev);
1070 struct drm_intel_sprite_colorkey *set = data; 1070 struct drm_intel_sprite_colorkey *set = data;
@@ -1077,6 +1077,9 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
1077 /* ignore the pointless "none" flag */ 1077 /* ignore the pointless "none" flag */
1078 set->flags &= ~I915_SET_COLORKEY_NONE; 1078 set->flags &= ~I915_SET_COLORKEY_NONE;
1079 1079
1080 if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
1081 return -EINVAL;
1082
1080 /* Make sure we don't try to enable both src & dest simultaneously */ 1083 /* Make sure we don't try to enable both src & dest simultaneously */
1081 if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) 1084 if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
1082 return -EINVAL; 1085 return -EINVAL;
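The added test tightens the uapi: any bit outside the two supported key flags is now rejected before the existing mutual-exclusion check. The resulting ordering, as a standalone sketch:

    /* Sketch: sanitize the colorkey flags, unknown bits first. */
    const u32 known = I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE;

    if (set->flags & ~known)
        return -EINVAL; /* unknown flag bits */
    if ((set->flags & known) == known)
        return -EINVAL; /* source and destination keying are exclusive */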
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index b3dabc219e6a..885fc3809f7f 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -43,7 +43,6 @@ enum tv_margin {
43 TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM 43 TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM
44}; 44};
45 45
46/** Private structure for the integrated TV support */
47struct intel_tv { 46struct intel_tv {
48 struct intel_encoder base; 47 struct intel_encoder base;
49 48
@@ -370,12 +369,11 @@ struct tv_mode {
370 * The constants below were all computed using a 107.520MHz clock 369 * The constants below were all computed using a 107.520MHz clock
371 */ 370 */
372 371
373/** 372/*
374 * Register programming values for TV modes. 373 * Register programming values for TV modes.
375 * 374 *
376 * These values account for -1s required. 375 * These values account for -1s required.
377 */ 376 */
378
379static const struct tv_mode tv_modes[] = { 377static const struct tv_mode tv_modes[] = {
380 { 378 {
381 .name = "NTSC-M", 379 .name = "NTSC-M",
@@ -1126,14 +1124,6 @@ static const struct drm_display_mode reported_modes[] = {
1126 }, 1124 },
1127}; 1125};
1128 1126
1129/**
1130 * Detects TV presence by checking for load.
1131 *
1132 * Requires that the current pipe's DPLL is active.
1133
1134 * \return true if TV is connected.
1135 * \return false if TV is disconnected.
1136 */
1137static int 1127static int
1138intel_tv_detect_type(struct intel_tv *intel_tv, 1128intel_tv_detect_type(struct intel_tv *intel_tv,
1139 struct drm_connector *connector) 1129 struct drm_connector *connector)
@@ -1259,12 +1249,6 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
1259 connector->state->tv.mode = i; 1249 connector->state->tv.mode = i;
1260} 1250}
1261 1251
1262/**
1263 * Detect the TV connection.
1264 *
1265 * Currently this always returns CONNECTOR_STATUS_UNKNOWN, as we need to be sure
1266 * we have a pipe programmed in order to probe the TV.
1267 */
1268static int 1252static int
1269intel_tv_detect(struct drm_connector *connector, 1253intel_tv_detect(struct drm_connector *connector,
1270 struct drm_modeset_acquire_ctx *ctx, 1254 struct drm_modeset_acquire_ctx *ctx,
@@ -1339,13 +1323,6 @@ intel_tv_choose_preferred_modes(const struct tv_mode *tv_mode,
1339 } 1323 }
1340} 1324}
1341 1325
1342/**
1343 * Stub get_modes function.
1344 *
1345 * This should probably return a set of fixed modes, unless we can figure out
1346 * how to probe modes off of TV connections.
1347 */
1348
1349static int 1326static int
1350intel_tv_get_modes(struct drm_connector *connector) 1327intel_tv_get_modes(struct drm_connector *connector)
1351{ 1328{
@@ -1512,7 +1489,8 @@ intel_tv_init(struct drm_i915_private *dev_priv)
1512 connector = &intel_connector->base; 1489 connector = &intel_connector->base;
1513 state = connector->state; 1490 state = connector->state;
1514 1491
1515 /* The documentation, for the older chipsets at least, recommend 1492 /*
1493 * The documentation, for the older chipsets at least, recommends
1516 * using a polling method rather than hotplug detection for TVs. 1494 * using a polling method rather than hotplug detection for TVs.
1517 * This is because in order to perform the hotplug detection, the PLLs 1495 * This is because in order to perform the hotplug detection, the PLLs
1518 * for the TV must be kept alive increasing power drain and starving 1496 * for the TV must be kept alive increasing power drain and starving
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index 784eff9cdfc8..3ec0ce505b76 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -197,11 +197,12 @@ fail:
197 197
198/** 198/**
199 * intel_uc_fw_upload - load uC firmware using custom loader 199 * intel_uc_fw_upload - load uC firmware using custom loader
200 *
201 * @uc_fw: uC firmware 200 * @uc_fw: uC firmware
202 * @loader: custom uC firmware loader function 201 * @xfer: custom uC firmware loader function
203 * 202 *
204 * Loads uC firmware using custom loader and updates internal flags. 203 * Loads uC firmware using custom loader and updates internal flags.
204 *
205 * Return: 0 on success, non-zero on failure.
205 */ 206 */
206int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, 207int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
207 int (*xfer)(struct intel_uc_fw *uc_fw, 208 int (*xfer)(struct intel_uc_fw *uc_fw,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 164dbb8cfa36..5ae9a62712ca 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1522,9 +1522,11 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
1522 engine->name); 1522 engine->name);
1523 1523
1524 I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base))); 1524 I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
1525 POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
1525 1526
1526 I915_WRITE_FW(RING_HEAD(base), 0); 1527 I915_WRITE_FW(RING_HEAD(base), 0);
1527 I915_WRITE_FW(RING_TAIL(base), 0); 1528 I915_WRITE_FW(RING_TAIL(base), 0);
1529 POSTING_READ_FW(RING_TAIL(base));
1528 1530
1529 /* The ring must be empty before it is disabled */ 1531 /* The ring must be empty before it is disabled */
1530 I915_WRITE_FW(RING_CTL(base), 0); 1532 I915_WRITE_FW(RING_CTL(base), 0);
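The added POSTING_READ_FW() calls serialize the MMIO writes: reading the register back forces the posted write to land before the next step of the stop sequence. The idiom, as a sketch:

    /* Sketch: flush a posted MMIO write by reading the same register back. */
    I915_WRITE_FW(RING_HEAD(base), 0);
    POSTING_READ_FW(RING_HEAD(base)); /* write has completed past this point */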
@@ -1548,24 +1550,31 @@ static void i915_stop_engines(struct drm_i915_private *dev_priv,
1548 gen3_stop_engine(engine); 1550 gen3_stop_engine(engine);
1549} 1551}
1550 1552
1551static bool i915_reset_complete(struct pci_dev *pdev) 1553static bool i915_in_reset(struct pci_dev *pdev)
1552{ 1554{
1553 u8 gdrst; 1555 u8 gdrst;
1554 1556
1555 pci_read_config_byte(pdev, I915_GDRST, &gdrst); 1557 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1556 return (gdrst & GRDOM_RESET_STATUS) == 0; 1558 return gdrst & GRDOM_RESET_STATUS;
1557} 1559}
1558 1560
1559static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) 1561static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1560{ 1562{
1561 struct pci_dev *pdev = dev_priv->drm.pdev; 1563 struct pci_dev *pdev = dev_priv->drm.pdev;
1564 int err;
1562 1565
1563 /* assert reset for at least 20 usec */ 1566 /* Assert reset for at least 20 usec, and wait for acknowledgement. */
1564 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1567 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1565 usleep_range(50, 200); 1568 usleep_range(50, 200);
1569 err = wait_for(i915_in_reset(pdev), 500);
1570
1571 /* Clear the reset request. */
1566 pci_write_config_byte(pdev, I915_GDRST, 0); 1572 pci_write_config_byte(pdev, I915_GDRST, 0);
1573 usleep_range(50, 200);
1574 if (!err)
1575 err = wait_for(!i915_in_reset(pdev), 500);
1567 1576
1568 return wait_for(i915_reset_complete(pdev), 500); 1577 return err;
1569} 1578}
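For reference, wait_for(COND, MS) is the i915 polling helper: it re-evaluates COND until it holds or the millisecond timeout expires, returning 0 on success and -ETIMEDOUT otherwise. An illustrative open-coded equivalent, not the actual macro:

    /* Sketch: roughly what wait_for(i915_in_reset(pdev), 500) does. */
    int remaining_ms = 500;

    while (!i915_in_reset(pdev)) {
        if (remaining_ms-- <= 0)
            return -ETIMEDOUT;
        usleep_range(1000, 2000); /* the real helper tunes its poll interval */
    }
    return 0;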
1570 1579
1571static bool g4x_reset_complete(struct pci_dev *pdev) 1580static bool g4x_reset_complete(struct pci_dev *pdev)
@@ -1874,9 +1883,9 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
1874 if (!i915_modparams.reset) 1883 if (!i915_modparams.reset)
1875 return NULL; 1884 return NULL;
1876 1885
1877 if (INTEL_INFO(dev_priv)->gen >= 8) 1886 if (INTEL_GEN(dev_priv) >= 8)
1878 return gen8_reset_engines; 1887 return gen8_reset_engines;
1879 else if (INTEL_INFO(dev_priv)->gen >= 6) 1888 else if (INTEL_GEN(dev_priv) >= 6)
1880 return gen6_reset_engines; 1889 return gen6_reset_engines;
1881 else if (IS_GEN5(dev_priv)) 1890 else if (IS_GEN5(dev_priv))
1882 return ironlake_do_reset; 1891 return ironlake_do_reset;
@@ -1884,7 +1893,7 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
1884 return g4x_do_reset; 1893 return g4x_do_reset;
1885 else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) 1894 else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
1886 return g33_do_reset; 1895 return g33_do_reset;
1887 else if (INTEL_INFO(dev_priv)->gen >= 3) 1896 else if (INTEL_GEN(dev_priv) >= 3)
1888 return i915_do_reset; 1897 return i915_do_reset;
1889 else 1898 else
1890 return NULL; 1899 return NULL;
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index bed019ef000f..53ef77d0c97c 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -198,4 +198,9 @@ int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
198 2, timeout_ms, NULL); 198 2, timeout_ms, NULL);
199} 199}
200 200
201#define raw_reg_read(base, reg) \
202 readl(base + i915_mmio_reg_offset(reg))
203#define raw_reg_write(base, reg, value) \
204 writel(value, base + i915_mmio_reg_offset(reg))
205
201#endif /* !__INTEL_UNCORE_H__ */ 206#endif /* !__INTEL_UNCORE_H__ */
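These raw accessors bypass the uncore machinery (no forcewake handling, no tracing), so they suit hot paths that read registers outside the power well bookkeeping. A usage sketch, assuming the mapped register base kept in dev_priv->regs:

    /* Sketch: direct register access through a pre-mapped base. */
    void __iomem * const regs = dev_priv->regs;
    u32 master_ctl;

    master_ctl = raw_reg_read(regs, GEN8_MASTER_IRQ);
    raw_reg_write(regs, GEN8_MASTER_IRQ, master_ctl);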
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
index a2632df39173..391f3d9ffdf1 100644
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
@@ -129,8 +129,8 @@ huge_gem_object(struct drm_i915_private *i915,
129 drm_gem_private_object_init(&i915->drm, &obj->base, dma_size); 129 drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
130 i915_gem_object_init(obj, &huge_ops); 130 i915_gem_object_init(obj, &huge_ops);
131 131
132 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 132 obj->read_domains = I915_GEM_DOMAIN_CPU;
133 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 133 obj->write_domain = I915_GEM_DOMAIN_CPU;
134 cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE; 134 cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
135 i915_gem_object_set_cache_coherency(obj, cache_level); 135 i915_gem_object_set_cache_coherency(obj, cache_level);
136 obj->scratch = phys_size; 136 obj->scratch = phys_size;
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 2ea69394f428..52b1bd17bf46 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -178,8 +178,8 @@ huge_pages_object(struct drm_i915_private *i915,
178 drm_gem_private_object_init(&i915->drm, &obj->base, size); 178 drm_gem_private_object_init(&i915->drm, &obj->base, size);
179 i915_gem_object_init(obj, &huge_page_ops); 179 i915_gem_object_init(obj, &huge_page_ops);
180 180
181 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 181 obj->write_domain = I915_GEM_DOMAIN_CPU;
182 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 182 obj->read_domains = I915_GEM_DOMAIN_CPU;
183 obj->cache_level = I915_CACHE_NONE; 183 obj->cache_level = I915_CACHE_NONE;
184 184
185 obj->mm.page_mask = page_mask; 185 obj->mm.page_mask = page_mask;
@@ -329,8 +329,8 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
329 else 329 else
330 i915_gem_object_init(obj, &fake_ops); 330 i915_gem_object_init(obj, &fake_ops);
331 331
332 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 332 obj->write_domain = I915_GEM_DOMAIN_CPU;
333 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 333 obj->read_domains = I915_GEM_DOMAIN_CPU;
334 obj->cache_level = I915_CACHE_NONE; 334 obj->cache_level = I915_CACHE_NONE;
335 335
336 return obj; 336 return obj;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 56a803d11916..6da2a2f29c54 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -215,8 +215,8 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
215 } 215 }
216 216
217 i915_gem_obj_finish_shmem_access(obj); 217 i915_gem_obj_finish_shmem_access(obj);
218 obj->base.read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU; 218 obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
219 obj->base.write_domain = 0; 219 obj->write_domain = 0;
220 return 0; 220 return 0;
221} 221}
222 222
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index d8064276431c..f7dc926f4ef1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -113,8 +113,8 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
113 drm_gem_private_object_init(&i915->drm, &obj->base, size); 113 drm_gem_private_object_init(&i915->drm, &obj->base, size);
114 i915_gem_object_init(obj, &fake_ops); 114 i915_gem_object_init(obj, &fake_ops);
115 115
116 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 116 obj->write_domain = I915_GEM_DOMAIN_CPU;
117 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 117 obj->read_domains = I915_GEM_DOMAIN_CPU;
118 obj->cache_level = I915_CACHE_NONE; 118 obj->cache_level = I915_CACHE_NONE;
119 119
120 /* Preallocate the "backing storage" */ 120 /* Preallocate the "backing storage" */
@@ -927,7 +927,7 @@ static int shrink_boom(struct drm_i915_private *i915,
927 927
928 explode = fake_dma_object(i915, size); 928 explode = fake_dma_object(i915, size);
929 if (IS_ERR(explode)) { 929 if (IS_ERR(explode)) {
930 err = PTR_ERR(purge); 930 err = PTR_ERR(explode);
931 goto err_purge; 931 goto err_purge;
932 } 932 }
933 933
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index f32aa6bb79e2..3c64815e910b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -212,8 +212,11 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
212 return -EINTR; 212 return -EINTR;
213 213
214 err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride); 214 err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
215 if (err) 215 if (err) {
216 pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
217 tile->tiling, tile->stride, err);
216 return err; 218 return err;
219 }
217 220
218 GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling); 221 GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
219 GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride); 222 GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
@@ -230,13 +233,16 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
230 GEM_BUG_ON(view.partial.size > nreal); 233 GEM_BUG_ON(view.partial.size > nreal);
231 234
232 err = i915_gem_object_set_to_gtt_domain(obj, true); 235 err = i915_gem_object_set_to_gtt_domain(obj, true);
233 if (err) 236 if (err) {
237 pr_err("Failed to flush to GTT write domain; err=%d\n",
238 err);
234 return err; 239 return err;
240 }
235 241
236 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); 242 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
237 if (IS_ERR(vma)) { 243 if (IS_ERR(vma)) {
238 pr_err("Failed to pin partial view: offset=%lu\n", 244 pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
239 page); 245 page, (int)PTR_ERR(vma));
240 return PTR_ERR(vma); 246 return PTR_ERR(vma);
241 } 247 }
242 248
@@ -246,8 +252,8 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
246 io = i915_vma_pin_iomap(vma); 252 io = i915_vma_pin_iomap(vma);
247 i915_vma_unpin(vma); 253 i915_vma_unpin(vma);
248 if (IS_ERR(io)) { 254 if (IS_ERR(io)) {
249 pr_err("Failed to iomap partial view: offset=%lu\n", 255 pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
250 page); 256 page, (int)PTR_ERR(io));
251 return PTR_ERR(io); 257 return PTR_ERR(io);
252 } 258 }
253 259
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index 3f9016466dea..fb74e2cf8a0a 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -87,7 +87,7 @@ static int validate_client(struct intel_guc_client *client,
87 87
88static bool client_doorbell_in_sync(struct intel_guc_client *client) 88static bool client_doorbell_in_sync(struct intel_guc_client *client)
89{ 89{
90 return doorbell_ok(client->guc, client->doorbell_id); 90 return !client || doorbell_ok(client->guc, client->doorbell_id);
91} 91}
92 92
93/* 93/*
@@ -137,7 +137,6 @@ static int igt_guc_clients(void *args)
137 goto unlock; 137 goto unlock;
138 } 138 }
139 GEM_BUG_ON(!guc->execbuf_client); 139 GEM_BUG_ON(!guc->execbuf_client);
140 GEM_BUG_ON(!guc->preempt_client);
141 140
142 err = validate_client(guc->execbuf_client, 141 err = validate_client(guc->execbuf_client,
143 GUC_CLIENT_PRIORITY_KMD_NORMAL, false); 142 GUC_CLIENT_PRIORITY_KMD_NORMAL, false);
@@ -146,16 +145,18 @@ static int igt_guc_clients(void *args)
146 goto out; 145 goto out;
147 } 146 }
148 147
149 err = validate_client(guc->preempt_client, 148 if (guc->preempt_client) {
150 GUC_CLIENT_PRIORITY_KMD_HIGH, true); 149 err = validate_client(guc->preempt_client,
151 if (err) { 150 GUC_CLIENT_PRIORITY_KMD_HIGH, true);
152 pr_err("preempt client validation failed\n"); 151 if (err) {
153 goto out; 152 pr_err("preempt client validation failed\n");
153 goto out;
154 }
154 } 155 }
155 156
156 /* each client should now have reserved a doorbell */ 157 /* each client should now have reserved a doorbell */
157 if (!has_doorbell(guc->execbuf_client) || 158 if (!has_doorbell(guc->execbuf_client) ||
158 !has_doorbell(guc->preempt_client)) { 159 (guc->preempt_client && !has_doorbell(guc->preempt_client))) {
159 pr_err("guc_clients_create didn't reserve doorbells\n"); 160 pr_err("guc_clients_create didn't reserve doorbells\n");
160 err = -EINVAL; 161 err = -EINVAL;
161 goto out; 162 goto out;
@@ -224,7 +225,8 @@ out:
224 * clients during unload. 225 * clients during unload.
225 */ 226 */
226 destroy_doorbell(guc->execbuf_client); 227 destroy_doorbell(guc->execbuf_client);
227 destroy_doorbell(guc->preempt_client); 228 if (guc->preempt_client)
229 destroy_doorbell(guc->preempt_client);
228 guc_clients_destroy(guc); 230 guc_clients_destroy(guc);
229 guc_clients_create(guc); 231 guc_clients_create(guc);
230 guc_clients_doorbell_init(guc); 232 guc_clients_doorbell_init(guc);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 1bc61f3f76fc..3175db70cc6e 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -243,16 +243,10 @@ struct drm_i915_private *mock_gem_device(void)
243 if (!i915->kernel_context) 243 if (!i915->kernel_context)
244 goto err_engine; 244 goto err_engine;
245 245
246 i915->preempt_context = mock_context(i915, NULL);
247 if (!i915->preempt_context)
248 goto err_kernel_context;
249
250 WARN_ON(i915_gemfs_init(i915)); 246 WARN_ON(i915_gemfs_init(i915));
251 247
252 return i915; 248 return i915;
253 249
254err_kernel_context:
255 i915_gem_context_put(i915->kernel_context);
256err_engine: 250err_engine:
257 for_each_engine(engine, i915, id) 251 for_each_engine(engine, i915, id)
258 mock_engine_free(engine); 252 mock_engine_free(engine);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index dfda5e0ed166..26129b2b082d 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -570,7 +570,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
570 base &= ~7; 570 base &= ~7;
571 } 571 }
572 work->base = base; 572 work->base = base;
573 work->target_vblank = target - drm_crtc_vblank_count(crtc) + 573 work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
574 dev->driver->get_vblank_counter(dev, work->crtc_id); 574 dev->driver->get_vblank_counter(dev, work->crtc_id);
575 575
576 /* We borrow the event spin lock for protecting flip_work */ 576 /* We borrow the event spin lock for protecting flip_work */
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index b8403ed48285..49df2db2ad46 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1359,7 +1359,7 @@ static u32 tegra_dc_get_vblank_counter(struct drm_crtc *crtc)
1359 return host1x_syncpt_read(dc->syncpt); 1359 return host1x_syncpt_read(dc->syncpt);
1360 1360
1361 /* fallback to software emulated VBLANK counter */ 1361 /* fallback to software emulated VBLANK counter */
1362 return drm_crtc_vblank_count(&dc->base); 1362 return (u32)drm_crtc_vblank_count(&dc->base);
1363} 1363}
1364 1364
1365static int tegra_dc_enable_vblank(struct drm_crtc *crtc) 1365static int tegra_dc_enable_vblank(struct drm_crtc *crtc)
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index cf13842a6dbd..2c711a24c80c 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -154,7 +154,7 @@ struct __drm_crtcs_state {
154 struct drm_crtc *ptr; 154 struct drm_crtc *ptr;
155 struct drm_crtc_state *state, *old_state, *new_state; 155 struct drm_crtc_state *state, *old_state, *new_state;
156 s32 __user *out_fence_ptr; 156 s32 __user *out_fence_ptr;
157 unsigned last_vblank_count; 157 u64 last_vblank_count;
158}; 158};
159 159
160struct __drm_connnectors_state { 160struct __drm_connnectors_state {
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 9c55c2acaa2b..3583b98a1718 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -116,21 +116,6 @@ struct drm_gem_object {
116 int name; 116 int name;
117 117
118 /** 118 /**
119 * @read_domains:
120 *
121 * Read memory domains. These monitor which caches contain read/write data
122 * related to the object. When transitioning from one set of domains
123 * to another, the driver is called to ensure that caches are suitably
124 * flushed and invalidated.
125 */
126 uint32_t read_domains;
127
128 /**
129 * @write_domain: Corresponding unique write memory domain.
130 */
131 uint32_t write_domain;
132
133 /**
134 * @dma_buf: 119 * @dma_buf:
135 * 120 *
136 * dma-buf associated with this GEM object. 121 * dma-buf associated with this GEM object.
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index 425ad80ed2ac..d25a9603ab57 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -195,7 +195,9 @@ void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
195void drm_crtc_vblank_off(struct drm_crtc *crtc); 195void drm_crtc_vblank_off(struct drm_crtc *crtc);
196void drm_crtc_vblank_reset(struct drm_crtc *crtc); 196void drm_crtc_vblank_reset(struct drm_crtc *crtc);
197void drm_crtc_vblank_on(struct drm_crtc *crtc); 197void drm_crtc_vblank_on(struct drm_crtc *crtc);
198u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc); 198u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
199void drm_vblank_restore(struct drm_device *dev, unsigned int pipe);
200void drm_crtc_vblank_restore(struct drm_crtc *crtc);
199 201
200bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, 202bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
201 unsigned int pipe, int *max_error, 203 unsigned int pipe, int *max_error,
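drm_crtc_accurate_vblank_count() now returns u64, matching the widened counter in __drm_crtcs_state above; drivers that still do 32-bit counter arithmetic truncate explicitly, as the radeon and tegra hunks show. The pattern, as a sketch:

    /* Sketch: explicit truncation where legacy 32-bit counter math remains. */
    u32 seq = (u32)drm_crtc_vblank_count(crtc);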
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 9e1fe6634424..0b2ba46fa00b 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -416,18 +416,19 @@
416 416
417/* CNL */ 417/* CNL */
418#define INTEL_CNL_IDS(info) \ 418#define INTEL_CNL_IDS(info) \
419 INTEL_VGA_DEVICE(0x5A52, info), \
420 INTEL_VGA_DEVICE(0x5A5A, info), \
421 INTEL_VGA_DEVICE(0x5A42, info), \
422 INTEL_VGA_DEVICE(0x5A4A, info), \
423 INTEL_VGA_DEVICE(0x5A51, info), \ 419 INTEL_VGA_DEVICE(0x5A51, info), \
424 INTEL_VGA_DEVICE(0x5A59, info), \ 420 INTEL_VGA_DEVICE(0x5A59, info), \
425 INTEL_VGA_DEVICE(0x5A41, info), \ 421 INTEL_VGA_DEVICE(0x5A41, info), \
426 INTEL_VGA_DEVICE(0x5A49, info), \ 422 INTEL_VGA_DEVICE(0x5A49, info), \
427 INTEL_VGA_DEVICE(0x5A71, info), \ 423 INTEL_VGA_DEVICE(0x5A52, info), \
428 INTEL_VGA_DEVICE(0x5A79, info), \ 424 INTEL_VGA_DEVICE(0x5A5A, info), \
425 INTEL_VGA_DEVICE(0x5A42, info), \
426 INTEL_VGA_DEVICE(0x5A4A, info), \
427 INTEL_VGA_DEVICE(0x5A50, info), \
428 INTEL_VGA_DEVICE(0x5A40, info), \
429 INTEL_VGA_DEVICE(0x5A54, info), \ 429 INTEL_VGA_DEVICE(0x5A54, info), \
430 INTEL_VGA_DEVICE(0x5A5C, info), \ 430 INTEL_VGA_DEVICE(0x5A5C, info), \
431 INTEL_VGA_DEVICE(0x5A44, info) 431 INTEL_VGA_DEVICE(0x5A44, info), \
432 INTEL_VGA_DEVICE(0x5A4C, info)
432 433
433#endif /* _I915_PCIIDS_H */ 434#endif /* _I915_PCIIDS_H */