author	Chris Wilson <chris@chris-wilson.co.uk>	2016-07-05 05:40:23 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>	2016-07-05 06:58:45 -0400
commit	91c8a326a192117219d5b9b980244c3662e35404 (patch)
tree	e140056c88b1eee69830bd2e2f26e5a12bee30b3
parent	ded8b07d4c2827811215d92be4c97426ce7f1999 (diff)
drm/i915: Convert dev_priv->dev backpointers to dev_priv->drm
Since drm_i915_private is now a subclass of drm_device we do not need to
chase the drm_i915_private->dev backpointer and can instead simply access
drm_i915_private->drm directly.

   text    data     bss     dec     hex filename
1068757    4565     416 1073738  10624a drivers/gpu/drm/i915/i915.ko
1066949    4565     416 1071930  105b3a drivers/gpu/drm/i915/i915.ko

Created by the coccinelle script:
@@
struct drm_i915_private *d;
identifier i;
@@
(
- d->dev->i
+ d->drm.i
|
- d->dev
+ &d->drm
)

and for good measure the dev_priv->dev backpointer was removed entirely.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1467711623-2905-4-git-send-email-chris@chris-wilson.co.uk
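For illustration, here is a minimal standalone C sketch of the embedding pattern the message relies on (simplified stand-in structs, not the real drm/i915 headers): because drm_device is now embedded directly inside drm_i915_private (see the i915_drv.h hunk below), &dev_priv->drm is always a valid drm_device pointer, so the separate ->dev backpointer carries no information and can be dropped.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the real structures. */
struct drm_device {
	int irq;
};

struct drm_i915_private {
	struct drm_device drm;	/* embedded base; no *dev backpointer needed */
	int num_pipes;
};

/* container_of-style conversion from the embedded drm_device back to i915. */
#define to_i915(d) \
	((struct drm_i915_private *)((char *)(d) - offsetof(struct drm_i915_private, drm)))

int main(void)
{
	struct drm_i915_private dev_priv = { .drm = { .irq = 42 }, .num_pipes = 3 };

	/* Old style: dev_priv->dev->irq; new style reads the embedded member directly. */
	struct drm_device *dev = &dev_priv.drm;

	printf("irq=%d, round-trip ok=%d\n", dev->irq, to_i915(dev) == &dev_priv);
	return 0;
}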
-rw-r--r--	drivers/gpu/drm/i915/i915_debugfs.c	16
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c	39
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	22
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	14
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_context.c	6
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c	4
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.c	8
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_render_state.c	2
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_shrinker.c	8
-rw-r--r--	drivers/gpu/drm/i915/i915_gpu_error.c	5
-rw-r--r--	drivers/gpu/drm/i915/i915_guc_submission.c	2
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c	36
-rw-r--r--	drivers/gpu/drm/i915/i915_trace.h	20
-rw-r--r--	drivers/gpu/drm/i915/intel_audio.c	12
-rw-r--r--	drivers/gpu/drm/i915/intel_bios.c	2
-rw-r--r--	drivers/gpu/drm/i915/intel_csr.c	4
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	54
-rw-r--r--	drivers/gpu/drm/i915/intel_dp.c	30
-rw-r--r--	drivers/gpu/drm/i915/intel_dpll_mgr.c	2
-rw-r--r--	drivers/gpu/drm/i915/intel_fbc.c	4
-rw-r--r--	drivers/gpu/drm/i915/intel_fbdev.c	6
-rw-r--r--	drivers/gpu/drm/i915/intel_fifo_underrun.c	12
-rw-r--r--	drivers/gpu/drm/i915/intel_guc_loader.c	2
-rw-r--r--	drivers/gpu/drm/i915/intel_hdmi.c	2
-rw-r--r--	drivers/gpu/drm/i915/intel_hotplug.c	8
-rw-r--r--	drivers/gpu/drm/i915/intel_i2c.c	6
-rw-r--r--	drivers/gpu/drm/i915/intel_lrc.c	14
-rw-r--r--	drivers/gpu/drm/i915/intel_opregion.c	11
-rw-r--r--	drivers/gpu/drm/i915/intel_overlay.c	24
-rw-r--r--	drivers/gpu/drm/i915/intel_panel.c	6
-rw-r--r--	drivers/gpu/drm/i915/intel_pm.c	29
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	18
-rw-r--r--	drivers/gpu/drm/i915/intel_runtime_pm.c	26
-rw-r--r--	drivers/gpu/drm/i915/intel_sdvo.c	2
-rw-r--r--	drivers/gpu/drm/i915/intel_uncore.c	6
35 files changed, 236 insertions, 226 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 8cc453731567..a59e0caeda64 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -440,15 +440,15 @@ static void print_context_stats(struct seq_file *m,
 
 	memset(&stats, 0, sizeof(stats));
 
-	mutex_lock(&dev_priv->dev->struct_mutex);
+	mutex_lock(&dev_priv->drm.struct_mutex);
 	if (dev_priv->kernel_context)
 		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
 
-	list_for_each_entry(file, &dev_priv->dev->filelist, lhead) {
+	list_for_each_entry(file, &dev_priv->drm.filelist, lhead) {
 		struct drm_i915_file_private *fpriv = file->driver_priv;
 		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
 	}
-	mutex_unlock(&dev_priv->dev->struct_mutex);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	print_file_stats(m, "[k]contexts", stats);
 }
@@ -2797,8 +2797,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
 #endif
 	seq_printf(m, "PCI device power state: %s [%d]\n",
-		   pci_power_name(dev_priv->dev->pdev->current_state),
-		   dev_priv->dev->pdev->current_state);
+		   pci_power_name(dev_priv->drm.pdev->current_state),
+		   dev_priv->drm.pdev->current_state);
 
 	return 0;
 }
@@ -5098,7 +5098,7 @@ i915_cache_sharing_get(void *data, u64 *val)
 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
 
 	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev_priv->dev->struct_mutex);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
 
@@ -5483,7 +5483,7 @@ void intel_display_crc_init(struct drm_device *dev)
 
 int i915_debugfs_register(struct drm_i915_private *dev_priv)
 {
-	struct drm_minor *minor = dev_priv->dev->primary;
+	struct drm_minor *minor = dev_priv->drm.primary;
 	int ret, i;
 
 	ret = i915_forcewake_create(minor->debugfs_root, minor);
@@ -5511,7 +5511,7 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
 
 void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
 {
-	struct drm_minor *minor = dev_priv->dev->primary;
+	struct drm_minor *minor = dev_priv->drm.primary;
 	int i;
 
 	drm_debugfs_remove_files(i915_debugfs_list,
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 595029bc55a3..694edac2c703 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -687,7 +687,7 @@ out:
 static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
 	struct apertures_struct *ap;
-	struct pci_dev *pdev = dev_priv->dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	bool primary;
 	int ret;
@@ -889,7 +889,7 @@ err_workqueues:
  */
 static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
 {
-	i915_gem_load_cleanup(dev_priv->dev);
+	i915_gem_load_cleanup(&dev_priv->drm);
 	i915_workqueues_cleanup(dev_priv);
 }
 
@@ -944,7 +944,7 @@ static void i915_mmio_cleanup(struct drm_device *dev)
  */
 static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	int ret;
 
 	if (i915_inject_load_failure())
@@ -973,7 +973,7 @@ put_bridge:
  */
 static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 
 	intel_uncore_fini(dev_priv);
 	i915_mmio_cleanup(dev);
@@ -1006,7 +1006,7 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
  */
 static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	uint32_t aperture_size;
 	int ret;
@@ -1125,7 +1125,7 @@ out_ggtt:
  */
 static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
 	if (dev->pdev->msi_enabled)
@@ -1146,7 +1146,7 @@ static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
  */
 static void i915_driver_register(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 
 	i915_gem_shrinker_init(dev_priv);
 
@@ -1197,9 +1197,9 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 	acpi_video_unregister();
 	intel_opregion_unregister(dev_priv);
 
-	i915_teardown_sysfs(dev_priv->dev);
+	i915_teardown_sysfs(&dev_priv->drm);
 	i915_debugfs_unregister(dev_priv);
-	drm_dev_unregister(dev_priv->dev);
+	drm_dev_unregister(&dev_priv->drm);
 
 	i915_gem_shrinker_cleanup(dev_priv);
 }
@@ -1236,7 +1236,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	dev_priv->drm.pdev = pdev;
 	dev_priv->drm.dev_private = dev_priv;
-	dev_priv->dev = &dev_priv->drm;
 
 	ret = pci_enable_device(pdev);
 	if (ret)
@@ -1264,13 +1263,13 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * to the role/effect of the given init step.
 	 */
 	if (INTEL_INFO(dev_priv)->num_pipes) {
-		ret = drm_vblank_init(dev_priv->dev,
+		ret = drm_vblank_init(&dev_priv->drm,
				      INTEL_INFO(dev_priv)->num_pipes);
 		if (ret)
 			goto out_cleanup_hw;
 	}
 
-	ret = i915_load_modeset_init(dev_priv->dev);
+	ret = i915_load_modeset_init(&dev_priv->drm);
 	if (ret < 0)
 		goto out_cleanup_vblank;
 
@@ -1283,7 +1282,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return 0;
 
 out_cleanup_vblank:
-	drm_vblank_cleanup(dev_priv->dev);
+	drm_vblank_cleanup(&dev_priv->drm);
 out_cleanup_hw:
 	i915_driver_cleanup_hw(dev_priv);
 out_cleanup_mmio:
@@ -1402,7 +1401,7 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
 
 static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_encoder *encoder;
 
 	drm_modeset_lock_all(dev);
@@ -1770,7 +1769,7 @@ int i915_resume_switcheroo(struct drm_device *dev)
  */
 int i915_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct i915_gpu_error *error = &dev_priv->gpu_error;
 	unsigned reset_counter;
 	int ret;
@@ -1861,7 +1860,7 @@ static int i915_pm_suspend(struct device *dev)
 
 static int i915_pm_suspend_late(struct device *dev)
 {
-	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
 
 	/*
 	 * We have a suspend ordering issue with the snd-hda driver also
@@ -1880,7 +1879,7 @@ static int i915_pm_suspend_late(struct device *dev)
 
 static int i915_pm_poweroff_late(struct device *dev)
 {
-	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
 
 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
@@ -1890,7 +1889,7 @@ static int i915_pm_poweroff_late(struct device *dev)
 
 static int i915_pm_resume_early(struct device *dev)
 {
-	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
 
 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
@@ -1900,7 +1899,7 @@ static int i915_pm_resume_early(struct device *dev)
 
 static int i915_pm_resume(struct device *dev)
 {
-	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
 
 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
@@ -2278,7 +2277,7 @@ err1:
 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	int err;
 	int ret;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d33b370a057d..c269e0ad4057 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -320,15 +320,16 @@ struct i915_hotplug {
 		for_each_if ((__ports_mask) & (1 << (__port)))
 
 #define for_each_crtc(dev, crtc) \
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+	list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
 
 #define for_each_intel_plane(dev, intel_plane) \
 	list_for_each_entry(intel_plane, \
-			    &dev->mode_config.plane_list, \
+			    &(dev)->mode_config.plane_list, \
			    base.head)
 
 #define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
-	list_for_each_entry(intel_plane, &dev->mode_config.plane_list, \
+	list_for_each_entry(intel_plane, \
+			    &(dev)->mode_config.plane_list, \
			    base.head) \
		for_each_if ((plane_mask) & \
			     (1 << drm_plane_index(&intel_plane->base)))
@@ -339,11 +340,15 @@ struct i915_hotplug {
			    base.head) \
		for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
 
 #define for_each_intel_crtc(dev, intel_crtc) \
-	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
+	list_for_each_entry(intel_crtc, \
+			    &(dev)->mode_config.crtc_list, \
+			    base.head)
 
 #define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
-	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) \
+	list_for_each_entry(intel_crtc, \
+			    &(dev)->mode_config.crtc_list, \
+			    base.head) \
		for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
 
 #define for_each_intel_encoder(dev, intel_encoder) \
@@ -353,7 +358,7 @@ struct i915_hotplug {
 
 #define for_each_intel_connector(dev, intel_connector) \
 	list_for_each_entry(intel_connector, \
-			    &dev->mode_config.connector_list, \
+			    &(dev)->mode_config.connector_list, \
			    base.head)
 
 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
@@ -1718,7 +1723,6 @@ struct intel_wm_config {
 struct drm_i915_private {
 	struct drm_device drm;
 
-	struct drm_device *dev;
 	struct kmem_cache *objects;
 	struct kmem_cache *vmas;
 	struct kmem_cache *requests;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 20123c09bc18..8f50919ba9b4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1651,7 +1651,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
 	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_gem_request *tmp;
 
-	lockdep_assert_held(&engine->i915->dev->struct_mutex);
+	lockdep_assert_held(&engine->i915->drm.struct_mutex);
 
 	if (list_empty(&req->list))
 		return;
@@ -1680,7 +1680,7 @@ i915_wait_request(struct drm_i915_gem_request *req)
 
 	interruptible = dev_priv->mm.interruptible;
 
-	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+	BUG_ON(!mutex_is_locked(&dev_priv->drm.struct_mutex));
 
 	ret = __i915_wait_request(req, interruptible, NULL, NULL);
 	if (ret)
@@ -3254,7 +3254,7 @@ void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
 
-	lockdep_assert_held(&dev_priv->dev->struct_mutex);
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
 	if (dev_priv->gt.active_engines == 0)
 		return;
@@ -3278,7 +3278,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(work, typeof(*dev_priv), gt.retire_work.work);
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 
 	/* Come back later if the device is busy... */
 	if (mutex_trylock(&dev->struct_mutex)) {
@@ -3301,7 +3301,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(work, typeof(*dev_priv), gt.idle_work.work);
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_engine_cs *engine;
 	unsigned int stuck_engines;
 	bool rearm_hangcheck;
@@ -3713,7 +3713,7 @@ int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
 	struct intel_engine_cs *engine;
 	int ret;
 
-	lockdep_assert_held(&dev_priv->dev->struct_mutex);
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
 	for_each_engine(engine, dev_priv) {
 		if (engine->last_context == NULL)
@@ -5252,7 +5252,7 @@ init_engine_lists(struct intel_engine_cs *engine)
 void
 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 
 	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
 	    !IS_CHERRYVIEW(dev_priv))
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index b4bba8a76191..3c97f0e7a003 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -154,7 +154,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
 	struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
 	int i;
 
-	lockdep_assert_held(&ctx->i915->dev->struct_mutex);
+	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	trace_i915_context_free(ctx);
 
 	/*
@@ -465,7 +465,7 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
 
-	lockdep_assert_held(&dev_priv->dev->struct_mutex);
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
 	for_each_engine(engine, dev_priv) {
 		if (engine->last_context) {
@@ -895,7 +895,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
 	struct intel_engine_cs *engine = req->engine;
 
 	WARN_ON(i915.enable_execlists);
-	lockdep_assert_held(&req->i915->dev->struct_mutex);
+	lockdep_assert_held(&req->i915->drm.struct_mutex);
 
 	if (!req->ctx->engine[engine->id].state) {
 		struct i915_gem_context *to = req->ctx;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3a04d37be4e9..1978633e7549 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1328,10 +1328,10 @@ gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
 	/* Check whether the file_priv has already selected one ring. */
 	if ((int)file_priv->bsd_ring < 0) {
 		/* If not, use the ping-pong mechanism to select one. */
-		mutex_lock(&dev_priv->dev->struct_mutex);
+		mutex_lock(&dev_priv->drm.struct_mutex);
 		file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
 		dev_priv->mm.bsd_ring_dispatch_index ^= 1;
-		mutex_unlock(&dev_priv->dev->struct_mutex);
+		mutex_unlock(&dev_priv->drm.struct_mutex);
 	}
 
 	return file_priv->bsd_ring;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9d6ea275be89..365fe65950e1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -153,7 +153,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
 #endif
 
 	/* Early VLV doesn't have this */
-	if (IS_VALLEYVIEW(dev_priv) && dev_priv->dev->pdev->revision < 0xb) {
+	if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
 		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
 		return 0;
 	}
@@ -2115,7 +2115,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
				    struct drm_i915_private *dev_priv)
 {
 	drm_mm_init(&vm->mm, vm->start, vm->total);
-	vm->dev = dev_priv->dev;
+	vm->dev = &dev_priv->drm;
 	INIT_LIST_HEAD(&vm->active_list);
 	INIT_LIST_HEAD(&vm->inactive_list);
 	list_add_tail(&vm->global_link, &dev_priv->vm_list);
@@ -3179,7 +3179,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
 
-	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
+	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
 	if (!ret) {
 		DRM_ERROR("failed to set up gmch\n");
 		return -EIO;
@@ -3188,7 +3188,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
 	intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
		      &ggtt->mappable_base, &ggtt->mappable_end);
 
-	ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev);
+	ggtt->do_idle_maps = needs_idle_maps(&dev_priv->drm);
 	ggtt->base.insert_page = i915_ggtt_insert_page;
 	ggtt->base.insert_entries = i915_ggtt_insert_entries;
 	ggtt->base.clear_range = i915_ggtt_clear_range;
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index b7c1b5fb61ea..f75bbd67a13a 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -58,7 +58,7 @@ static int render_state_init(struct render_state *so,
 	if (so->rodata->batch_items * 4 > 4096)
 		return -EINVAL;
 
-	so->obj = i915_gem_object_create(dev_priv->dev, 4096);
+	so->obj = i915_gem_object_create(&dev_priv->drm, 4096);
 	if (IS_ERR(so->obj))
 		return PTR_ERR(so->obj);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 1bf14544d8ad..067632ad2f29 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -257,7 +257,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_i915_gem_object *obj;
 	unsigned long count;
 	bool unlock;
@@ -288,7 +288,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	unsigned long freed;
 	bool unlock;
 
@@ -323,7 +323,7 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
 {
 	unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
 
-	while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) {
+	while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) {
 		schedule_timeout_killable(1);
 		if (fatal_signal_pending(current))
 			return false;
@@ -344,7 +344,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
 {
 	dev_priv->mm.interruptible = slu->was_interruptible;
 	if (slu->unlock)
-		mutex_unlock(&dev_priv->dev->struct_mutex);
+		mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
 static int
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 4e8cacfc4e48..9d73d2216adc 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1276,7 +1276,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
 static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	int i;
 
 	/* General organization
@@ -1446,7 +1446,8 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
 		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
 		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
 		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
-		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index);
+		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
+			 dev_priv->drm.primary->index);
 		warned = true;
 	}
 }
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 3f9e3fe98faf..bfc8bf672f2d 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -622,7 +622,7 @@ gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size)
 {
 	struct drm_i915_gem_object *obj;
 
-	obj = i915_gem_object_create(dev_priv->dev, size);
+	obj = i915_gem_object_create(&dev_priv->drm, size);
 	if (IS_ERR(obj))
 		return NULL;
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3eadc8375449..b77d808b71cd 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -378,7 +378,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
				~dev_priv->pm_rps_events);
 
 	spin_unlock_irq(&dev_priv->irq_lock);
-	synchronize_irq(dev_priv->dev->irq);
+	synchronize_irq(dev_priv->drm.irq);
 
 	/* Now that we will not be generating any more work, flush any
	 * outsanding tasks. As we are called on the RPS idle path,
@@ -566,7 +566,7 @@ i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 	u32 enable_mask;
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
 	else
 		enable_mask = status_mask << 16;
@@ -580,7 +580,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 	u32 enable_mask;
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
 	else
 		enable_mask = status_mask << 16;
@@ -1175,7 +1175,7 @@ static void ivybridge_parity_work(struct work_struct *work)
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
-	mutex_lock(&dev_priv->dev->struct_mutex);
+	mutex_lock(&dev_priv->drm.struct_mutex);
 
 	/* If we've screwed up tracking, just let the interrupt fire again */
 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
@@ -1211,7 +1211,7 @@ static void ivybridge_parity_work(struct work_struct *work)
 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
 		parity_event[5] = NULL;
 
-		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
+		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);
 
 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
@@ -1231,7 +1231,7 @@ out:
 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
 	spin_unlock_irq(&dev_priv->irq_lock);
 
-	mutex_unlock(&dev_priv->dev->struct_mutex);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
@@ -1513,7 +1513,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 
 	entry = &pipe_crc->entries[head];
 
-	entry->frame = dev_priv->dev->driver->get_vblank_counter(dev_priv->dev,
+	entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
								 pipe);
 	entry->crc[0] = crc0;
 	entry->crc[1] = crc1;
@@ -1611,7 +1611,7 @@ static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
 {
 	bool ret;
 
-	ret = drm_handle_vblank(dev_priv->dev, pipe);
+	ret = drm_handle_vblank(&dev_priv->drm, pipe);
 	if (ret)
 		intel_finish_page_flip_mmio(dev_priv, pipe);
 
@@ -2500,7 +2500,7 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv)
  */
 static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
 {
-	struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj;
+	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
@@ -3402,7 +3402,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
 	spin_unlock_irq(&dev_priv->irq_lock);
 
 	/* make sure we're done processing display irqs */
-	synchronize_irq(dev_priv->dev->irq);
+	synchronize_irq(dev_priv->drm.irq);
 }
 
 static void cherryview_irq_preinstall(struct drm_device *dev)
@@ -3428,7 +3428,7 @@ static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
 	struct intel_encoder *encoder;
 	u32 enabled_irqs = 0;
 
-	for_each_intel_encoder(dev_priv->dev, encoder)
+	for_each_intel_encoder(&dev_priv->drm, encoder)
 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
 			enabled_irqs |= hpd[encoder->hpd_pin];
 
@@ -4510,7 +4510,7 @@ static void i965_irq_uninstall(struct drm_device * dev)
  */
 void intel_irq_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 
 	intel_hpd_init_work(dev_priv);
 
@@ -4644,7 +4644,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
	 */
 	dev_priv->pm.irqs_enabled = true;
 
-	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
+	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
 }
 
 /**
@@ -4656,7 +4656,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
  */
 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
 {
-	drm_irq_uninstall(dev_priv->dev);
+	drm_irq_uninstall(&dev_priv->drm);
 	intel_hpd_cancel_work(dev_priv);
 	dev_priv->pm.irqs_enabled = false;
 }
@@ -4670,9 +4670,9 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
 {
-	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
+	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
 	dev_priv->pm.irqs_enabled = false;
-	synchronize_irq(dev_priv->dev->irq);
+	synchronize_irq(dev_priv->drm.irq);
 }
 
 /**
@@ -4685,6 +4685,6 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
 {
 	dev_priv->pm.irqs_enabled = true;
-	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
-	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
+	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
+	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
 }
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index f59cf07184ae..534154e05fbe 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -118,7 +118,7 @@ TRACE_EVENT(i915_gem_shrink,
	    ),
 
	    TP_fast_assign(
-			   __entry->dev = i915->dev->primary->index;
+			   __entry->dev = i915->drm.primary->index;
			   __entry->target = target;
			   __entry->flags = flags;
	    ),
@@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
	    ),
 
	    TP_fast_assign(
-			   __entry->dev = from->i915->dev->primary->index;
+			   __entry->dev = from->i915->drm.primary->index;
			   __entry->sync_from = from->id;
			   __entry->sync_to = to_req->engine->id;
			   __entry->seqno = i915_gem_request_get_seqno(req);
@@ -486,7 +486,7 @@ TRACE_EVENT(i915_gem_ring_dispatch,
	    ),
 
	    TP_fast_assign(
-			   __entry->dev = req->i915->dev->primary->index;
+			   __entry->dev = req->i915->drm.primary->index;
			   __entry->ring = req->engine->id;
			   __entry->seqno = req->seqno;
			   __entry->flags = flags;
@@ -509,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush,
	    ),
 
	    TP_fast_assign(
-			   __entry->dev = req->i915->dev->primary->index;
+			   __entry->dev = req->i915->drm.primary->index;
			   __entry->ring = req->engine->id;
			   __entry->invalidate = invalidate;
			   __entry->flush = flush;
@@ -531,7 +531,7 @@ DECLARE_EVENT_CLASS(i915_gem_request,
	    ),
 
	    TP_fast_assign(
-			   __entry->dev = req->i915->dev->primary->index;
+			   __entry->dev = req->i915->drm.primary->index;
			   __entry->ring = req->engine->id;
			   __entry->seqno = req->seqno;
	    ),
@@ -556,7 +556,7 @@ TRACE_EVENT(i915_gem_request_notify,
	    ),
 
	    TP_fast_assign(
-			   __entry->dev = engine->i915->dev->primary->index;
+			   __entry->dev = engine->i915->drm.primary->index;
			   __entry->ring = engine->id;
			   __entry->seqno = intel_engine_get_seqno(engine);
	    ),
@@ -593,11 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin,
	     * less desirable.
	     */
	    TP_fast_assign(
-			   __entry->dev = req->i915->dev->primary->index;
+			   __entry->dev = req->i915->drm.primary->index;
			   __entry->ring = req->engine->id;
			   __entry->seqno = req->seqno;
			   __entry->blocking =
-				     mutex_is_locked(&req->i915->dev->struct_mutex);
+				     mutex_is_locked(&req->i915->drm.struct_mutex);
	    ),
 
	    TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -746,7 +746,7 @@ DECLARE_EVENT_CLASS(i915_context,
	    TP_fast_assign(
			__entry->ctx = ctx;
			__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
-			__entry->dev = ctx->i915->dev->primary->index;
+			__entry->dev = ctx->i915->drm.primary->index;
	    ),
 
	    TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
@@ -786,7 +786,7 @@ TRACE_EVENT(switch_mm,
			__entry->ring = engine->id;
			__entry->to = to;
			__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
-			__entry->dev = engine->i915->dev->primary->index;
	+			__entry->dev = engine->i915->drm.primary->index;
	    ),
 
	    TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index e2e4d4c59e85..161f6c2bca36 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -749,14 +749,14 @@ static int i915_audio_component_bind(struct device *i915_dev,
 	if (WARN_ON(acomp->ops || acomp->dev))
 		return -EEXIST;
 
-	drm_modeset_lock_all(dev_priv->dev);
+	drm_modeset_lock_all(&dev_priv->drm);
 	acomp->ops = &i915_audio_component_ops;
 	acomp->dev = i915_dev;
 	BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
 	for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
 		acomp->aud_sample_rate[i] = 0;
 	dev_priv->audio_component = acomp;
-	drm_modeset_unlock_all(dev_priv->dev);
+	drm_modeset_unlock_all(&dev_priv->drm);
 
 	return 0;
 }
@@ -767,11 +767,11 @@ static void i915_audio_component_unbind(struct device *i915_dev,
 	struct i915_audio_component *acomp = data;
 	struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
 
-	drm_modeset_lock_all(dev_priv->dev);
+	drm_modeset_lock_all(&dev_priv->drm);
 	acomp->ops = NULL;
 	acomp->dev = NULL;
 	dev_priv->audio_component = NULL;
-	drm_modeset_unlock_all(dev_priv->dev);
+	drm_modeset_unlock_all(&dev_priv->drm);
 }
 
 static const struct component_ops i915_audio_component_bind_ops = {
@@ -799,7 +799,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
 {
 	int ret;
 
-	ret = component_add(dev_priv->dev->dev, &i915_audio_component_bind_ops);
+	ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
 	if (ret < 0) {
 		DRM_ERROR("failed to add audio component (%d)\n", ret);
 		/* continue with reduced functionality */
@@ -821,6 +821,6 @@ void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
 	if (!dev_priv->audio_component_registered)
 		return;
 
-	component_del(dev_priv->dev->dev, &i915_audio_component_bind_ops);
+	component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
 	dev_priv->audio_component_registered = false;
 }
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index da5ed4a850b9..c6e69e4cfa83 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1426,7 +1426,7 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
 int
 intel_bios_init(struct drm_i915_private *dev_priv)
 {
-	struct pci_dev *pdev = dev_priv->dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	const struct vbt_header *vbt = dev_priv->opregion.vbt;
 	const struct bdb_header *bdb;
 	u8 __iomem *bios = NULL;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index ea047cd46b71..c3b33a10c15c 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -412,7 +412,7 @@ static void csr_load_work_fn(struct work_struct *work)
 	csr = &dev_priv->csr;
 
 	ret = request_firmware(&fw, dev_priv->csr.fw_path,
-			       &dev_priv->dev->pdev->dev);
+			       &dev_priv->drm.pdev->dev);
 	if (fw)
 		dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
 
@@ -426,7 +426,7 @@ static void csr_load_work_fn(struct work_struct *work)
			 CSR_VERSION_MAJOR(csr->version),
			 CSR_VERSION_MINOR(csr->version));
 	} else {
-		dev_notice(dev_priv->dev->dev,
+		dev_notice(dev_priv->drm.dev,
			   "Failed to load DMC firmware"
			   " [" FIRMWARE_URL "],"
			   " disabling runtime power management.\n");
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4ff6e9304ef1..111b350d1d7e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1235,7 +1235,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
 void assert_panel_unlocked(struct drm_i915_private *dev_priv,
			   enum pipe pipe)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	i915_reg_t pp_reg;
 	u32 val;
 	enum pipe panel_pipe = PIPE_A;
@@ -1277,7 +1277,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
 static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	bool cur_state;
 
 	if (IS_845G(dev) || IS_I865G(dev))
@@ -1339,7 +1339,7 @@ static void assert_plane(struct drm_i915_private *dev_priv,
 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	int i;
 
 	/* Primary planes are fixed to pipes on gen4+ */
@@ -1365,7 +1365,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
 static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	int sprite;
 
 	if (INTEL_INFO(dev)->gen >= 9) {
@@ -1830,7 +1830,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
					   enum pipe pipe)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	i915_reg_t reg;
@@ -1921,7 +1921,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	i915_reg_t reg;
 	uint32_t val;
 
@@ -3137,7 +3137,7 @@ static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
 {
 	struct intel_crtc *crtc;
 
-	for_each_intel_crtc(dev_priv->dev, crtc)
+	for_each_intel_crtc(&dev_priv->drm, crtc)
 		intel_finish_page_flip_cs(dev_priv, crtc->pipe);
 }
 
@@ -3171,12 +3171,12 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
 		return;
 
-	drm_modeset_lock_all(dev_priv->dev);
+	drm_modeset_lock_all(&dev_priv->drm);
 	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
-	intel_display_suspend(dev_priv->dev);
+	intel_display_suspend(&dev_priv->drm);
 }
 
 void intel_finish_reset(struct drm_i915_private *dev_priv)
@@ -3203,7 +3203,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
	 * FIXME: Atomic will make this obsolete since we won't schedule
	 * CS-based flips (which might get lost in gpu resets) any more.
	 */
-		intel_update_primary_planes(dev_priv->dev);
+		intel_update_primary_planes(&dev_priv->drm);
 		return;
 	}
 
@@ -3214,18 +3214,18 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 	intel_runtime_pm_disable_interrupts(dev_priv);
 	intel_runtime_pm_enable_interrupts(dev_priv);
 
-	intel_modeset_init_hw(dev_priv->dev);
+	intel_modeset_init_hw(&dev_priv->drm);
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	if (dev_priv->display.hpd_irq_setup)
 		dev_priv->display.hpd_irq_setup(dev_priv);
 	spin_unlock_irq(&dev_priv->irq_lock);
 
-	intel_display_resume(dev_priv->dev);
+	intel_display_resume(&dev_priv->drm);
 
 	intel_hpd_init(dev_priv);
 
-	drm_modeset_unlock_all(dev_priv->dev);
+	drm_modeset_unlock_all(&dev_priv->drm);
 }
 
 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
@@ -5524,14 +5524,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
 		return;
 	}
 
-	intel_update_cdclk(dev_priv->dev);
+	intel_update_cdclk(&dev_priv->drm);
 }
 
 static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
 {
 	u32 cdctl, expected;
 
-	intel_update_cdclk(dev_priv->dev);
+	intel_update_cdclk(&dev_priv->drm);
 
 	if (dev_priv->cdclk_pll.vco == 0 ||
 	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
@@ -5664,7 +5664,7 @@ void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
 	dev_priv->skl_preferred_vco_freq = vco;
 
 	if (changed)
-		intel_update_max_cdclk(dev_priv->dev);
+		intel_update_max_cdclk(&dev_priv->drm);
 }
 
 static void
@@ -5758,7 +5758,7 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
 
 static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	u32 freq_select, pcu_ack;
 
 	WARN_ON((cdclk == 24000) != (vco == 0));
@@ -5856,7 +5856,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
 	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
 		goto sanitize;
 
-	intel_update_cdclk(dev_priv->dev);
+	intel_update_cdclk(&dev_priv->drm);
 	/* Is PLL enabled and locked ? */
 	if (dev_priv->cdclk_pll.vco == 0 ||
 	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
@@ -9488,7 +9488,7 @@ out:
 
 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_crtc *crtc;
 
 	for_each_intel_crtc(dev, crtc)
@@ -9522,7 +9522,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 
 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 
9527 if (IS_HASWELL(dev)) 9527 if (IS_HASWELL(dev))
9528 return I915_READ(D_COMP_HSW); 9528 return I915_READ(D_COMP_HSW);
@@ -9532,7 +9532,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9532 9532
9533static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) 9533static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
9534{ 9534{
9535 struct drm_device *dev = dev_priv->dev; 9535 struct drm_device *dev = &dev_priv->drm;
9536 9536
9537 if (IS_HASWELL(dev)) { 9537 if (IS_HASWELL(dev)) {
9538 mutex_lock(&dev_priv->rps.hw_lock); 9538 mutex_lock(&dev_priv->rps.hw_lock);
@@ -9649,7 +9649,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9649 } 9649 }
9650 9650
9651 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 9651 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
9652 intel_update_cdclk(dev_priv->dev); 9652 intel_update_cdclk(&dev_priv->drm);
9653} 9653}
9654 9654
9655/* 9655/*
@@ -9677,7 +9677,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9677 */ 9677 */
9678void hsw_enable_pc8(struct drm_i915_private *dev_priv) 9678void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9679{ 9679{
9680 struct drm_device *dev = dev_priv->dev; 9680 struct drm_device *dev = &dev_priv->drm;
9681 uint32_t val; 9681 uint32_t val;
9682 9682
9683 DRM_DEBUG_KMS("Enabling package C8+\n"); 9683 DRM_DEBUG_KMS("Enabling package C8+\n");
@@ -9694,7 +9694,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9694 9694
9695void hsw_disable_pc8(struct drm_i915_private *dev_priv) 9695void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9696{ 9696{
9697 struct drm_device *dev = dev_priv->dev; 9697 struct drm_device *dev = &dev_priv->drm;
9698 uint32_t val; 9698 uint32_t val;
9699 9699
9700 DRM_DEBUG_KMS("Disabling package C8+\n"); 9700 DRM_DEBUG_KMS("Disabling package C8+\n");
@@ -11142,7 +11142,7 @@ static bool pageflip_finished(struct intel_crtc *crtc,
11142 11142
11143void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe) 11143void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
11144{ 11144{
11145 struct drm_device *dev = dev_priv->dev; 11145 struct drm_device *dev = &dev_priv->drm;
11146 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 11146 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11147 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11147 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11148 struct intel_flip_work *work; 11148 struct intel_flip_work *work;
@@ -11169,7 +11169,7 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
11169 11169
11170void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe) 11170void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
11171{ 11171{
11172 struct drm_device *dev = dev_priv->dev; 11172 struct drm_device *dev = &dev_priv->drm;
11173 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 11173 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11174 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11174 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11175 struct intel_flip_work *work; 11175 struct intel_flip_work *work;
@@ -11628,7 +11628,7 @@ static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
11628 11628
11629void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe) 11629void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
11630{ 11630{
11631 struct drm_device *dev = dev_priv->dev; 11631 struct drm_device *dev = &dev_priv->drm;
11632 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 11632 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11633 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11633 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11634 struct intel_flip_work *work; 11634 struct intel_flip_work *work;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1083f5e2d45b..5e4d894968d6 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -540,7 +540,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
540 540
541void intel_power_sequencer_reset(struct drm_i915_private *dev_priv) 541void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
542{ 542{
543 struct drm_device *dev = dev_priv->dev; 543 struct drm_device *dev = &dev_priv->drm;
544 struct intel_encoder *encoder; 544 struct intel_encoder *encoder;
545 545
546 if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && 546 if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
@@ -2286,7 +2286,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2286 * 2. Program DP PLL enable 2286 * 2. Program DP PLL enable
2287 */ 2287 */
2288 if (IS_GEN5(dev_priv)) 2288 if (IS_GEN5(dev_priv))
2289 intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe); 2289 intel_wait_for_vblank_if_active(&dev_priv->drm, !crtc->pipe);
2290 2290
2291 intel_dp->DP |= DP_PLL_ENABLE; 2291 intel_dp->DP |= DP_PLL_ENABLE;
2292 2292
@@ -3387,7 +3387,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
3387 I915_WRITE(intel_dp->output_reg, DP); 3387 I915_WRITE(intel_dp->output_reg, DP);
3388 POSTING_READ(intel_dp->output_reg); 3388 POSTING_READ(intel_dp->output_reg);
3389 3389
3390 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A); 3390 intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
3391 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); 3391 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3392 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); 3392 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3393 } 3393 }
@@ -5147,9 +5147,9 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5147 } 5147 }
5148 5148
5149 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 5149 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5150 intel_dp_set_drrs_state(dev_priv->dev, 5150 intel_dp_set_drrs_state(&dev_priv->drm,
5151 intel_dp->attached_connector->panel. 5151 intel_dp->attached_connector->panel.
5152 fixed_mode->vrefresh); 5152 fixed_mode->vrefresh);
5153 5153
5154 dev_priv->drrs.dp = NULL; 5154 dev_priv->drrs.dp = NULL;
5155 mutex_unlock(&dev_priv->drrs.mutex); 5155 mutex_unlock(&dev_priv->drrs.mutex);
@@ -5179,9 +5179,9 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
5179 goto unlock; 5179 goto unlock;
5180 5180
5181 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) 5181 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5182 intel_dp_set_drrs_state(dev_priv->dev, 5182 intel_dp_set_drrs_state(&dev_priv->drm,
5183 intel_dp->attached_connector->panel. 5183 intel_dp->attached_connector->panel.
5184 downclock_mode->vrefresh); 5184 downclock_mode->vrefresh);
5185 5185
5186unlock: 5186unlock:
5187 mutex_unlock(&dev_priv->drrs.mutex); 5187 mutex_unlock(&dev_priv->drrs.mutex);
@@ -5223,9 +5223,9 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
5223 5223
5224 /* invalidate means busy screen hence upclock */ 5224 /* invalidate means busy screen hence upclock */
5225 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 5225 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5226 intel_dp_set_drrs_state(dev_priv->dev, 5226 intel_dp_set_drrs_state(&dev_priv->drm,
5227 dev_priv->drrs.dp->attached_connector->panel. 5227 dev_priv->drrs.dp->attached_connector->panel.
5228 fixed_mode->vrefresh); 5228 fixed_mode->vrefresh);
5229 5229
5230 mutex_unlock(&dev_priv->drrs.mutex); 5230 mutex_unlock(&dev_priv->drrs.mutex);
5231} 5231}
@@ -5268,9 +5268,9 @@ void intel_edp_drrs_flush(struct drm_device *dev,
5268 5268
5269 /* flush means busy screen hence upclock */ 5269 /* flush means busy screen hence upclock */
5270 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 5270 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5271 intel_dp_set_drrs_state(dev_priv->dev, 5271 intel_dp_set_drrs_state(&dev_priv->drm,
5272 dev_priv->drrs.dp->attached_connector->panel. 5272 dev_priv->drrs.dp->attached_connector->panel.
5273 fixed_mode->vrefresh); 5273 fixed_mode->vrefresh);
5274 5274
5275 /* 5275 /*
5276 * flush also means no more activity hence schedule downclock, if all 5276 * flush also means no more activity hence schedule downclock, if all
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 8a4dac98b3d5..9098169b39c3 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -331,7 +331,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
331static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, 331static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
332 struct intel_shared_dpll *pll) 332 struct intel_shared_dpll *pll)
333{ 333{
334 struct drm_device *dev = dev_priv->dev; 334 struct drm_device *dev = &dev_priv->drm;
335 struct intel_crtc *crtc; 335 struct intel_crtc *crtc;
336 336
337 /* Make sure no transcoder isn't still depending on us. */ 337 /* Make sure no transcoder isn't still depending on us. */
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 067b6f518e48..6a7ad3ed1463 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -392,7 +392,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
392 struct intel_fbc *fbc = &dev_priv->fbc; 392 struct intel_fbc *fbc = &dev_priv->fbc;
393 struct intel_fbc_work *work = &fbc->work; 393 struct intel_fbc_work *work = &fbc->work;
394 struct intel_crtc *crtc = fbc->crtc; 394 struct intel_crtc *crtc = fbc->crtc;
395 struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe]; 395 struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];
396 396
397 if (drm_crtc_vblank_get(&crtc->base)) { 397 if (drm_crtc_vblank_get(&crtc->base)) {
398 DRM_ERROR("vblank not available for FBC on pipe %c\n", 398 DRM_ERROR("vblank not available for FBC on pipe %c\n",
@@ -1210,7 +1210,7 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
1210 if (!no_fbc_on_multiple_pipes(dev_priv)) 1210 if (!no_fbc_on_multiple_pipes(dev_priv))
1211 return; 1211 return;
1212 1212
1213 for_each_intel_crtc(dev_priv->dev, crtc) 1213 for_each_intel_crtc(&dev_priv->drm, crtc)
1214 if (intel_crtc_active(&crtc->base) && 1214 if (intel_crtc_active(&crtc->base) &&
1215 to_intel_plane_state(crtc->base.primary->state)->visible) 1215 to_intel_plane_state(crtc->base.primary->state)->visible)
1216 dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe); 1216 dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 5205afd69ff6..86b00c6db1a6 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -693,9 +693,9 @@ out:
693 693
694static void intel_fbdev_suspend_worker(struct work_struct *work) 694static void intel_fbdev_suspend_worker(struct work_struct *work)
695{ 695{
696 intel_fbdev_set_suspend(container_of(work, 696 intel_fbdev_set_suspend(&container_of(work,
697 struct drm_i915_private, 697 struct drm_i915_private,
698 fbdev_suspend_work)->dev, 698 fbdev_suspend_work)->drm,
699 FBINFO_STATE_RUNNING, 699 FBINFO_STATE_RUNNING,
700 true); 700 true);
701} 701}
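The "&container_of(...)->drm" change in the intel_fbdev.c hunk just above is the same embedded-struct pattern every hunk in this diff applies: take the address of the embedded member where a pointer is needed, use '.' for further member access, and recover the enclosing structure with container_of(). Below is a minimal, self-contained C sketch of that pattern; struct base, struct priv, my_container_of() and the field names are illustrative stand-ins, not the driver's real definitions.

	#include <stddef.h>
	#include <stdio.h>

	struct base {				/* illustrative stand-in for struct drm_device */
		int irq;
	};

	struct priv {				/* illustrative stand-in for struct drm_i915_private */
		struct base drm;		/* embedded base object replaces the old ->dev backpointer */
		int frequency;
	};

	/* Reduced form of the container_of() idiom: member address back to enclosing struct. */
	#define my_container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static struct base *priv_to_base(struct priv *p)
	{
		return &p->drm;			/* a pointer is needed, so take the member's address */
	}

	static int priv_irq(struct priv *p)
	{
		return p->drm.irq;		/* plain member access uses '.' on the embedded struct */
	}

	static struct priv *base_to_priv(struct base *b)
	{
		return my_container_of(b, struct priv, drm);
	}

	int main(void)
	{
		struct priv p = { .drm = { .irq = 16 }, .frequency = 450 };
		struct base *b = priv_to_base(&p);

		/* Round trip: priv -> embedded base -> enclosing priv again. */
		printf("irq=%d frequency=%d same=%d\n",
		       priv_irq(&p), base_to_priv(b)->frequency, base_to_priv(b) == &p);
		return 0;
	}

Embedding the base object keeps a single allocation, and both directions of the conversion become constant-offset address computations rather than pointer dereferences.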
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 3fca95501890..2aa744081f09 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -289,7 +289,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
289 bool ret; 289 bool ret;
290 290
291 spin_lock_irqsave(&dev_priv->irq_lock, flags); 291 spin_lock_irqsave(&dev_priv->irq_lock, flags);
292 ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe, 292 ret = __intel_set_cpu_fifo_underrun_reporting(&dev_priv->drm, pipe,
293 enable); 293 enable);
294 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 294 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
295 295
@@ -334,10 +334,12 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
334 intel_crtc->pch_fifo_underrun_disabled = !enable; 334 intel_crtc->pch_fifo_underrun_disabled = !enable;
335 335
336 if (HAS_PCH_IBX(dev_priv)) 336 if (HAS_PCH_IBX(dev_priv))
337 ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder, 337 ibx_set_fifo_underrun_reporting(&dev_priv->drm,
338 pch_transcoder,
338 enable); 339 enable);
339 else 340 else
340 cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder, 341 cpt_set_fifo_underrun_reporting(&dev_priv->drm,
342 pch_transcoder,
341 enable, old); 343 enable, old);
342 344
343 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 345 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -405,7 +407,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
405 407
406 spin_lock_irq(&dev_priv->irq_lock); 408 spin_lock_irq(&dev_priv->irq_lock);
407 409
408 for_each_intel_crtc(dev_priv->dev, crtc) { 410 for_each_intel_crtc(&dev_priv->drm, crtc) {
409 if (crtc->cpu_fifo_underrun_disabled) 411 if (crtc->cpu_fifo_underrun_disabled)
410 continue; 412 continue;
411 413
@@ -432,7 +434,7 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
432 434
433 spin_lock_irq(&dev_priv->irq_lock); 435 spin_lock_irq(&dev_priv->irq_lock);
434 436
435 for_each_intel_crtc(dev_priv->dev, crtc) { 437 for_each_intel_crtc(&dev_priv->drm, crtc) {
436 if (crtc->pch_fifo_underrun_disabled) 438 if (crtc->pch_fifo_underrun_disabled)
437 continue; 439 continue;
438 440
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index d925e2daeb24..605c69658d2c 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -314,7 +314,7 @@ static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
314static int guc_ucode_xfer(struct drm_i915_private *dev_priv) 314static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
315{ 315{
316 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 316 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
317 struct drm_device *dev = dev_priv->dev; 317 struct drm_device *dev = &dev_priv->drm;
318 int ret; 318 int ret;
319 319
320 ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false); 320 ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4f4e4b535683..e94244266b19 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1154,7 +1154,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
1154 I915_WRITE(intel_hdmi->hdmi_reg, temp); 1154 I915_WRITE(intel_hdmi->hdmi_reg, temp);
1155 POSTING_READ(intel_hdmi->hdmi_reg); 1155 POSTING_READ(intel_hdmi->hdmi_reg);
1156 1156
1157 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A); 1157 intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
1158 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); 1158 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
1159 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); 1159 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
1160 } 1160 }
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 38eeca7a6e72..51434ec871f2 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -144,7 +144,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
144 144
145static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) 145static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
146{ 146{
147 struct drm_device *dev = dev_priv->dev; 147 struct drm_device *dev = &dev_priv->drm;
148 struct drm_mode_config *mode_config = &dev->mode_config; 148 struct drm_mode_config *mode_config = &dev->mode_config;
149 struct intel_connector *intel_connector; 149 struct intel_connector *intel_connector;
150 struct intel_encoder *intel_encoder; 150 struct intel_encoder *intel_encoder;
@@ -191,7 +191,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
191 struct drm_i915_private *dev_priv = 191 struct drm_i915_private *dev_priv =
192 container_of(work, typeof(*dev_priv), 192 container_of(work, typeof(*dev_priv),
193 hotplug.reenable_work.work); 193 hotplug.reenable_work.work);
194 struct drm_device *dev = dev_priv->dev; 194 struct drm_device *dev = &dev_priv->drm;
195 struct drm_mode_config *mode_config = &dev->mode_config; 195 struct drm_mode_config *mode_config = &dev->mode_config;
196 int i; 196 int i;
197 197
@@ -302,7 +302,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
302{ 302{
303 struct drm_i915_private *dev_priv = 303 struct drm_i915_private *dev_priv =
304 container_of(work, struct drm_i915_private, hotplug.hotplug_work); 304 container_of(work, struct drm_i915_private, hotplug.hotplug_work);
305 struct drm_device *dev = dev_priv->dev; 305 struct drm_device *dev = &dev_priv->drm;
306 struct drm_mode_config *mode_config = &dev->mode_config; 306 struct drm_mode_config *mode_config = &dev->mode_config;
307 struct intel_connector *intel_connector; 307 struct intel_connector *intel_connector;
308 struct intel_encoder *intel_encoder; 308 struct intel_encoder *intel_encoder;
@@ -455,7 +455,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
455 */ 455 */
456void intel_hpd_init(struct drm_i915_private *dev_priv) 456void intel_hpd_init(struct drm_i915_private *dev_priv)
457{ 457{
458 struct drm_device *dev = dev_priv->dev; 458 struct drm_device *dev = &dev_priv->drm;
459 struct drm_mode_config *mode_config = &dev->mode_config; 459 struct drm_mode_config *mode_config = &dev->mode_config;
460 struct drm_connector *connector; 460 struct drm_connector *connector;
461 int i; 461 int i;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 3c6f338866da..1f266d7df2ec 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -138,7 +138,7 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
138static u32 get_reserved(struct intel_gmbus *bus) 138static u32 get_reserved(struct intel_gmbus *bus)
139{ 139{
140 struct drm_i915_private *dev_priv = bus->dev_priv; 140 struct drm_i915_private *dev_priv = bus->dev_priv;
141 struct drm_device *dev = dev_priv->dev; 141 struct drm_device *dev = &dev_priv->drm;
142 u32 reserved = 0; 142 u32 reserved = 0;
143 143
144 /* On most chips, these bits must be preserved in software. */ 144 /* On most chips, these bits must be preserved in software. */
@@ -212,7 +212,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
212 adapter); 212 adapter);
213 struct drm_i915_private *dev_priv = bus->dev_priv; 213 struct drm_i915_private *dev_priv = bus->dev_priv;
214 214
215 intel_i2c_reset(dev_priv->dev); 215 intel_i2c_reset(&dev_priv->drm);
216 intel_i2c_quirk_set(dev_priv, true); 216 intel_i2c_quirk_set(dev_priv, true);
217 set_data(bus, 1); 217 set_data(bus, 1);
218 set_clock(bus, 1); 218 set_clock(bus, 1);
@@ -688,7 +688,7 @@ int intel_setup_gmbus(struct drm_device *dev)
688 goto err; 688 goto err;
689 } 689 }
690 690
691 intel_i2c_reset(dev_priv->dev); 691 intel_i2c_reset(&dev_priv->drm);
692 692
693 return 0; 693 return 0;
694 694
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 676b53200e94..d47d1a0dbb91 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -899,7 +899,7 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
899 struct drm_i915_gem_request *req, *tmp; 899 struct drm_i915_gem_request *req, *tmp;
900 LIST_HEAD(cancel_list); 900 LIST_HEAD(cancel_list);
901 901
902 WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex)); 902 WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex));
903 903
904 spin_lock_bh(&engine->execlist_lock); 904 spin_lock_bh(&engine->execlist_lock);
905 list_replace_init(&engine->execlist_queue, &cancel_list); 905 list_replace_init(&engine->execlist_queue, &cancel_list);
@@ -961,7 +961,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
961 u32 *lrc_reg_state; 961 u32 *lrc_reg_state;
962 int ret; 962 int ret;
963 963
964 lockdep_assert_held(&ctx->i915->dev->struct_mutex); 964 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
965 965
966 if (ce->pin_count++) 966 if (ce->pin_count++)
967 return 0; 967 return 0;
@@ -1011,7 +1011,7 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
1011{ 1011{
1012 struct intel_context *ce = &ctx->engine[engine->id]; 1012 struct intel_context *ce = &ctx->engine[engine->id];
1013 1013
1014 lockdep_assert_held(&ctx->i915->dev->struct_mutex); 1014 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1015 GEM_BUG_ON(ce->pin_count == 0); 1015 GEM_BUG_ON(ce->pin_count == 0);
1016 1016
1017 if (--ce->pin_count) 1017 if (--ce->pin_count)
@@ -1353,8 +1353,8 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
1353{ 1353{
1354 int ret; 1354 int ret;
1355 1355
1356 engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev, 1356 engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm,
1357 PAGE_ALIGN(size)); 1357 PAGE_ALIGN(size));
1358 if (IS_ERR(engine->wa_ctx.obj)) { 1358 if (IS_ERR(engine->wa_ctx.obj)) {
1359 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n"); 1359 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
1360 ret = PTR_ERR(engine->wa_ctx.obj); 1360 ret = PTR_ERR(engine->wa_ctx.obj);
@@ -2154,7 +2154,7 @@ logical_ring_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id)
2154 logical_ring_default_irqs(engine, info->irq_shift); 2154 logical_ring_default_irqs(engine, info->irq_shift);
2155 2155
2156 intel_engine_init_hangcheck(engine); 2156 intel_engine_init_hangcheck(engine);
2157 i915_gem_batch_pool_init(dev_priv->dev, &engine->batch_pool); 2157 i915_gem_batch_pool_init(&dev_priv->drm, &engine->batch_pool);
2158 2158
2159 return engine; 2159 return engine;
2160} 2160}
@@ -2486,7 +2486,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
2486 /* One extra page as the sharing data between driver and GuC */ 2486 /* One extra page as the sharing data between driver and GuC */
2487 context_size += PAGE_SIZE * LRC_PPHWSP_PN; 2487 context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2488 2488
2489 ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size); 2489 ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
2490 if (IS_ERR(ctx_obj)) { 2490 if (IS_ERR(ctx_obj)) {
2491 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); 2491 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
2492 return PTR_ERR(ctx_obj); 2492 return PTR_ERR(ctx_obj);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 82e687dd09c3..87a9bd040106 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -261,7 +261,7 @@ static int swsci(struct drm_i915_private *dev_priv,
261 u32 function, u32 parm, u32 *parm_out) 261 u32 function, u32 parm, u32 *parm_out)
262{ 262{
263 struct opregion_swsci *swsci = dev_priv->opregion.swsci; 263 struct opregion_swsci *swsci = dev_priv->opregion.swsci;
264 struct pci_dev *pdev = dev_priv->dev->pdev; 264 struct pci_dev *pdev = dev_priv->drm.pdev;
265 u32 main_function, sub_function, scic; 265 u32 main_function, sub_function, scic;
266 u16 swsci_val; 266 u16 swsci_val;
267 u32 dslp; 267 u32 dslp;
@@ -435,7 +435,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
435{ 435{
436 struct intel_connector *connector; 436 struct intel_connector *connector;
437 struct opregion_asle *asle = dev_priv->opregion.asle; 437 struct opregion_asle *asle = dev_priv->opregion.asle;
438 struct drm_device *dev = dev_priv->dev; 438 struct drm_device *dev = &dev_priv->drm;
439 439
440 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); 440 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
441 441
@@ -718,7 +718,7 @@ static u32 acpi_display_type(struct drm_connector *connector)
718static void intel_didl_outputs(struct drm_i915_private *dev_priv) 718static void intel_didl_outputs(struct drm_i915_private *dev_priv)
719{ 719{
720 struct intel_opregion *opregion = &dev_priv->opregion; 720 struct intel_opregion *opregion = &dev_priv->opregion;
721 struct pci_dev *pdev = dev_priv->dev->pdev; 721 struct pci_dev *pdev = dev_priv->drm.pdev;
722 struct drm_connector *connector; 722 struct drm_connector *connector;
723 acpi_handle handle; 723 acpi_handle handle;
724 struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; 724 struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
@@ -782,7 +782,8 @@ end:
782 782
783blind_set: 783blind_set:
784 i = 0; 784 i = 0;
785 list_for_each_entry(connector, &dev_priv->dev->mode_config.connector_list, head) { 785 list_for_each_entry(connector,
786 &dev_priv->drm.mode_config.connector_list, head) {
786 int display_type = acpi_display_type(connector); 787 int display_type = acpi_display_type(connector);
787 788
788 if (i >= max_outputs) { 789 if (i >= max_outputs) {
@@ -954,7 +955,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
954int intel_opregion_setup(struct drm_i915_private *dev_priv) 955int intel_opregion_setup(struct drm_i915_private *dev_priv)
955{ 956{
956 struct intel_opregion *opregion = &dev_priv->opregion; 957 struct intel_opregion *opregion = &dev_priv->opregion;
957 struct pci_dev *pdev = dev_priv->dev->pdev; 958 struct pci_dev *pdev = dev_priv->drm.pdev;
958 u32 asls, mboxes; 959 u32 asls, mboxes;
959 char buf[sizeof(OPREGION_SIGNATURE)]; 960 char buf[sizeof(OPREGION_SIGNATURE)];
960 int err = 0; 961 int err = 0;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index e9887d9321a3..3212d8806b5a 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -409,7 +409,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
409 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 409 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
410 int ret; 410 int ret;
411 411
412 lockdep_assert_held(&dev_priv->dev->struct_mutex); 412 lockdep_assert_held(&dev_priv->drm.struct_mutex);
413 413
414 /* Only wait if there is actually an old frame to release to 414 /* Only wait if there is actually an old frame to release to
415 * guarantee forward progress. 415 * guarantee forward progress.
@@ -741,8 +741,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
741 u32 swidth, swidthsw, sheight, ostride; 741 u32 swidth, swidthsw, sheight, ostride;
742 enum pipe pipe = overlay->crtc->pipe; 742 enum pipe pipe = overlay->crtc->pipe;
743 743
744 lockdep_assert_held(&dev_priv->dev->struct_mutex); 744 lockdep_assert_held(&dev_priv->drm.struct_mutex);
745 WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex)); 745 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
746 746
747 ret = intel_overlay_release_old_vid(overlay); 747 ret = intel_overlay_release_old_vid(overlay);
748 if (ret != 0) 748 if (ret != 0)
@@ -836,7 +836,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
836 overlay->old_vid_bo = overlay->vid_bo; 836 overlay->old_vid_bo = overlay->vid_bo;
837 overlay->vid_bo = new_bo; 837 overlay->vid_bo = new_bo;
838 838
839 intel_frontbuffer_flip(dev_priv->dev, INTEL_FRONTBUFFER_OVERLAY(pipe)); 839 intel_frontbuffer_flip(&dev_priv->drm,
840 INTEL_FRONTBUFFER_OVERLAY(pipe));
840 841
841 return 0; 842 return 0;
842 843
@@ -851,8 +852,8 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
851 struct overlay_registers __iomem *regs; 852 struct overlay_registers __iomem *regs;
852 int ret; 853 int ret;
853 854
854 lockdep_assert_held(&dev_priv->dev->struct_mutex); 855 lockdep_assert_held(&dev_priv->drm.struct_mutex);
855 WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex)); 856 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
856 857
857 ret = intel_overlay_recover_from_interrupt(overlay); 858 ret = intel_overlay_recover_from_interrupt(overlay);
858 if (ret != 0) 859 if (ret != 0)
@@ -1379,7 +1380,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
1379 if (!overlay) 1380 if (!overlay)
1380 return; 1381 return;
1381 1382
1382 mutex_lock(&dev_priv->dev->struct_mutex); 1383 mutex_lock(&dev_priv->drm.struct_mutex);
1383 if (WARN_ON(dev_priv->overlay)) 1384 if (WARN_ON(dev_priv->overlay))
1384 goto out_free; 1385 goto out_free;
1385 1386
@@ -1387,9 +1388,10 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
1387 1388
1388 reg_bo = NULL; 1389 reg_bo = NULL;
1389 if (!OVERLAY_NEEDS_PHYSICAL(dev_priv)) 1390 if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
1390 reg_bo = i915_gem_object_create_stolen(dev_priv->dev, PAGE_SIZE); 1391 reg_bo = i915_gem_object_create_stolen(&dev_priv->drm,
1392 PAGE_SIZE);
1391 if (reg_bo == NULL) 1393 if (reg_bo == NULL)
1392 reg_bo = i915_gem_object_create(dev_priv->dev, PAGE_SIZE); 1394 reg_bo = i915_gem_object_create(&dev_priv->drm, PAGE_SIZE);
1393 if (IS_ERR(reg_bo)) 1395 if (IS_ERR(reg_bo))
1394 goto out_free; 1396 goto out_free;
1395 overlay->reg_bo = reg_bo; 1397 overlay->reg_bo = reg_bo;
@@ -1434,7 +1436,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
1434 intel_overlay_unmap_regs(overlay, regs); 1436 intel_overlay_unmap_regs(overlay, regs);
1435 1437
1436 dev_priv->overlay = overlay; 1438 dev_priv->overlay = overlay;
1437 mutex_unlock(&dev_priv->dev->struct_mutex); 1439 mutex_unlock(&dev_priv->drm.struct_mutex);
1438 DRM_INFO("initialized overlay support\n"); 1440 DRM_INFO("initialized overlay support\n");
1439 return; 1441 return;
1440 1442
@@ -1444,7 +1446,7 @@ out_unpin_bo:
1444out_free_bo: 1446out_free_bo:
1445 drm_gem_object_unreference(&reg_bo->base); 1447 drm_gem_object_unreference(&reg_bo->base);
1446out_free: 1448out_free:
1447 mutex_unlock(&dev_priv->dev->struct_mutex); 1449 mutex_unlock(&dev_priv->drm.struct_mutex);
1448 kfree(overlay); 1450 kfree(overlay);
1449 return; 1451 return;
1450} 1452}
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a8b473350597..295f0ddca718 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -504,7 +504,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
504 if (panel->backlight.combination_mode) { 504 if (panel->backlight.combination_mode) {
505 u8 lbpc; 505 u8 lbpc;
506 506
507 pci_read_config_byte(dev_priv->dev->pdev, LBPC, &lbpc); 507 pci_read_config_byte(dev_priv->drm.pdev, LBPC, &lbpc);
508 val *= lbpc; 508 val *= lbpc;
509 } 509 }
510 510
@@ -592,7 +592,7 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
592 592
593 lbpc = level * 0xfe / panel->backlight.max + 1; 593 lbpc = level * 0xfe / panel->backlight.max + 1;
594 level /= lbpc; 594 level /= lbpc;
595 pci_write_config_byte(dev_priv->dev->pdev, LBPC, lbpc); 595 pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
596 } 596 }
597 597
598 if (IS_GEN4(dev_priv)) { 598 if (IS_GEN4(dev_priv)) {
@@ -822,7 +822,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
822 * backlight. This will leave the backlight on unnecessarily when 822 * backlight. This will leave the backlight on unnecessarily when
823 * another client is not activated. 823 * another client is not activated.
824 */ 824 */
825 if (dev_priv->dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) { 825 if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
826 DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n"); 826 DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
827 return; 827 return;
828 } 828 }
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0560b7c90244..5a8ee0c76593 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -319,7 +319,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
319 319
320void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) 320void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
321{ 321{
322 struct drm_device *dev = dev_priv->dev; 322 struct drm_device *dev = &dev_priv->drm;
323 u32 val; 323 u32 val;
324 324
325 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 325 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
@@ -2236,7 +2236,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
2236static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, 2236static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2237 uint16_t wm[5], uint16_t min) 2237 uint16_t wm[5], uint16_t min)
2238{ 2238{
2239 int level, max_level = ilk_wm_max_level(dev_priv->dev); 2239 int level, max_level = ilk_wm_max_level(&dev_priv->drm);
2240 2240
2241 if (wm[0] >= min) 2241 if (wm[0] >= min)
2242 return false; 2242 return false;
@@ -2765,7 +2765,7 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2765static void ilk_write_wm_values(struct drm_i915_private *dev_priv, 2765static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2766 struct ilk_wm_values *results) 2766 struct ilk_wm_values *results)
2767{ 2767{
2768 struct drm_device *dev = dev_priv->dev; 2768 struct drm_device *dev = &dev_priv->drm;
2769 struct ilk_wm_values *previous = &dev_priv->wm.hw; 2769 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2770 unsigned int dirty; 2770 unsigned int dirty;
2771 uint32_t val; 2771 uint32_t val;
@@ -3498,7 +3498,6 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3498 int level, 3498 int level,
3499 struct skl_wm_level *result) 3499 struct skl_wm_level *result)
3500{ 3500{
3501 struct drm_device *dev = dev_priv->dev;
3502 struct drm_atomic_state *state = cstate->base.state; 3501 struct drm_atomic_state *state = cstate->base.state;
3503 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 3502 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3504 struct drm_plane *plane; 3503 struct drm_plane *plane;
@@ -3514,7 +3513,9 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3514 */ 3513 */
3515 memset(result, 0, sizeof(*result)); 3514 memset(result, 0, sizeof(*result));
3516 3515
3517 for_each_intel_plane_mask(dev, intel_plane, cstate->base.plane_mask) { 3516 for_each_intel_plane_mask(&dev_priv->drm,
3517 intel_plane,
3518 cstate->base.plane_mask) {
3518 int i = skl_wm_plane_id(intel_plane); 3519 int i = skl_wm_plane_id(intel_plane);
3519 3520
3520 plane = &intel_plane->base; 3521 plane = &intel_plane->base;
@@ -3682,7 +3683,7 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
3682static void skl_write_wm_values(struct drm_i915_private *dev_priv, 3683static void skl_write_wm_values(struct drm_i915_private *dev_priv,
3683 const struct skl_wm_values *new) 3684 const struct skl_wm_values *new)
3684{ 3685{
3685 struct drm_device *dev = dev_priv->dev; 3686 struct drm_device *dev = &dev_priv->drm;
3686 struct intel_crtc *crtc; 3687 struct intel_crtc *crtc;
3687 3688
3688 for_each_intel_crtc(dev, crtc) { 3689 for_each_intel_crtc(dev, crtc) {
@@ -3779,7 +3780,7 @@ skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
3779static void skl_flush_wm_values(struct drm_i915_private *dev_priv, 3780static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
3780 struct skl_wm_values *new_values) 3781 struct skl_wm_values *new_values)
3781{ 3782{
3782 struct drm_device *dev = dev_priv->dev; 3783 struct drm_device *dev = &dev_priv->drm;
3783 struct skl_ddb_allocation *cur_ddb, *new_ddb; 3784 struct skl_ddb_allocation *cur_ddb, *new_ddb;
3784 bool reallocated[I915_MAX_PIPES] = {}; 3785 bool reallocated[I915_MAX_PIPES] = {};
3785 struct intel_crtc *crtc; 3786 struct intel_crtc *crtc;
@@ -4056,7 +4057,7 @@ static void ilk_compute_wm_config(struct drm_device *dev,
4056 4057
4057static void ilk_program_watermarks(struct drm_i915_private *dev_priv) 4058static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
4058{ 4059{
4059 struct drm_device *dev = dev_priv->dev; 4060 struct drm_device *dev = &dev_priv->drm;
4060 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; 4061 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
4061 struct ilk_wm_maximums max; 4062 struct ilk_wm_maximums max;
4062 struct intel_wm_config config = {}; 4063 struct intel_wm_config config = {};
@@ -5699,7 +5700,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
5699 u32 pcbr; 5700 u32 pcbr;
5700 int pctx_size = 24*1024; 5701 int pctx_size = 24*1024;
5701 5702
5702 mutex_lock(&dev_priv->dev->struct_mutex); 5703 mutex_lock(&dev_priv->drm.struct_mutex);
5703 5704
5704 pcbr = I915_READ(VLV_PCBR); 5705 pcbr = I915_READ(VLV_PCBR);
5705 if (pcbr) { 5706 if (pcbr) {
@@ -5707,7 +5708,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
5707 int pcbr_offset; 5708 int pcbr_offset;
5708 5709
5709 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base; 5710 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
5710 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev, 5711 pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
5711 pcbr_offset, 5712 pcbr_offset,
5712 I915_GTT_OFFSET_NONE, 5713 I915_GTT_OFFSET_NONE,
5713 pctx_size); 5714 pctx_size);
@@ -5724,7 +5725,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
5724 * overlap with other ranges, such as the frame buffer, protected 5725 * overlap with other ranges, such as the frame buffer, protected
5725 * memory, or any other relevant ranges. 5726 * memory, or any other relevant ranges.
5726 */ 5727 */
5727 pctx = i915_gem_object_create_stolen(dev_priv->dev, pctx_size); 5728 pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
5728 if (!pctx) { 5729 if (!pctx) {
5729 DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); 5730 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
5730 goto out; 5731 goto out;
@@ -5736,7 +5737,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
5736out: 5737out:
5737 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); 5738 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5738 dev_priv->vlv_pctx = pctx; 5739 dev_priv->vlv_pctx = pctx;
5739 mutex_unlock(&dev_priv->dev->struct_mutex); 5740 mutex_unlock(&dev_priv->drm.struct_mutex);
5740} 5741}
5741 5742
5742static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv) 5743static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
@@ -6680,9 +6681,9 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
6680 6681
6681 if (IS_IRONLAKE_M(dev_priv)) { 6682 if (IS_IRONLAKE_M(dev_priv)) {
6682 ironlake_enable_drps(dev_priv); 6683 ironlake_enable_drps(dev_priv);
6683 mutex_lock(&dev_priv->dev->struct_mutex); 6684 mutex_lock(&dev_priv->drm.struct_mutex);
6684 intel_init_emon(dev_priv); 6685 intel_init_emon(dev_priv);
6685 mutex_unlock(&dev_priv->dev->struct_mutex); 6686 mutex_unlock(&dev_priv->drm.struct_mutex);
6686 } else if (INTEL_INFO(dev_priv)->gen >= 6) { 6687 } else if (INTEL_INFO(dev_priv)->gen >= 6) {
6687 /* 6688 /*
6688 * PCU communication is slow and this doesn't need to be 6689 * PCU communication is slow and this doesn't need to be
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 428ed62dc5de..61e00bf9e87f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -657,9 +657,9 @@ int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
657 657
658 WARN_ON(engine->scratch.obj); 658 WARN_ON(engine->scratch.obj);
659 659
660 obj = i915_gem_object_create_stolen(engine->i915->dev, size); 660 obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
661 if (!obj) 661 if (!obj)
662 obj = i915_gem_object_create(engine->i915->dev, size); 662 obj = i915_gem_object_create(&engine->i915->drm, size);
663 if (IS_ERR(obj)) { 663 if (IS_ERR(obj)) {
664 DRM_ERROR("Failed to allocate scratch page\n"); 664 DRM_ERROR("Failed to allocate scratch page\n");
665 ret = PTR_ERR(obj); 665 ret = PTR_ERR(obj);
@@ -1888,7 +1888,7 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine)
1888 if (!dev_priv->status_page_dmah) 1888 if (!dev_priv->status_page_dmah)
1889 return; 1889 return;
1890 1890
1891 drm_pci_free(dev_priv->dev, dev_priv->status_page_dmah); 1891 drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
1892 engine->status_page.page_addr = NULL; 1892 engine->status_page.page_addr = NULL;
1893} 1893}
1894 1894
@@ -1914,7 +1914,7 @@ static int init_status_page(struct intel_engine_cs *engine)
1914 unsigned flags; 1914 unsigned flags;
1915 int ret; 1915 int ret;
1916 1916
1917 obj = i915_gem_object_create(engine->i915->dev, 4096); 1917 obj = i915_gem_object_create(&engine->i915->drm, 4096);
1918 if (IS_ERR(obj)) { 1918 if (IS_ERR(obj)) {
1919 DRM_ERROR("Failed to allocate status page\n"); 1919 DRM_ERROR("Failed to allocate status page\n");
1920 return PTR_ERR(obj); 1920 return PTR_ERR(obj);
@@ -1963,7 +1963,7 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
1963 1963
1964 if (!dev_priv->status_page_dmah) { 1964 if (!dev_priv->status_page_dmah) {
1965 dev_priv->status_page_dmah = 1965 dev_priv->status_page_dmah =
1966 drm_pci_alloc(dev_priv->dev, PAGE_SIZE, PAGE_SIZE); 1966 drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
1967 if (!dev_priv->status_page_dmah) 1967 if (!dev_priv->status_page_dmah)
1968 return -ENOMEM; 1968 return -ENOMEM;
1969 } 1969 }
@@ -2096,7 +2096,7 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
2096 ring->last_retired_head = -1; 2096 ring->last_retired_head = -1;
2097 intel_ring_update_space(ring); 2097 intel_ring_update_space(ring);
2098 2098
2099 ret = intel_alloc_ringbuffer_obj(engine->i915->dev, ring); 2099 ret = intel_alloc_ringbuffer_obj(&engine->i915->drm, ring);
2100 if (ret) { 2100 if (ret) {
2101 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n", 2101 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
2102 engine->name, ret); 2102 engine->name, ret);
@@ -2122,7 +2122,7 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
2122 struct intel_context *ce = &ctx->engine[engine->id]; 2122 struct intel_context *ce = &ctx->engine[engine->id];
2123 int ret; 2123 int ret;
2124 2124
2125 lockdep_assert_held(&ctx->i915->dev->struct_mutex); 2125 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
2126 2126
2127 if (ce->pin_count++) 2127 if (ce->pin_count++)
2128 return 0; 2128 return 0;
@@ -2156,7 +2156,7 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx,
2156{ 2156{
2157 struct intel_context *ce = &ctx->engine[engine->id]; 2157 struct intel_context *ce = &ctx->engine[engine->id];
2158 2158
2159 lockdep_assert_held(&ctx->i915->dev->struct_mutex); 2159 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
2160 2160
2161 if (--ce->pin_count) 2161 if (--ce->pin_count)
2162 return; 2162 return;
@@ -2696,7 +2696,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
2696 return; 2696 return;
2697 2697
2698 if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) { 2698 if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) {
2699 obj = i915_gem_object_create(dev_priv->dev, 4096); 2699 obj = i915_gem_object_create(&dev_priv->drm, 4096);
2700 if (IS_ERR(obj)) { 2700 if (IS_ERR(obj)) {
2701 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n"); 2701 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
2702 i915.semaphores = 0; 2702 i915.semaphores = 0;
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 7cbba42f0ab4..6b78295f53db 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -287,7 +287,7 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
287 */ 287 */
288static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) 288static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
289{ 289{
290 struct drm_device *dev = dev_priv->dev; 290 struct drm_device *dev = &dev_priv->drm;
291 291
292 /* 292 /*
293 * After we re-enable the power well, if we touch VGA register 0x3d5 293 * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -318,7 +318,7 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
318static void skl_power_well_post_enable(struct drm_i915_private *dev_priv, 318static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
319 struct i915_power_well *power_well) 319 struct i915_power_well *power_well)
320{ 320{
321 struct drm_device *dev = dev_priv->dev; 321 struct drm_device *dev = &dev_priv->drm;
322 322
323 /* 323 /*
324 * After we re-enable the power well, if we touch VGA register 0x3d5 324 * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -930,7 +930,7 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
930 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 930 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
931 931
932 WARN_ON(dev_priv->cdclk_freq != 932 WARN_ON(dev_priv->cdclk_freq !=
933 dev_priv->display.get_display_clock_speed(dev_priv->dev)); 933 dev_priv->display.get_display_clock_speed(&dev_priv->drm));
934 934
935 gen9_assert_dbuf_enabled(dev_priv); 935 gen9_assert_dbuf_enabled(dev_priv);
936 936
@@ -1088,7 +1088,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1088 * 1088 *
1089 * CHV DPLL B/C have some issues if VGA mode is enabled. 1089 * CHV DPLL B/C have some issues if VGA mode is enabled.
1090 */ 1090 */
1091 for_each_pipe(dev_priv->dev, pipe) { 1091 for_each_pipe(&dev_priv->drm, pipe) {
1092 u32 val = I915_READ(DPLL(pipe)); 1092 u32 val = I915_READ(DPLL(pipe));
1093 1093
1094 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1094 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1113,7 +1113,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1113 1113
1114 intel_hpd_init(dev_priv); 1114 intel_hpd_init(dev_priv);
1115 1115
1116 i915_redisable_vga_power_on(dev_priv->dev); 1116 i915_redisable_vga_power_on(&dev_priv->drm);
1117} 1117}
1118 1118
1119static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) 1119static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
@@ -1123,7 +1123,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1123 spin_unlock_irq(&dev_priv->irq_lock); 1123 spin_unlock_irq(&dev_priv->irq_lock);
1124 1124
1125 /* make sure we're done processing display irqs */ 1125 /* make sure we're done processing display irqs */
1126 synchronize_irq(dev_priv->dev->irq); 1126 synchronize_irq(dev_priv->drm.irq);
1127 1127
1128 intel_power_sequencer_reset(dev_priv); 1128 intel_power_sequencer_reset(dev_priv);
1129} 1129}
@@ -2275,7 +2275,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
2275 */ 2275 */
2276void intel_power_domains_fini(struct drm_i915_private *dev_priv) 2276void intel_power_domains_fini(struct drm_i915_private *dev_priv)
2277{ 2277{
2278 struct device *device = &dev_priv->dev->pdev->dev; 2278 struct device *device = &dev_priv->drm.pdev->dev;
2279 2279
2280 /* 2280 /*
2281 * The i915.ko module is still not prepared to be loaded when 2281 * The i915.ko module is still not prepared to be loaded when
@@ -2576,7 +2576,7 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
2576 */ 2576 */
2577void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) 2577void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
2578{ 2578{
2579 struct drm_device *dev = dev_priv->dev; 2579 struct drm_device *dev = &dev_priv->drm;
2580 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2580 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2581 2581
2582 power_domains->initializing = true; 2582 power_domains->initializing = true;
@@ -2638,7 +2638,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
2638 */ 2638 */
2639void intel_runtime_pm_get(struct drm_i915_private *dev_priv) 2639void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2640{ 2640{
2641 struct drm_device *dev = dev_priv->dev; 2641 struct drm_device *dev = &dev_priv->drm;
2642 struct device *device = &dev->pdev->dev; 2642 struct device *device = &dev->pdev->dev;
2643 2643
2644 pm_runtime_get_sync(device); 2644 pm_runtime_get_sync(device);
@@ -2659,7 +2659,7 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2659 */ 2659 */
2660bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv) 2660bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
2661{ 2661{
2662 struct drm_device *dev = dev_priv->dev; 2662 struct drm_device *dev = &dev_priv->drm;
2663 struct device *device = &dev->pdev->dev; 2663 struct device *device = &dev->pdev->dev;
2664 2664
2665 if (IS_ENABLED(CONFIG_PM)) { 2665 if (IS_ENABLED(CONFIG_PM)) {
@@ -2701,7 +2701,7 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
2701 */ 2701 */
2702void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv) 2702void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
2703{ 2703{
2704 struct drm_device *dev = dev_priv->dev; 2704 struct drm_device *dev = &dev_priv->drm;
2705 struct device *device = &dev->pdev->dev; 2705 struct device *device = &dev->pdev->dev;
2706 2706
2707 assert_rpm_wakelock_held(dev_priv); 2707 assert_rpm_wakelock_held(dev_priv);
@@ -2720,7 +2720,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
2720 */ 2720 */
2721void intel_runtime_pm_put(struct drm_i915_private *dev_priv) 2721void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
2722{ 2722{
2723 struct drm_device *dev = dev_priv->dev; 2723 struct drm_device *dev = &dev_priv->drm;
2724 struct device *device = &dev->pdev->dev; 2724 struct device *device = &dev->pdev->dev;
2725 2725
2726 assert_rpm_wakelock_held(dev_priv); 2726 assert_rpm_wakelock_held(dev_priv);
@@ -2743,7 +2743,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
2743 */ 2743 */
2744void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) 2744void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
2745{ 2745{
2746 struct drm_device *dev = dev_priv->dev; 2746 struct drm_device *dev = &dev_priv->drm;
2747 struct device *device = &dev->pdev->dev; 2747 struct device *device = &dev->pdev->dev;
2748 2748
2749 pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */ 2749 pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 321121e8fa2c..e378f35365a2 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1471,7 +1471,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
1471 temp &= ~SDVO_ENABLE; 1471 temp &= ~SDVO_ENABLE;
1472 intel_sdvo_write_sdvox(intel_sdvo, temp); 1472 intel_sdvo_write_sdvox(intel_sdvo, temp);
1473 1473
1474 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A); 1474 intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
1475 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); 1475 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
1476 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); 1476 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
1477 } 1477 }
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 1d65209c0998..ff80a81b1a84 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1471,7 +1471,7 @@ static int i915_reset_complete(struct pci_dev *pdev)
1471 1471
1472static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) 1472static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1473{ 1473{
1474 struct pci_dev *pdev = dev_priv->dev->pdev; 1474 struct pci_dev *pdev = dev_priv->drm.pdev;
1475 1475
1476 /* assert reset for at least 20 usec */ 1476 /* assert reset for at least 20 usec */
1477 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1477 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
@@ -1490,14 +1490,14 @@ static int g4x_reset_complete(struct pci_dev *pdev)
1490 1490
1491static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) 1491static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1492{ 1492{
1493 struct pci_dev *pdev = dev_priv->dev->pdev; 1493 struct pci_dev *pdev = dev_priv->drm.pdev;
1494 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1494 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1495 return wait_for(g4x_reset_complete(pdev), 500); 1495 return wait_for(g4x_reset_complete(pdev), 500);
1496} 1496}
1497 1497
1498static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) 1498static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1499{ 1499{
1500 struct pci_dev *pdev = dev_priv->dev->pdev; 1500 struct pci_dev *pdev = dev_priv->drm.pdev;
1501 int ret; 1501 int ret;
1502 1502
1503 pci_write_config_byte(pdev, I915_GDRST, 1503 pci_write_config_byte(pdev, I915_GDRST,