Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 6
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 1
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 1
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c | 6
-rw-r--r--  drivers/gpu/drm/drm_context.c | 2
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 102
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 14
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 16
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 72
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 190
-rw-r--r--  drivers/gpu/drm/drm_edid_load.c | 108
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 13
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 68
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 29
-rw-r--r--  drivers/gpu/drm/drm_global.c | 2
-rw-r--r--  drivers/gpu/drm/drm_info.c | 6
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 21
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 152
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 3
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 41
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 65
-rw-r--r--  drivers/gpu/drm/drm_platform.c | 59
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 3
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 301
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 94
-rw-r--r--  drivers/gpu/drm/drm_usb.c | 57
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 16
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/gem.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 9
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.c | 22
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 11
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 3
-rw-r--r--  drivers/gpu/drm/i915/dvo.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 239
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 75
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 81
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 195
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 469
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 44
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 50
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 366
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 32
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 207
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 521
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 142
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 62
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 195
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 87
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 44
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 96
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1200
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 602
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 522
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 620
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 102
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_cmd.c | 427
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_cmd.h | 109
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_pll.c | 317
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 69
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 64
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 431
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 205
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 547
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 71
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_sideband.c | 74
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 49
-rw-r--r--  drivers/gpu/drm/mga/mga_dma.c | 5
-rw-r--r--  drivers/gpu/drm/mga/mga_irq.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/arb.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/dfp.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/hw.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c | 10
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_irq.c | 17
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_gem.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c | 1
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h | 1
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 7
-rw-r--r--  drivers/gpu/drm/via/via_mm.c | 2
-rw-r--r--  drivers/gpu/host1x/drm/drm.c | 2
129 files changed, 7169 insertions, 3064 deletions
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index e8605bf738b3..d8e398275ca8 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -894,7 +894,7 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
 	 * and a mode_set.
 	 */
 static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
-	struct drm_framebuffer *fb, struct drm_pending_vblank_event *event)
+	struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
 {
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
 	struct armada_frame_work *work;
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 7aede900a221..4f2b28354915 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -349,7 +349,6 @@ static struct drm_driver armada_drm_driver = {
 	.debugfs_init = armada_drm_debugfs_init,
 	.debugfs_cleanup = armada_drm_debugfs_cleanup,
 #endif
-	.gem_init_object = NULL,
 	.gem_free_object = armada_gem_free_object,
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 32e270dc714e..5137f15dba19 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -211,7 +211,6 @@ static struct drm_driver driver = {
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
 
-	.gem_init_object = ast_gem_init_object,
 	.gem_free_object = ast_gem_free_object,
 	.dumb_create = ast_dumb_create,
 	.dumb_map_offset = ast_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 8492b68e873c..9833a1b1acc1 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -323,7 +323,6 @@ extern int ast_dumb_create(struct drm_file *file,
 			   struct drm_device *dev,
 			   struct drm_mode_create_dumb *args);
 
-extern int ast_gem_init_object(struct drm_gem_object *obj);
 extern void ast_gem_free_object(struct drm_gem_object *obj);
 extern int ast_dumb_mmap_offset(struct drm_file *file,
 				struct drm_device *dev,
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 7f6152d374ca..af0b868a9dfd 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -449,12 +449,6 @@ int ast_dumb_create(struct drm_file *file,
 	return 0;
 }
 
-int ast_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-	return 0;
-}
-
 void ast_bo_unref(struct ast_bo **bo)
 {
 	struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 138364d91782..953fc8aea69c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -97,7 +97,6 @@ static struct drm_driver driver = {
 	.major = DRIVER_MAJOR,
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
-	.gem_init_object = cirrus_gem_init_object,
 	.gem_free_object = cirrus_gem_free_object,
 	.dumb_create = cirrus_dumb_create,
 	.dumb_map_offset = cirrus_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 9b0bb9184afd..b6aded73838b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -191,7 +191,6 @@ int cirrus_device_init(struct cirrus_device *cdev,
 			      struct pci_dev *pdev,
 			      uint32_t flags);
 void cirrus_device_fini(struct cirrus_device *cdev);
-int cirrus_gem_init_object(struct drm_gem_object *obj);
 void cirrus_gem_free_object(struct drm_gem_object *obj);
 int cirrus_dumb_mmap_offset(struct drm_file *file,
 			    struct drm_device *dev,
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index f130a533a512..78e76f24343d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -255,12 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
 	return 0;
 }
 
-int cirrus_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-	return 0;
-}
-
 void cirrus_bo_unref(struct cirrus_bo **bo)
 {
 	struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 224ff965bcf7..a4b017b6849e 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -334,7 +334,6 @@ int drm_addctx(struct drm_device *dev, void *data,
 
 	mutex_lock(&dev->ctxlist_mutex);
 	list_add(&ctx_entry->head, &dev->ctxlist);
-	++dev->ctx_count;
 	mutex_unlock(&dev->ctxlist_mutex);
 
 	return 0;
@@ -432,7 +431,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
 			if (pos->handle == ctx->handle) {
 				list_del(&pos->head);
 				kfree(pos);
-				--dev->ctx_count;
 			}
 		}
 	}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index bff2fa941f60..d7a8370e3cdc 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -202,6 +202,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
 	{ DRM_MODE_CONNECTOR_TV, "TV" },
 	{ DRM_MODE_CONNECTOR_eDP, "eDP" },
 	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
+	{ DRM_MODE_CONNECTOR_DSI, "DSI" },
 };
 
 static const struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -211,6 +212,7 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
 	{ DRM_MODE_ENCODER_LVDS, "LVDS" },
 	{ DRM_MODE_ENCODER_TVDAC, "TV" },
 	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+	{ DRM_MODE_ENCODER_DSI, "DSI" },
 };
 
 void drm_connector_ida_init(void)
@@ -1317,6 +1319,9 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
 	if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
 		return -ERANGE;
 
+	if ((in->flags & DRM_MODE_FLAG_3D_MASK) > DRM_MODE_FLAG_3D_MAX)
+		return -EINVAL;
+
 	out->clock = in->clock;
 	out->hdisplay = in->hdisplay;
 	out->hsync_start = in->hsync_start;
@@ -1579,6 +1584,19 @@ out:
 	return ret;
 }
 
+static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
+					 const struct drm_file *file_priv)
+{
+	/*
+	 * If user-space hasn't configured the driver to expose the stereo 3D
+	 * modes, don't expose them.
+	 */
+	if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
+		return false;
+
+	return true;
+}
+
 /**
  * drm_mode_getconnector - get connector configuration
  * @dev: drm device for the ioctl
@@ -1644,7 +1662,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 
 	/* delayed so we get modes regardless of pre-fill_modes state */
 	list_for_each_entry(mode, &connector->modes, head)
-		mode_count++;
+		if (drm_mode_expose_to_userspace(mode, file_priv))
+			mode_count++;
 
 	out_resp->connector_id = connector->base.id;
 	out_resp->connector_type = connector->connector_type;
@@ -1666,6 +1685,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 		copied = 0;
 		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
 		list_for_each_entry(mode, &connector->modes, head) {
+			if (!drm_mode_expose_to_userspace(mode, file_priv))
+				continue;
+
 			drm_crtc_convert_to_umode(&u_mode, mode);
 			if (copy_to_user(mode_ptr + copied,
 					 &u_mode, sizeof(u_mode))) {
@@ -2040,6 +2062,45 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
 }
 EXPORT_SYMBOL(drm_mode_set_config_internal);
 
+/*
+ * Checks that the framebuffer is big enough for the CRTC viewport
+ * (x, y, hdisplay, vdisplay)
+ */
+static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
+				   int x, int y,
+				   const struct drm_display_mode *mode,
+				   const struct drm_framebuffer *fb)
+
+{
+	int hdisplay, vdisplay;
+
+	hdisplay = mode->hdisplay;
+	vdisplay = mode->vdisplay;
+
+	if (drm_mode_is_stereo(mode)) {
+		struct drm_display_mode adjusted = *mode;
+
+		drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
+		hdisplay = adjusted.crtc_hdisplay;
+		vdisplay = adjusted.crtc_vdisplay;
+	}
+
+	if (crtc->invert_dimensions)
+		swap(hdisplay, vdisplay);
+
+	if (hdisplay > fb->width ||
+	    vdisplay > fb->height ||
+	    x > fb->width - hdisplay ||
+	    y > fb->height - vdisplay) {
+		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+			      fb->width, fb->height, hdisplay, vdisplay, x, y,
+			      crtc->invert_dimensions ? " (inverted)" : "");
+		return -ENOSPC;
+	}
+
+	return 0;
+}
+
 /**
  * drm_mode_setcrtc - set CRTC configuration
  * @dev: drm device for the ioctl
@@ -2087,7 +2148,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
 	if (crtc_req->mode_valid) {
-		int hdisplay, vdisplay;
 		/* If we have a mode we need a framebuffer. */
 		/* If we pass -1, set the mode with the currently bound fb */
 		if (crtc_req->fb_id == -1) {
@@ -2123,23 +2183,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 
 		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 
-		hdisplay = mode->hdisplay;
-		vdisplay = mode->vdisplay;
-
-		if (crtc->invert_dimensions)
-			swap(hdisplay, vdisplay);
-
-		if (hdisplay > fb->width ||
-		    vdisplay > fb->height ||
-		    crtc_req->x > fb->width - hdisplay ||
-		    crtc_req->y > fb->height - vdisplay) {
-			DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
-				      fb->width, fb->height,
-				      hdisplay, vdisplay, crtc_req->x, crtc_req->y,
-				      crtc->invert_dimensions ? " (inverted)" : "");
-			ret = -ENOSPC;
+		ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
+					      mode, fb);
+		if (ret)
 			goto out;
-		}
+
 	}
 
 	if (crtc_req->count_connectors == 0 && mode) {
@@ -3556,7 +3604,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 	struct drm_framebuffer *fb = NULL, *old_fb = NULL;
 	struct drm_pending_vblank_event *e = NULL;
 	unsigned long flags;
-	int hdisplay, vdisplay;
 	int ret = -EINVAL;
 
 	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
@@ -3588,22 +3635,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 	if (!fb)
 		goto out;
 
-	hdisplay = crtc->mode.hdisplay;
-	vdisplay = crtc->mode.vdisplay;
-
-	if (crtc->invert_dimensions)
-		swap(hdisplay, vdisplay);
-
-	if (hdisplay > fb->width ||
-	    vdisplay > fb->height ||
-	    crtc->x > fb->width - hdisplay ||
-	    crtc->y > fb->height - vdisplay) {
-		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
-			      fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
-			      crtc->invert_dimensions ? " (inverted)" : "");
-		ret = -ENOSPC;
+	ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
+	if (ret)
 		goto out;
-	}
 
 	if (crtc->fb->pixel_format != fb->pixel_format) {
 		DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index c722c3b5404d..5fcb9d487672 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -76,7 +76,8 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
 {
 	struct drm_display_mode *mode;
 
-	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
+	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
+		      DRM_MODE_FLAG_3D_MASK))
 		return;
 
 	list_for_each_entry(mode, &connector->modes, head) {
@@ -86,6 +87,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
 		if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
 		    !(flags & DRM_MODE_FLAG_DBLSCAN))
 			mode->status = MODE_NO_DBLESCAN;
+		if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
+		    !(flags & DRM_MODE_FLAG_3D_MASK))
+			mode->status = MODE_NO_STEREO;
 	}
 
 	return;
@@ -105,9 +109,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
  * then culled (based on validity and the @maxX, @maxY parameters) and put into
  * the normal modes list.
  *
- * Intended to be use as a generic implementation of the ->probe() @connector
- * callback for drivers that use the crtc helpers for output mode filtering and
- * detection.
+ * Intended to be use as a generic implementation of the ->fill_modes()
+ * @connector vfunc for drivers that use the crtc helpers for output mode
+ * filtering and detection.
  *
  * RETURNS:
  * Number of modes found on @connector.
@@ -175,6 +179,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 			mode_flags |= DRM_MODE_FLAG_INTERLACE;
 		if (connector->doublescan_allowed)
 			mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+		if (connector->stereo_allowed)
+			mode_flags |= DRM_MODE_FLAG_3D_MASK;
 		drm_mode_validate_flag(connector, mode_flags);
 
 	list_for_each_entry(mode, &connector->modes, head) {
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 89e196627160..9e978aae8972 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -228,12 +228,12 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
 EXPORT_SYMBOL(i2c_dp_aux_add_bus);
 
 /* Helpers for DP link training */
-static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
 {
 	return link_status[r - DP_LANE0_1_STATUS];
 }
 
-static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+static u8 dp_get_lane_status(const u8 link_status[DP_LINK_STATUS_SIZE],
 			     int lane)
 {
 	int i = DP_LANE0_1_STATUS + (lane >> 1);
@@ -242,7 +242,7 @@ static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
 	return (l >> s) & 0xf;
 }
 
-bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
 			  int lane_count)
 {
 	u8 lane_align;
@@ -262,7 +262,7 @@ bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_channel_eq_ok);
 
-bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
 			      int lane_count)
 {
 	int lane;
@@ -277,7 +277,7 @@ bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
 
-u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
 				     int lane)
 {
 	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -290,7 +290,7 @@ u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
 
-u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
 					  int lane)
 {
 	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -303,7 +303,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
 
-void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
 		udelay(100);
 	else
@@ -311,7 +311,7 @@ void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 }
 EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
 
-void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
 		udelay(400);
 	else
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index e572dd20bdee..b55f138bd990 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -69,6 +69,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -170,76 +171,6 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 
 #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
 
-/**
- * drm_legacy_dev_reinit
- *
- * Reinitializes a legacy/ums drm device in it's lastclose function.
- */
-static void drm_legacy_dev_reinit(struct drm_device *dev)
-{
-	int i;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
-	atomic_set(&dev->ioctl_count, 0);
-	atomic_set(&dev->vma_count, 0);
-
-	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
-		atomic_set(&dev->counts[i], 0);
-
-	dev->sigdata.lock = NULL;
-
-	dev->context_flag = 0;
-	dev->last_context = 0;
-	dev->if_version = 0;
-}
-
-/**
- * Take down the DRM device.
- *
- * \param dev DRM device structure.
- *
- * Frees every resource in \p dev.
- *
- * \sa drm_device
- */
-int drm_lastclose(struct drm_device * dev)
-{
-	struct drm_vma_entry *vma, *vma_temp;
-
-	DRM_DEBUG("\n");
-
-	if (dev->driver->lastclose)
-		dev->driver->lastclose(dev);
-	DRM_DEBUG("driver lastclose completed\n");
-
-	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_irq_uninstall(dev);
-
-	mutex_lock(&dev->struct_mutex);
-
-	drm_agp_clear(dev);
-
-	drm_legacy_sg_cleanup(dev);
-
-	/* Clear vma list (only built for debugging) */
-	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
-		list_del(&vma->head);
-		kfree(vma);
-	}
-
-	drm_legacy_dma_takedown(dev);
-
-	dev->dev_mapping = NULL;
-	mutex_unlock(&dev->struct_mutex);
-
-	drm_legacy_dev_reinit(dev);
-
-	DRM_DEBUG("lastclose completed\n");
-	return 0;
-}
-
 /** File operations structure */
 static const struct file_operations drm_stub_fops = {
 	.owner = THIS_MODULE,
@@ -385,7 +316,6 @@ long drm_ioctl(struct file *filp,
 		return -ENODEV;
 
 	atomic_inc(&dev->ioctl_count);
-	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
 	++file_priv->ioctl_count;
 
 	if ((nr >= DRM_CORE_IOCTL_COUNT) &&
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 1688ff500513..9e81609b1e29 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1264,6 +1264,18 @@ struct edid *drm_get_edid(struct drm_connector *connector,
 }
 EXPORT_SYMBOL(drm_get_edid);
 
+/**
+ * drm_edid_duplicate - duplicate an EDID and the extensions
+ * @edid: EDID to duplicate
+ *
+ * Return duplicate edid or NULL on allocation failure.
+ */
+struct edid *drm_edid_duplicate(const struct edid *edid)
+{
+	return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_edid_duplicate);
+
 /*** EDID parsing ***/
 
 /**
@@ -2404,7 +2416,7 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
 
 		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
 		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
-		    drm_mode_equal_no_clocks(to_match, cea_mode))
+		    drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
 			return mode + 1;
 	}
 	return 0;
@@ -2453,7 +2465,7 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
 
 		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
 		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
-		    drm_mode_equal_no_clocks(to_match, hdmi_mode))
+		    drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
 			return mode + 1;
 	}
 	return 0;
@@ -2507,6 +2519,9 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
 		if (!newmode)
 			continue;
 
+		/* Carry over the stereo flags */
+		newmode->flags |= mode->flags & DRM_MODE_FLAG_3D_MASK;
+
 		/*
 		 * The current mode could be either variant. Make
 		 * sure to pick the "other" clock for the new mode.
@@ -2553,18 +2568,102 @@ do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
 	return modes;
 }
 
+struct stereo_mandatory_mode {
+	int width, height, vrefresh;
+	unsigned int flags;
+};
+
+static const struct stereo_mandatory_mode stereo_mandatory_modes[] = {
+	{ 1920, 1080, 24, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+	{ 1920, 1080, 24, DRM_MODE_FLAG_3D_FRAME_PACKING },
+	{ 1920, 1080, 50,
+	  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+	{ 1920, 1080, 60,
+	  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+	{ 1280, 720, 50, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+	{ 1280, 720, 50, DRM_MODE_FLAG_3D_FRAME_PACKING },
+	{ 1280, 720, 60, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+	{ 1280, 720, 60, DRM_MODE_FLAG_3D_FRAME_PACKING }
+};
+
+static bool
+stereo_match_mandatory(const struct drm_display_mode *mode,
+		       const struct stereo_mandatory_mode *stereo_mode)
+{
+	unsigned int interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+
+	return mode->hdisplay == stereo_mode->width &&
+	       mode->vdisplay == stereo_mode->height &&
+	       interlaced == (stereo_mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+	       drm_mode_vrefresh(mode) == stereo_mode->vrefresh;
+}
+
+static int add_hdmi_mandatory_stereo_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	const struct drm_display_mode *mode;
+	struct list_head stereo_modes;
+	int modes = 0, i;
+
+	INIT_LIST_HEAD(&stereo_modes);
+
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		for (i = 0; i < ARRAY_SIZE(stereo_mandatory_modes); i++) {
+			const struct stereo_mandatory_mode *mandatory;
+			struct drm_display_mode *new_mode;
+
+			if (!stereo_match_mandatory(mode,
+						    &stereo_mandatory_modes[i]))
+				continue;
+
+			mandatory = &stereo_mandatory_modes[i];
+			new_mode = drm_mode_duplicate(dev, mode);
+			if (!new_mode)
+				continue;
+
+			new_mode->flags |= mandatory->flags;
+			list_add_tail(&new_mode->head, &stereo_modes);
+			modes++;
+		}
+	}
+
+	list_splice_tail(&stereo_modes, &connector->probed_modes);
+
+	return modes;
+}
+
+static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *newmode;
+
+	vic--; /* VICs start at 1 */
+	if (vic >= ARRAY_SIZE(edid_4k_modes)) {
+		DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
+		return 0;
+	}
+
+	newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
+	if (!newmode)
+		return 0;
+
+	drm_mode_probed_add(connector, newmode);
+
+	return 1;
+}
+
 /*
  * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
  * @connector: connector corresponding to the HDMI sink
  * @db: start of the CEA vendor specific block
  * @len: length of the CEA block payload, ie. one can access up to db[len]
  *
- * Parses the HDMI VSDB looking for modes to add to @connector.
+ * Parses the HDMI VSDB looking for modes to add to @connector. This function
+ * also adds the stereo 3d modes when applicable.
  */
 static int
 do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
 {
-	struct drm_device *dev = connector->dev;
 	int modes = 0, offset = 0, i;
 	u8 vic_len;
 
@@ -2585,30 +2684,22 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
 
 	/* the declared length is not long enough for the 2 first bytes
 	 * of additional video format capabilities */
-	offset += 2;
-	if (len < (8 + offset))
+	if (len < (8 + offset + 2))
 		goto out;
 
+	/* 3D_Present */
+	offset++;
+	if (db[8 + offset] & (1 << 7))
+		modes += add_hdmi_mandatory_stereo_modes(connector);
+
+	offset++;
 	vic_len = db[8 + offset] >> 5;
 
 	for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
-		struct drm_display_mode *newmode;
 		u8 vic;
 
 		vic = db[9 + offset + i];
-
-		vic--; /* VICs start at 1 */
-		if (vic >= ARRAY_SIZE(edid_4k_modes)) {
-			DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
-			continue;
-		}
-
-		newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
-		if (!newmode)
-			continue;
-
-		drm_mode_probed_add(connector, newmode);
-		modes++;
+		modes += add_hdmi_mode(connector, vic);
 	}
 
 out:
@@ -2668,8 +2759,8 @@ static int
 add_cea_modes(struct drm_connector *connector, struct edid *edid)
 {
 	const u8 *cea = drm_find_cea_extension(edid);
-	const u8 *db;
-	u8 dbl;
+	const u8 *db, *hdmi = NULL;
+	u8 dbl, hdmi_len;
 	int modes = 0;
 
 	if (cea && cea_revision(cea) >= 3) {
@@ -2684,11 +2775,20 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
 
 			if (cea_db_tag(db) == VIDEO_BLOCK)
 				modes += do_cea_modes(connector, db + 1, dbl);
-			else if (cea_db_is_hdmi_vsdb(db))
-				modes += do_hdmi_vsdb_modes(connector, db, dbl);
+			else if (cea_db_is_hdmi_vsdb(db)) {
+				hdmi = db;
+				hdmi_len = dbl;
+			}
 		}
 	}
 
+	/*
+	 * We parse the HDMI VSDB after having added the cea modes as we will
+	 * be patching their flags when the sink supports stereo 3D.
+	 */
+	if (hdmi)
+		modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len);
+
 	return modes;
 }
 
@@ -2925,6 +3025,8 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
 		/* Speaker Allocation Data Block */
 		if (dbl == 3) {
 			*sadb = kmalloc(dbl, GFP_KERNEL);
+			if (!*sadb)
+				return -ENOMEM;
 			memcpy(*sadb, &db[1], dbl);
 			count = dbl;
 			break;
@@ -3319,6 +3421,33 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
 }
 EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
 
+static enum hdmi_3d_structure
+s3d_structure_from_display_mode(const struct drm_display_mode *mode)
+{
+	u32 layout = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+	switch (layout) {
+	case DRM_MODE_FLAG_3D_FRAME_PACKING:
+		return HDMI_3D_STRUCTURE_FRAME_PACKING;
+	case DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE:
+		return HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE;
+	case DRM_MODE_FLAG_3D_LINE_ALTERNATIVE:
+		return HDMI_3D_STRUCTURE_LINE_ALTERNATIVE;
+	case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL:
+		return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL;
+	case DRM_MODE_FLAG_3D_L_DEPTH:
+		return HDMI_3D_STRUCTURE_L_DEPTH;
+	case DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH:
+		return HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH;
+	case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
+		return HDMI_3D_STRUCTURE_TOP_AND_BOTTOM;
+	case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
+		return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
+	default:
+		return HDMI_3D_STRUCTURE_INVALID;
+	}
+}
+
 /**
  * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
  * data from a DRM display mode
@@ -3336,20 +3465,29 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
 					    const struct drm_display_mode *mode)
 {
 	int err;
+	u32 s3d_flags;
 	u8 vic;
 
 	if (!frame || !mode)
 		return -EINVAL;
 
 	vic = drm_match_hdmi_mode(mode);
-	if (!vic)
+	s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+	if (!vic && !s3d_flags)
+		return -EINVAL;
+
+	if (vic && s3d_flags)
 		return -EINVAL;
 
 	err = hdmi_vendor_infoframe_init(frame);
 	if (err < 0)
 		return err;
 
-	frame->vic = vic;
+	if (vic)
+		frame->vic = vic;
+	else
+		frame->s3d_struct = s3d_structure_from_display_mode(mode);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 271b42bbfb72..9081172ef057 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -32,7 +32,7 @@ MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
 		"from built-in data or /lib/firmware instead. ");
 
 #define GENERIC_EDIDS 5
-static char *generic_edid_name[GENERIC_EDIDS] = {
+static const char *generic_edid_name[GENERIC_EDIDS] = {
 	"edid/1024x768.bin",
 	"edid/1280x1024.bin",
 	"edid/1600x1200.bin",
@@ -40,7 +40,7 @@ static char *generic_edid_name[GENERIC_EDIDS] = {
 	"edid/1920x1080.bin",
 };
 
-static u8 generic_edid[GENERIC_EDIDS][128] = {
+static const u8 generic_edid[GENERIC_EDIDS][128] = {
 	{
 		0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
 		0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -133,63 +133,68 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {
 	},
 };
 
+static int edid_size(const u8 *edid, int data_size)
+{
+	if (data_size < EDID_LENGTH)
+		return 0;
+
+	return (edid[0x7e] + 1) * EDID_LENGTH;
+}
+
 static u8 *edid_load(struct drm_connector *connector, const char *name,
 		     const char *connector_name)
 {
-	const struct firmware *fw;
-	struct platform_device *pdev;
-	u8 *fwdata = NULL, *edid, *new_edid;
-	int fwsize, expected;
-	int builtin = 0, err = 0;
+	const struct firmware *fw = NULL;
+	const u8 *fwdata;
+	u8 *edid;
+	int fwsize, builtin;
 	int i, valid_extensions = 0;
 	bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
 
-	pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
-	if (IS_ERR(pdev)) {
-		DRM_ERROR("Failed to register EDID firmware platform device "
-			  "for connector \"%s\"\n", connector_name);
-		err = -EINVAL;
-		goto out;
-	}
-
-	err = request_firmware(&fw, name, &pdev->dev);
-	platform_device_unregister(pdev);
-
-	if (err) {
-		i = 0;
-		while (i < GENERIC_EDIDS && strcmp(name, generic_edid_name[i]))
-			i++;
-		if (i < GENERIC_EDIDS) {
-			err = 0;
-			builtin = 1;
+	builtin = 0;
+	for (i = 0; i < GENERIC_EDIDS; i++) {
+		if (strcmp(name, generic_edid_name[i]) == 0) {
 			fwdata = generic_edid[i];
 			fwsize = sizeof(generic_edid[i]);
+			builtin = 1;
+			break;
 		}
 	}
+	if (!builtin) {
+		struct platform_device *pdev;
+		int err;
 
-	if (err) {
-		DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
-			  name, err);
-		goto out;
-	}
+		pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
+		if (IS_ERR(pdev)) {
+			DRM_ERROR("Failed to register EDID firmware platform device "
+				  "for connector \"%s\"\n", connector_name);
+			return ERR_CAST(pdev);
+		}
+
+		err = request_firmware(&fw, name, &pdev->dev);
+		platform_device_unregister(pdev);
+		if (err) {
+			DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
+				  name, err);
+			return ERR_PTR(err);
+		}
 
-	if (fwdata == NULL) {
-		fwdata = (u8 *) fw->data;
+		fwdata = fw->data;
 		fwsize = fw->size;
 	}
 
-	expected = (fwdata[0x7e] + 1) * EDID_LENGTH;
-	if (expected != fwsize) {
+	if (edid_size(fwdata, fwsize) != fwsize) {
 		DRM_ERROR("Size of EDID firmware \"%s\" is invalid "
-			  "(expected %d, got %d)\n", name, expected, (int) fwsize);
-		err = -EINVAL;
-		goto relfw_out;
+			  "(expected %d, got %d\n", name,
+			  edid_size(fwdata, fwsize), (int)fwsize);
+		edid = ERR_PTR(-EINVAL);
+		goto out;
 	}
 
 	edid = kmemdup(fwdata, fwsize, GFP_KERNEL);
 	if (edid == NULL) {
-		err = -ENOMEM;
-		goto relfw_out;
+		edid = ERR_PTR(-ENOMEM);
+		goto out;
 	}
 
 	if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
@@ -197,8 +202,8 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
 		DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
 			  name);
 		kfree(edid);
-		err = -EINVAL;
-		goto relfw_out;
+		edid = ERR_PTR(-EINVAL);
+		goto out;
 	}
 
 	for (i = 1; i <= edid[0x7e]; i++) {
@@ -210,19 +215,18 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
 	}
 
 	if (valid_extensions != edid[0x7e]) {
+		u8 *new_edid;
+
 		edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
 		DRM_INFO("Found %d valid extensions instead of %d in EDID data "
 			 "\"%s\" for connector \"%s\"\n", valid_extensions,
 			 edid[0x7e], name, connector_name);
 		edid[0x7e] = valid_extensions;
+
 		new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
 				    GFP_KERNEL);
-		if (new_edid == NULL) {
-			err = -ENOMEM;
-			kfree(edid);
-			goto relfw_out;
-		}
-		edid = new_edid;
+		if (new_edid)
+			edid = new_edid;
 	}
 
 	DRM_INFO("Got %s EDID base block and %d extension%s from "
@@ -230,13 +234,9 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
 		 "external", valid_extensions, valid_extensions == 1 ? "" : "s",
 		 name, connector_name);
 
-relfw_out:
-	release_firmware(fw);
-
 out:
-	if (err)
-		return ERR_PTR(err);
-
+	if (fw)
+		release_firmware(fw);
 	return edid;
 }
 
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index f6f6cc7fc133..21742a81cb9c 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -852,7 +852,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
 	struct drm_fb_helper *fb_helper = info->par;
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_mode_set *modeset;
-	struct drm_crtc *crtc;
 	int ret = 0;
 	int i;
 
@@ -863,8 +862,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
 	}
 
 	for (i = 0; i < fb_helper->crtc_count; i++) {
-		crtc = fb_helper->crtc_info[i].mode_set.crtc;
-
 		modeset = &fb_helper->crtc_info[i].mode_set;
 
 		modeset->x = var->xoffset;
@@ -1360,7 +1357,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
 	struct drm_connector *connector;
 	struct drm_connector_helper_funcs *connector_funcs;
 	struct drm_encoder *encoder;
-	struct drm_fb_helper_crtc *best_crtc;
 	int my_score, best_score, score;
 	struct drm_fb_helper_crtc **crtcs, *crtc;
 	struct drm_fb_helper_connector *fb_helper_conn;
@@ -1372,7 +1368,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
 	connector = fb_helper_conn->connector;
 
 	best_crtcs[n] = NULL;
-	best_crtc = NULL;
 	best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
 	if (modes[n] == NULL)
 		return best_score;
@@ -1421,7 +1416,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
 		score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
 						  width, height);
 		if (score > best_score) {
-			best_crtc = crtc;
 			best_score = score;
 			memcpy(best_crtcs, crtcs,
 			       dev->mode_config.num_connector *
@@ -1588,8 +1582,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
 int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 {
 	struct drm_device *dev = fb_helper->dev;
-	int count = 0;
-	u32 max_width, max_height, bpp_sel;
+	u32 max_width, max_height;
 
 	if (!fb_helper->fb)
 		return 0;
@@ -1604,10 +1597,8 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 
 	max_width = fb_helper->fb->width;
 	max_height = fb_helper->fb->height;
-	bpp_sel = fb_helper->fb->bits_per_pixel;
 
-	count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
-						    max_height);
+	drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height);
 	mutex_unlock(&fb_helper->dev->mode_config.mutex);
 
 	drm_modeset_lock_all(dev);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3f84277d7036..d0e27667a4eb 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -113,7 +113,6 @@ int drm_open(struct inode *inode, struct file *filp)
 	retcode = drm_open_helper(inode, filp, dev);
 	if (retcode)
 		goto err_undo;
-	atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
 	if (need_setup) {
 		retcode = drm_setup(dev);
 		if (retcode)
@@ -386,6 +385,71 @@ static void drm_events_release(struct drm_file *file_priv)
 }
 
 /**
+ * drm_legacy_dev_reinit
+ *
+ * Reinitializes a legacy/ums drm device in it's lastclose function.
+ */
+static void drm_legacy_dev_reinit(struct drm_device *dev)
+{
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
+	atomic_set(&dev->ioctl_count, 0);
+	atomic_set(&dev->vma_count, 0);
+
+	dev->sigdata.lock = NULL;
+
+	dev->context_flag = 0;
+	dev->last_context = 0;
+	dev->if_version = 0;
+}
+
+/**
+ * Take down the DRM device.
+ *
+ * \param dev DRM device structure.
+ *
+ * Frees every resource in \p dev.
+ *
+ * \sa drm_device
+ */
+int drm_lastclose(struct drm_device * dev)
+{
+	struct drm_vma_entry *vma, *vma_temp;
+
+	DRM_DEBUG("\n");
+
+	if (dev->driver->lastclose)
+		dev->driver->lastclose(dev);
+	DRM_DEBUG("driver lastclose completed\n");
+
+	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_irq_uninstall(dev);
+
+	mutex_lock(&dev->struct_mutex);
+
+	drm_agp_clear(dev);
+
+	drm_legacy_sg_cleanup(dev);
+
+	/* Clear vma list (only built for debugging) */
+	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+		list_del(&vma->head);
+		kfree(vma);
+	}
+
+	drm_legacy_dma_takedown(dev);
+
+	dev->dev_mapping = NULL;
+	mutex_unlock(&dev->struct_mutex);
+
+	drm_legacy_dev_reinit(dev);
+
+	DRM_DEBUG("lastclose completed\n");
+	return 0;
+}
+
+/**
  * Release file.
  *
  * \param inode device inode
@@ -454,7 +518,6 @@ int drm_release(struct inode *inode, struct file *filp)
 
 				list_del(&pos->head);
 				kfree(pos);
-				--dev->ctx_count;
 			}
 		}
 	}
@@ -516,7 +579,6 @@ int drm_release(struct inode *inode, struct file *filp)
 	 * End inline drm_release
 	 */
 
-	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
 	if (!--dev->open_count) {
 		if (atomic_read(&dev->ioctl_count)) {
 			DRM_ERROR("Device busy: %d\n",
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 49293bdc972a..4761adedad2a 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -160,35 +160,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
160} 160}
161EXPORT_SYMBOL(drm_gem_private_object_init); 161EXPORT_SYMBOL(drm_gem_private_object_init);
162 162
163/**
164 * Allocate a GEM object of the specified size with shmfs backing store
165 */
166struct drm_gem_object *
167drm_gem_object_alloc(struct drm_device *dev, size_t size)
168{
169 struct drm_gem_object *obj;
170
171 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
172 if (!obj)
173 goto free;
174
175 if (drm_gem_object_init(dev, obj, size) != 0)
176 goto free;
177
178 if (dev->driver->gem_init_object != NULL &&
179 dev->driver->gem_init_object(obj) != 0) {
180 goto fput;
181 }
182 return obj;
183fput:
184 /* Object_init mangles the global counters - readjust them. */
185 fput(obj->filp);
186free:
187 kfree(obj);
188 return NULL;
189}
190EXPORT_SYMBOL(drm_gem_object_alloc);
191
192static void 163static void
193drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) 164drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
194{ 165{
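
With drm_gem_object_alloc() and the ->gem_init_object() hook gone (the exynos hunks below drop their now-unused stub), drivers embed the GEM object in their own buffer structure and initialize it directly. A sketch of the replacement pattern; every foo_* name is hypothetical:

struct foo_gem_object {
	struct drm_gem_object base;	/* embedded, no separate allocation */
	/* driver-private fields ... */
};

static struct foo_gem_object *foo_gem_create(struct drm_device *dev,
					     size_t size)
{
	struct foo_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	/* shmem-backed store, as drm_gem_object_alloc() used to set up;
	 * drivers managing their own backing storage would call
	 * drm_gem_private_object_init() instead */
	if (drm_gem_object_init(dev, &obj->base, size)) {
		kfree(obj);
		return ERR_PTR(-ENOMEM);
	}

	return obj;
}
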
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
index f7311162a61d..3d2e91c4d78e 100644
--- a/drivers/gpu/drm/drm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -67,7 +67,6 @@ int drm_global_item_ref(struct drm_global_reference *ref)
67{ 67{
68 int ret; 68 int ret;
69 struct drm_global_item *item = &glob[ref->global_type]; 69 struct drm_global_item *item = &glob[ref->global_type];
70 void *object;
71 70
72 mutex_lock(&item->mutex); 71 mutex_lock(&item->mutex);
73 if (item->refcount == 0) { 72 if (item->refcount == 0) {
@@ -85,7 +84,6 @@ int drm_global_item_ref(struct drm_global_reference *ref)
85 } 84 }
86 ++item->refcount; 85 ++item->refcount;
87 ref->object = item->object; 86 ref->object = item->object;
88 object = item->object;
89 mutex_unlock(&item->mutex); 87 mutex_unlock(&item->mutex);
90 return 0; 88 return 0;
91out_err: 89out_err:
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 53298320080b..7d5a152eeb02 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -163,13 +163,13 @@ int drm_vblank_info(struct seq_file *m, void *data)
163 mutex_lock(&dev->struct_mutex); 163 mutex_lock(&dev->struct_mutex);
164 for (crtc = 0; crtc < dev->num_crtcs; crtc++) { 164 for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
165 seq_printf(m, "CRTC %d enable: %d\n", 165 seq_printf(m, "CRTC %d enable: %d\n",
166 crtc, atomic_read(&dev->vblank_refcount[crtc])); 166 crtc, atomic_read(&dev->vblank[crtc].refcount));
167 seq_printf(m, "CRTC %d counter: %d\n", 167 seq_printf(m, "CRTC %d counter: %d\n",
168 crtc, drm_vblank_count(dev, crtc)); 168 crtc, drm_vblank_count(dev, crtc));
169 seq_printf(m, "CRTC %d last wait: %d\n", 169 seq_printf(m, "CRTC %d last wait: %d\n",
170 crtc, dev->last_vblank_wait[crtc]); 170 crtc, dev->vblank[crtc].last_wait);
171 seq_printf(m, "CRTC %d in modeset: %d\n", 171 seq_printf(m, "CRTC %d in modeset: %d\n",
172 crtc, dev->vblank_inmodeset[crtc]); 172 crtc, dev->vblank[crtc].inmodeset);
173 } 173 }
174 mutex_unlock(&dev->struct_mutex); 174 mutex_unlock(&dev->struct_mutex);
175 return 0; 175 return 0;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 07247e2855a2..dffc836144cc 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -303,6 +303,27 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
303} 303}
304 304
305/** 305/**
306 * Set device/driver capabilities
307 */
308int
309drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
310{
311 struct drm_set_client_cap *req = data;
312
313 switch (req->capability) {
314 case DRM_CLIENT_CAP_STEREO_3D:
315 if (req->value > 1)
316 return -EINVAL;
317 file_priv->stereo_allowed = req->value;
318 break;
319 default:
320 return -EINVAL;
321 }
322
323 return 0;
324}
325
326/**
306 * Setversion ioctl. 327 * Setversion ioctl.
307 * 328 *
308 * \param inode device inode. 329 * \param inode device inode.
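
The new drm_setclientcap() is the kernel half of a SET_CLIENT_CAP ioctl: a client must opt in before stereo 3D modes are exposed to it. A hedged userspace sketch, assuming the matching uapi additions from the same series (struct drm_set_client_cap, DRM_CLIENT_CAP_STEREO_3D and DRM_IOCTL_SET_CLIENT_CAP in drm.h):

#include <xf86drm.h>	/* drmIoctl() */
#include <drm.h>

static int foo_enable_stereo(int drm_fd)
{
	struct drm_set_client_cap cap = {
		.capability = DRM_CLIENT_CAP_STEREO_3D,
		.value = 1,	/* values above 1 return -EINVAL, per the code above */
	};

	/* Until this succeeds, stereo modes stay hidden from this client. */
	return drmIoctl(drm_fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
}
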
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f92da0a32f0d..f9af048828ea 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -43,9 +43,8 @@
43#include <linux/export.h> 43#include <linux/export.h>
44 44
45/* Access macro for slots in vblank timestamp ringbuffer. */ 45/* Access macro for slots in vblank timestamp ringbuffer. */
46#define vblanktimestamp(dev, crtc, count) ( \ 46#define vblanktimestamp(dev, crtc, count) \
47 (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \ 47 ((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE])
48 ((count) % DRM_VBLANKTIME_RBSIZE)])
49 48
50/* Retry timestamp calculation up to 3 times to satisfy 49/* Retry timestamp calculation up to 3 times to satisfy
51 * drm_timestamp_precision before giving up. 50 * drm_timestamp_precision before giving up.
@@ -89,8 +88,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
89 */ 88 */
90static void clear_vblank_timestamps(struct drm_device *dev, int crtc) 89static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
91{ 90{
92 memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0, 91 memset(dev->vblank[crtc].time, 0, sizeof(dev->vblank[crtc].time));
93 DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
94} 92}
95 93
96/* 94/*
@@ -115,7 +113,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
115 spin_lock_irqsave(&dev->vblank_time_lock, irqflags); 113 spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
116 114
117 dev->driver->disable_vblank(dev, crtc); 115 dev->driver->disable_vblank(dev, crtc);
118 dev->vblank_enabled[crtc] = 0; 116 dev->vblank[crtc].enabled = false;
119 117
 120 /* No further vblank irqs will be processed after 118 /* No further vblank irqs will be processed after
121 * this point. Get current hardware vblank count and 119 * this point. Get current hardware vblank count and
@@ -130,9 +128,9 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
130 * delayed gpu counter increment. 128 * delayed gpu counter increment.
131 */ 129 */
132 do { 130 do {
133 dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); 131 dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc);
134 vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0); 132 vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
135 } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc); 133 } while (dev->vblank[crtc].last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
136 134
137 if (!count) 135 if (!count)
138 vblrc = 0; 136 vblrc = 0;
@@ -140,7 +138,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
140 /* Compute time difference to stored timestamp of last vblank 138 /* Compute time difference to stored timestamp of last vblank
141 * as updated by last invocation of drm_handle_vblank() in vblank irq. 139 * as updated by last invocation of drm_handle_vblank() in vblank irq.
142 */ 140 */
143 vblcount = atomic_read(&dev->_vblank_count[crtc]); 141 vblcount = atomic_read(&dev->vblank[crtc].count);
144 diff_ns = timeval_to_ns(&tvblank) - 142 diff_ns = timeval_to_ns(&tvblank) -
145 timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); 143 timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
146 144
@@ -157,7 +155,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
157 * hope for the best. 155 * hope for the best.
158 */ 156 */
159 if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) { 157 if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
160 atomic_inc(&dev->_vblank_count[crtc]); 158 atomic_inc(&dev->vblank[crtc].count);
161 smp_mb__after_atomic_inc(); 159 smp_mb__after_atomic_inc();
162 } 160 }
163 161
@@ -178,8 +176,8 @@ static void vblank_disable_fn(unsigned long arg)
178 176
179 for (i = 0; i < dev->num_crtcs; i++) { 177 for (i = 0; i < dev->num_crtcs; i++) {
180 spin_lock_irqsave(&dev->vbl_lock, irqflags); 178 spin_lock_irqsave(&dev->vbl_lock, irqflags);
181 if (atomic_read(&dev->vblank_refcount[i]) == 0 && 179 if (atomic_read(&dev->vblank[i].refcount) == 0 &&
182 dev->vblank_enabled[i]) { 180 dev->vblank[i].enabled) {
183 DRM_DEBUG("disabling vblank on crtc %d\n", i); 181 DRM_DEBUG("disabling vblank on crtc %d\n", i);
184 vblank_disable_and_save(dev, i); 182 vblank_disable_and_save(dev, i);
185 } 183 }
@@ -197,14 +195,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
197 195
198 vblank_disable_fn((unsigned long)dev); 196 vblank_disable_fn((unsigned long)dev);
199 197
200 kfree(dev->vbl_queue); 198 kfree(dev->vblank);
201 kfree(dev->_vblank_count);
202 kfree(dev->vblank_refcount);
203 kfree(dev->vblank_enabled);
204 kfree(dev->last_vblank);
205 kfree(dev->last_vblank_wait);
206 kfree(dev->vblank_inmodeset);
207 kfree(dev->_vblank_time);
208 199
209 dev->num_crtcs = 0; 200 dev->num_crtcs = 0;
210} 201}
@@ -221,40 +212,12 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
221 212
222 dev->num_crtcs = num_crtcs; 213 dev->num_crtcs = num_crtcs;
223 214
224 dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs, 215 dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
225 GFP_KERNEL); 216 if (!dev->vblank)
226 if (!dev->vbl_queue)
227 goto err; 217 goto err;
228 218
229 dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL); 219 for (i = 0; i < num_crtcs; i++)
230 if (!dev->_vblank_count) 220 init_waitqueue_head(&dev->vblank[i].queue);
231 goto err;
232
233 dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
234 GFP_KERNEL);
235 if (!dev->vblank_refcount)
236 goto err;
237
238 dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
239 if (!dev->vblank_enabled)
240 goto err;
241
242 dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
243 if (!dev->last_vblank)
244 goto err;
245
246 dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
247 if (!dev->last_vblank_wait)
248 goto err;
249
250 dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
251 if (!dev->vblank_inmodeset)
252 goto err;
253
254 dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
255 sizeof(struct timeval), GFP_KERNEL);
256 if (!dev->_vblank_time)
257 goto err;
258 221
259 DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n"); 222 DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
260 223
@@ -264,14 +227,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
264 else 227 else
265 DRM_INFO("No driver support for vblank timestamp query.\n"); 228 DRM_INFO("No driver support for vblank timestamp query.\n");
266 229
267 /* Zero per-crtc vblank stuff */ 230 dev->vblank_disable_allowed = false;
268 for (i = 0; i < num_crtcs; i++) {
269 init_waitqueue_head(&dev->vbl_queue[i]);
270 atomic_set(&dev->_vblank_count[i], 0);
271 atomic_set(&dev->vblank_refcount[i], 0);
272 }
273 231
274 dev->vblank_disable_allowed = 0;
275 return 0; 232 return 0;
276 233
277err: 234err:
@@ -336,7 +293,7 @@ int drm_irq_install(struct drm_device *dev)
336 mutex_unlock(&dev->struct_mutex); 293 mutex_unlock(&dev->struct_mutex);
337 return -EBUSY; 294 return -EBUSY;
338 } 295 }
339 dev->irq_enabled = 1; 296 dev->irq_enabled = true;
340 mutex_unlock(&dev->struct_mutex); 297 mutex_unlock(&dev->struct_mutex);
341 298
342 DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); 299 DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
@@ -359,7 +316,7 @@ int drm_irq_install(struct drm_device *dev)
359 316
360 if (ret < 0) { 317 if (ret < 0) {
361 mutex_lock(&dev->struct_mutex); 318 mutex_lock(&dev->struct_mutex);
362 dev->irq_enabled = 0; 319 dev->irq_enabled = false;
363 mutex_unlock(&dev->struct_mutex); 320 mutex_unlock(&dev->struct_mutex);
364 return ret; 321 return ret;
365 } 322 }
@@ -373,7 +330,7 @@ int drm_irq_install(struct drm_device *dev)
373 330
374 if (ret < 0) { 331 if (ret < 0) {
375 mutex_lock(&dev->struct_mutex); 332 mutex_lock(&dev->struct_mutex);
376 dev->irq_enabled = 0; 333 dev->irq_enabled = false;
377 mutex_unlock(&dev->struct_mutex); 334 mutex_unlock(&dev->struct_mutex);
378 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 335 if (!drm_core_check_feature(dev, DRIVER_MODESET))
379 vga_client_register(dev->pdev, NULL, NULL, NULL); 336 vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -394,14 +351,15 @@ EXPORT_SYMBOL(drm_irq_install);
394int drm_irq_uninstall(struct drm_device *dev) 351int drm_irq_uninstall(struct drm_device *dev)
395{ 352{
396 unsigned long irqflags; 353 unsigned long irqflags;
397 int irq_enabled, i; 354 bool irq_enabled;
355 int i;
398 356
399 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 357 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
400 return -EINVAL; 358 return -EINVAL;
401 359
402 mutex_lock(&dev->struct_mutex); 360 mutex_lock(&dev->struct_mutex);
403 irq_enabled = dev->irq_enabled; 361 irq_enabled = dev->irq_enabled;
404 dev->irq_enabled = 0; 362 dev->irq_enabled = false;
405 mutex_unlock(&dev->struct_mutex); 363 mutex_unlock(&dev->struct_mutex);
406 364
407 /* 365 /*
@@ -410,9 +368,9 @@ int drm_irq_uninstall(struct drm_device *dev)
410 if (dev->num_crtcs) { 368 if (dev->num_crtcs) {
411 spin_lock_irqsave(&dev->vbl_lock, irqflags); 369 spin_lock_irqsave(&dev->vbl_lock, irqflags);
412 for (i = 0; i < dev->num_crtcs; i++) { 370 for (i = 0; i < dev->num_crtcs; i++) {
413 DRM_WAKEUP(&dev->vbl_queue[i]); 371 DRM_WAKEUP(&dev->vblank[i].queue);
414 dev->vblank_enabled[i] = 0; 372 dev->vblank[i].enabled = false;
415 dev->last_vblank[i] = 373 dev->vblank[i].last =
416 dev->driver->get_vblank_counter(dev, i); 374 dev->driver->get_vblank_counter(dev, i);
417 } 375 }
418 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 376 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -795,7 +753,7 @@ EXPORT_SYMBOL(drm_get_last_vbltimestamp);
795 */ 753 */
796u32 drm_vblank_count(struct drm_device *dev, int crtc) 754u32 drm_vblank_count(struct drm_device *dev, int crtc)
797{ 755{
798 return atomic_read(&dev->_vblank_count[crtc]); 756 return atomic_read(&dev->vblank[crtc].count);
799} 757}
800EXPORT_SYMBOL(drm_vblank_count); 758EXPORT_SYMBOL(drm_vblank_count);
801 759
@@ -824,10 +782,10 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
824 * a seqlock. 782 * a seqlock.
825 */ 783 */
826 do { 784 do {
827 cur_vblank = atomic_read(&dev->_vblank_count[crtc]); 785 cur_vblank = atomic_read(&dev->vblank[crtc].count);
828 *vblanktime = vblanktimestamp(dev, crtc, cur_vblank); 786 *vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
829 smp_rmb(); 787 smp_rmb();
830 } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc])); 788 } while (cur_vblank != atomic_read(&dev->vblank[crtc].count));
831 789
832 return cur_vblank; 790 return cur_vblank;
833} 791}
@@ -914,12 +872,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
914 } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc)); 872 } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
915 873
916 /* Deal with counter wrap */ 874 /* Deal with counter wrap */
917 diff = cur_vblank - dev->last_vblank[crtc]; 875 diff = cur_vblank - dev->vblank[crtc].last;
918 if (cur_vblank < dev->last_vblank[crtc]) { 876 if (cur_vblank < dev->vblank[crtc].last) {
919 diff += dev->max_vblank_count; 877 diff += dev->max_vblank_count;
920 878
921 DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n", 879 DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
922 crtc, dev->last_vblank[crtc], cur_vblank, diff); 880 crtc, dev->vblank[crtc].last, cur_vblank, diff);
923 } 881 }
924 882
925 DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n", 883 DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
@@ -930,12 +888,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
930 * reinitialize delayed at next vblank interrupt in that case. 888 * reinitialize delayed at next vblank interrupt in that case.
931 */ 889 */
932 if (rc) { 890 if (rc) {
933 tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; 891 tslot = atomic_read(&dev->vblank[crtc].count) + diff;
934 vblanktimestamp(dev, crtc, tslot) = t_vblank; 892 vblanktimestamp(dev, crtc, tslot) = t_vblank;
935 } 893 }
936 894
937 smp_mb__before_atomic_inc(); 895 smp_mb__before_atomic_inc();
938 atomic_add(diff, &dev->_vblank_count[crtc]); 896 atomic_add(diff, &dev->vblank[crtc].count);
939 smp_mb__after_atomic_inc(); 897 smp_mb__after_atomic_inc();
940} 898}
941 899
@@ -957,9 +915,9 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
957 915
958 spin_lock_irqsave(&dev->vbl_lock, irqflags); 916 spin_lock_irqsave(&dev->vbl_lock, irqflags);
959 /* Going from 0->1 means we have to enable interrupts again */ 917 /* Going from 0->1 means we have to enable interrupts again */
960 if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { 918 if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
961 spin_lock_irqsave(&dev->vblank_time_lock, irqflags2); 919 spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
962 if (!dev->vblank_enabled[crtc]) { 920 if (!dev->vblank[crtc].enabled) {
963 /* Enable vblank irqs under vblank_time_lock protection. 921 /* Enable vblank irqs under vblank_time_lock protection.
964 * All vblank count & timestamp updates are held off 922 * All vblank count & timestamp updates are held off
965 * until we are done reinitializing master counter and 923 * until we are done reinitializing master counter and
@@ -970,16 +928,16 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
970 DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", 928 DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
971 crtc, ret); 929 crtc, ret);
972 if (ret) 930 if (ret)
973 atomic_dec(&dev->vblank_refcount[crtc]); 931 atomic_dec(&dev->vblank[crtc].refcount);
974 else { 932 else {
975 dev->vblank_enabled[crtc] = 1; 933 dev->vblank[crtc].enabled = true;
976 drm_update_vblank_count(dev, crtc); 934 drm_update_vblank_count(dev, crtc);
977 } 935 }
978 } 936 }
979 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2); 937 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
980 } else { 938 } else {
981 if (!dev->vblank_enabled[crtc]) { 939 if (!dev->vblank[crtc].enabled) {
982 atomic_dec(&dev->vblank_refcount[crtc]); 940 atomic_dec(&dev->vblank[crtc].refcount);
983 ret = -EINVAL; 941 ret = -EINVAL;
984 } 942 }
985 } 943 }
@@ -999,10 +957,10 @@ EXPORT_SYMBOL(drm_vblank_get);
999 */ 957 */
1000void drm_vblank_put(struct drm_device *dev, int crtc) 958void drm_vblank_put(struct drm_device *dev, int crtc)
1001{ 959{
1002 BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0); 960 BUG_ON(atomic_read(&dev->vblank[crtc].refcount) == 0);
1003 961
1004 /* Last user schedules interrupt disable */ 962 /* Last user schedules interrupt disable */
1005 if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) && 963 if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
1006 (drm_vblank_offdelay > 0)) 964 (drm_vblank_offdelay > 0))
1007 mod_timer(&dev->vblank_disable_timer, 965 mod_timer(&dev->vblank_disable_timer,
1008 jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000)); 966 jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
@@ -1025,7 +983,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
1025 983
1026 spin_lock_irqsave(&dev->vbl_lock, irqflags); 984 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1027 vblank_disable_and_save(dev, crtc); 985 vblank_disable_and_save(dev, crtc);
1028 DRM_WAKEUP(&dev->vbl_queue[crtc]); 986 DRM_WAKEUP(&dev->vblank[crtc].queue);
1029 987
1030 /* Send any queued vblank events, lest the natives grow disquiet */ 988 /* Send any queued vblank events, lest the natives grow disquiet */
1031 seq = drm_vblank_count_and_time(dev, crtc, &now); 989 seq = drm_vblank_count_and_time(dev, crtc, &now);
@@ -1067,10 +1025,10 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
1067 * to avoid corrupting the count if multiple, mismatched calls occur), 1025 * to avoid corrupting the count if multiple, mismatched calls occur),
1068 * so that interrupts remain enabled in the interim. 1026 * so that interrupts remain enabled in the interim.
1069 */ 1027 */
1070 if (!dev->vblank_inmodeset[crtc]) { 1028 if (!dev->vblank[crtc].inmodeset) {
1071 dev->vblank_inmodeset[crtc] = 0x1; 1029 dev->vblank[crtc].inmodeset = 0x1;
1072 if (drm_vblank_get(dev, crtc) == 0) 1030 if (drm_vblank_get(dev, crtc) == 0)
1073 dev->vblank_inmodeset[crtc] |= 0x2; 1031 dev->vblank[crtc].inmodeset |= 0x2;
1074 } 1032 }
1075} 1033}
1076EXPORT_SYMBOL(drm_vblank_pre_modeset); 1034EXPORT_SYMBOL(drm_vblank_pre_modeset);
@@ -1083,15 +1041,15 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
1083 if (!dev->num_crtcs) 1041 if (!dev->num_crtcs)
1084 return; 1042 return;
1085 1043
1086 if (dev->vblank_inmodeset[crtc]) { 1044 if (dev->vblank[crtc].inmodeset) {
1087 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1045 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1088 dev->vblank_disable_allowed = 1; 1046 dev->vblank_disable_allowed = true;
1089 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1047 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1090 1048
1091 if (dev->vblank_inmodeset[crtc] & 0x2) 1049 if (dev->vblank[crtc].inmodeset & 0x2)
1092 drm_vblank_put(dev, crtc); 1050 drm_vblank_put(dev, crtc);
1093 1051
1094 dev->vblank_inmodeset[crtc] = 0; 1052 dev->vblank[crtc].inmodeset = 0;
1095 } 1053 }
1096} 1054}
1097EXPORT_SYMBOL(drm_vblank_post_modeset); 1055EXPORT_SYMBOL(drm_vblank_post_modeset);
@@ -1288,8 +1246,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1288 1246
1289 DRM_DEBUG("waiting on vblank count %d, crtc %d\n", 1247 DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
1290 vblwait->request.sequence, crtc); 1248 vblwait->request.sequence, crtc);
1291 dev->last_vblank_wait[crtc] = vblwait->request.sequence; 1249 dev->vblank[crtc].last_wait = vblwait->request.sequence;
1292 DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ, 1250 DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ,
1293 (((drm_vblank_count(dev, crtc) - 1251 (((drm_vblank_count(dev, crtc) -
1294 vblwait->request.sequence) <= (1 << 23)) || 1252 vblwait->request.sequence) <= (1 << 23)) ||
1295 !dev->irq_enabled)); 1253 !dev->irq_enabled));
@@ -1367,7 +1325,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1367 spin_lock_irqsave(&dev->vblank_time_lock, irqflags); 1325 spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
1368 1326
1369 /* Vblank irq handling disabled. Nothing to do. */ 1327 /* Vblank irq handling disabled. Nothing to do. */
1370 if (!dev->vblank_enabled[crtc]) { 1328 if (!dev->vblank[crtc].enabled) {
1371 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 1329 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
1372 return false; 1330 return false;
1373 } 1331 }
@@ -1377,7 +1335,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1377 */ 1335 */
1378 1336
1379 /* Get current timestamp and count. */ 1337 /* Get current timestamp and count. */
1380 vblcount = atomic_read(&dev->_vblank_count[crtc]); 1338 vblcount = atomic_read(&dev->vblank[crtc].count);
1381 drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ); 1339 drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
1382 1340
1383 /* Compute time difference to timestamp of last vblank */ 1341 /* Compute time difference to timestamp of last vblank */
@@ -1401,14 +1359,14 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1401 * the timestamp computed above. 1359 * the timestamp computed above.
1402 */ 1360 */
1403 smp_mb__before_atomic_inc(); 1361 smp_mb__before_atomic_inc();
1404 atomic_inc(&dev->_vblank_count[crtc]); 1362 atomic_inc(&dev->vblank[crtc].count);
1405 smp_mb__after_atomic_inc(); 1363 smp_mb__after_atomic_inc();
1406 } else { 1364 } else {
1407 DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", 1365 DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
1408 crtc, (int) diff_ns); 1366 crtc, (int) diff_ns);
1409 } 1367 }
1410 1368
1411 DRM_WAKEUP(&dev->vbl_queue[crtc]); 1369 DRM_WAKEUP(&dev->vblank[crtc].queue);
1412 drm_handle_vblank_events(dev, crtc); 1370 drm_handle_vblank_events(dev, crtc);
1413 1371
1414 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 1372 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
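
All of the parallel dev->vblank_*[crtc] arrays touched above collapse into a single dev->vblank[crtc] element. The fields referenced throughout this diff imply a per-CRTC structure roughly like the following sketch (the real definition lives in drmP.h, which is not part of this hunk):

struct drm_vblank_crtc {
	wait_queue_head_t queue;	/* vblank wait queue */
	struct timeval time[DRM_VBLANKTIME_RBSIZE]; /* timestamp ring buffer */
	atomic_t count;			/* current vblank counter */
	atomic_t refcount;		/* users of this counter */
	u32 last;			/* last counter value read from hw */
	u32 last_wait;			/* last sequence number waited on */
	unsigned int inmodeset;		/* 0x1/0x2 modeset bookkeeping */
	bool enabled;			/* interrupts currently enabled? */
};
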
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index d752c96d6090..f6452682141b 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -86,7 +86,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
86 if (drm_lock_take(&master->lock, lock->context)) { 86 if (drm_lock_take(&master->lock, lock->context)) {
87 master->lock.file_priv = file_priv; 87 master->lock.file_priv = file_priv;
88 master->lock.lock_time = jiffies; 88 master->lock.lock_time = jiffies;
89 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
90 break; /* Got lock */ 89 break; /* Got lock */
91 } 90 }
92 91
@@ -157,8 +156,6 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
157 return -EINVAL; 156 return -EINVAL;
158 } 157 }
159 158
160 atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
161
162 if (drm_lock_free(&master->lock, lock->context)) { 159 if (drm_lock_free(&master->lock, lock->context)) {
163 /* FIXME: Should really bail out here. */ 160 /* FIXME: Should really bail out here. */
164 } 161 }
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index fc2adb62b757..b0733153dfd2 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -707,18 +707,25 @@ EXPORT_SYMBOL(drm_mode_vrefresh);
707/** 707/**
708 * drm_mode_set_crtcinfo - set CRTC modesetting parameters 708 * drm_mode_set_crtcinfo - set CRTC modesetting parameters
709 * @p: mode 709 * @p: mode
710 * @adjust_flags: unused? (FIXME) 710 * @adjust_flags: a combination of adjustment flags
711 * 711 *
712 * LOCKING: 712 * LOCKING:
713 * None. 713 * None.
714 * 714 *
715 * Setup the CRTC modesetting parameters for @p, adjusting if necessary. 715 * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
716 *
717 * - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
718 * interlaced modes.
719 * - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
 720 * buffers containing two eyes (the timings are only adjusted when needed, e.g. for
721 * "frame packing" or "side by side full").
716 */ 722 */
717void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags) 723void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
718{ 724{
719 if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN)) 725 if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
720 return; 726 return;
721 727
728 p->crtc_clock = p->clock;
722 p->crtc_hdisplay = p->hdisplay; 729 p->crtc_hdisplay = p->hdisplay;
723 p->crtc_hsync_start = p->hsync_start; 730 p->crtc_hsync_start = p->hsync_start;
724 p->crtc_hsync_end = p->hsync_end; 731 p->crtc_hsync_end = p->hsync_end;
@@ -752,6 +759,20 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
752 p->crtc_vtotal *= p->vscan; 759 p->crtc_vtotal *= p->vscan;
753 } 760 }
754 761
762 if (adjust_flags & CRTC_STEREO_DOUBLE) {
763 unsigned int layout = p->flags & DRM_MODE_FLAG_3D_MASK;
764
765 switch (layout) {
766 case DRM_MODE_FLAG_3D_FRAME_PACKING:
767 p->crtc_clock *= 2;
768 p->crtc_vdisplay += p->crtc_vtotal;
769 p->crtc_vsync_start += p->crtc_vtotal;
770 p->crtc_vsync_end += p->crtc_vtotal;
771 p->crtc_vtotal += p->crtc_vtotal;
772 break;
773 }
774 }
775
755 p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay); 776 p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
756 p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal); 777 p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
757 p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay); 778 p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
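
The frame-packing branch can be sanity-checked against the standard HDMI case; a worked example, with numbers quoted from the CEA 1920x1080@24 mode for illustration:

/* drm_mode_set_crtcinfo(mode, CRTC_STEREO_DOUBLE) on 1080p24
 * (clock 74250 kHz, vdisplay 1080, vtotal 1125) with
 * DRM_MODE_FLAG_3D_FRAME_PACKING set yields:
 *
 *   crtc_clock    = 2 * 74250   = 148500 kHz
 *   crtc_vdisplay = 1080 + 1125 = 2205 (both eyes plus the original
 *                                 blanking between them)
 *   crtc_vtotal   = 1125 + 1125 = 2250
 *
 * Horizontal timings are untouched: the link scans one double-height
 * frame at twice the pixel rate.
 */
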
@@ -830,12 +851,16 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
830 } else if (mode1->clock != mode2->clock) 851 } else if (mode1->clock != mode2->clock)
831 return false; 852 return false;
832 853
833 return drm_mode_equal_no_clocks(mode1, mode2); 854 if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
855 (mode2->flags & DRM_MODE_FLAG_3D_MASK))
856 return false;
857
858 return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
834} 859}
835EXPORT_SYMBOL(drm_mode_equal); 860EXPORT_SYMBOL(drm_mode_equal);
836 861
837/** 862/**
838 * drm_mode_equal_no_clocks - test modes for equality 863 * drm_mode_equal_no_clocks_no_stereo - test modes for equality
839 * @mode1: first mode 864 * @mode1: first mode
840 * @mode2: second mode 865 * @mode2: second mode
841 * 866 *
@@ -843,12 +868,13 @@ EXPORT_SYMBOL(drm_mode_equal);
843 * None. 868 * None.
844 * 869 *
845 * Check to see if @mode1 and @mode2 are equivalent, but 870 * Check to see if @mode1 and @mode2 are equivalent, but
846 * don't check the pixel clocks. 871 * don't check the pixel clocks nor the stereo layout.
847 * 872 *
848 * RETURNS: 873 * RETURNS:
849 * True if the modes are equal, false otherwise. 874 * True if the modes are equal, false otherwise.
850 */ 875 */
851bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2) 876bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
877 const struct drm_display_mode *mode2)
852{ 878{
853 if (mode1->hdisplay == mode2->hdisplay && 879 if (mode1->hdisplay == mode2->hdisplay &&
854 mode1->hsync_start == mode2->hsync_start && 880 mode1->hsync_start == mode2->hsync_start &&
@@ -860,12 +886,13 @@ bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct
860 mode1->vsync_end == mode2->vsync_end && 886 mode1->vsync_end == mode2->vsync_end &&
861 mode1->vtotal == mode2->vtotal && 887 mode1->vtotal == mode2->vtotal &&
862 mode1->vscan == mode2->vscan && 888 mode1->vscan == mode2->vscan &&
863 mode1->flags == mode2->flags) 889 (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
890 (mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
864 return true; 891 return true;
865 892
866 return false; 893 return false;
867} 894}
868EXPORT_SYMBOL(drm_mode_equal_no_clocks); 895EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
869 896
870/** 897/**
871 * drm_mode_validate_size - make sure modes adhere to size constraints 898 * drm_mode_validate_size - make sure modes adhere to size constraints
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 1f96cee6eee8..f00d7a9671ea 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -322,83 +322,36 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
322 322
323 DRM_DEBUG("\n"); 323 DRM_DEBUG("\n");
324 324
325 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 325 dev = drm_dev_alloc(driver, &pdev->dev);
326 if (!dev) 326 if (!dev)
327 return -ENOMEM; 327 return -ENOMEM;
328 328
329 ret = pci_enable_device(pdev); 329 ret = pci_enable_device(pdev);
330 if (ret) 330 if (ret)
331 goto err_g1; 331 goto err_free;
332 332
333 dev->pdev = pdev; 333 dev->pdev = pdev;
334 dev->dev = &pdev->dev;
335
336 dev->pci_device = pdev->device;
337 dev->pci_vendor = pdev->vendor;
338
339#ifdef __alpha__ 334#ifdef __alpha__
340 dev->hose = pdev->sysdata; 335 dev->hose = pdev->sysdata;
341#endif 336#endif
342 337
343 mutex_lock(&drm_global_mutex); 338 if (drm_core_check_feature(dev, DRIVER_MODESET))
344
345 if ((ret = drm_fill_in_dev(dev, ent, driver))) {
346 printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
347 goto err_g2;
348 }
349
350 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
351 pci_set_drvdata(pdev, dev); 339 pci_set_drvdata(pdev, dev);
352 ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
353 if (ret)
354 goto err_g2;
355 }
356
357 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
358 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
359 if (ret)
360 goto err_g21;
361 }
362
363 if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
364 goto err_g3;
365
366 if (dev->driver->load) {
367 ret = dev->driver->load(dev, ent->driver_data);
368 if (ret)
369 goto err_g4;
370 }
371 340
372 /* setup the grouping for the legacy output */ 341 ret = drm_dev_register(dev, ent->driver_data);
373 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 342 if (ret)
374 ret = drm_mode_group_init_legacy_group(dev, 343 goto err_pci;
375 &dev->primary->mode_group);
376 if (ret)
377 goto err_g4;
378 }
379
380 list_add_tail(&dev->driver_item, &driver->device_list);
381 344
382 DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", 345 DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
383 driver->name, driver->major, driver->minor, driver->patchlevel, 346 driver->name, driver->major, driver->minor, driver->patchlevel,
384 driver->date, pci_name(pdev), dev->primary->index); 347 driver->date, pci_name(pdev), dev->primary->index);
385 348
386 mutex_unlock(&drm_global_mutex);
387 return 0; 349 return 0;
388 350
389err_g4: 351err_pci:
390 drm_put_minor(&dev->primary);
391err_g3:
392 if (dev->render)
393 drm_put_minor(&dev->render);
394err_g21:
395 if (drm_core_check_feature(dev, DRIVER_MODESET))
396 drm_put_minor(&dev->control);
397err_g2:
398 pci_disable_device(pdev); 352 pci_disable_device(pdev);
399err_g1: 353err_free:
400 kfree(dev); 354 drm_dev_free(dev);
401 mutex_unlock(&drm_global_mutex);
402 return ret; 355 return ret;
403} 356}
404EXPORT_SYMBOL(drm_get_pci_dev); 357EXPORT_SYMBOL(drm_get_pci_dev);
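
Note the asymmetry in the error handling above: drm_dev_free() is only correct while drm_dev_register() has not succeeded; afterwards teardown must go through drm_put_dev(). A hedged sketch of the matching remove path for a hypothetical PCI driver:

static void foo_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/* drm_put_dev() = drm_dev_unregister() + drm_dev_free(),
	 * see the drm_stub.c hunks below */
	drm_put_dev(dev);
	pci_disable_device(pdev);
}
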
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index f7a18c6ba4c4..fc24fee8ec83 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -47,55 +47,15 @@ static int drm_get_platform_dev(struct platform_device *platdev,
47 47
48 DRM_DEBUG("\n"); 48 DRM_DEBUG("\n");
49 49
50 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 50 dev = drm_dev_alloc(driver, &platdev->dev);
51 if (!dev) 51 if (!dev)
52 return -ENOMEM; 52 return -ENOMEM;
53 53
54 dev->platformdev = platdev; 54 dev->platformdev = platdev;
55 dev->dev = &platdev->dev;
56 55
57 mutex_lock(&drm_global_mutex); 56 ret = drm_dev_register(dev, 0);
58
59 ret = drm_fill_in_dev(dev, NULL, driver);
60
61 if (ret) {
62 printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
63 goto err_g1;
64 }
65
66 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
67 ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
68 if (ret)
69 goto err_g1;
70 }
71
72 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
73 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
74 if (ret)
75 goto err_g11;
76 }
77
78 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
79 if (ret) 57 if (ret)
80 goto err_g2; 58 goto err_free;
81
82 if (dev->driver->load) {
83 ret = dev->driver->load(dev, 0);
84 if (ret)
85 goto err_g3;
86 }
87
88 /* setup the grouping for the legacy output */
89 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
90 ret = drm_mode_group_init_legacy_group(dev,
91 &dev->primary->mode_group);
92 if (ret)
93 goto err_g3;
94 }
95
96 list_add_tail(&dev->driver_item, &driver->device_list);
97
98 mutex_unlock(&drm_global_mutex);
99 59
100 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", 60 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
101 driver->name, driver->major, driver->minor, driver->patchlevel, 61 driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -103,17 +63,8 @@ static int drm_get_platform_dev(struct platform_device *platdev,
103 63
104 return 0; 64 return 0;
105 65
106err_g3: 66err_free:
107 drm_put_minor(&dev->primary); 67 drm_dev_free(dev);
108err_g2:
109 if (dev->render)
110 drm_put_minor(&dev->render);
111err_g11:
112 if (drm_core_check_feature(dev, DRIVER_MODESET))
113 drm_put_minor(&dev->control);
114err_g1:
115 kfree(dev);
116 mutex_unlock(&drm_global_mutex);
117 return ret; 68 return ret;
118} 69}
119 70
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 276d470f7b3e..56805c39c906 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -637,14 +637,13 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
637 unsigned count; 637 unsigned count;
638 struct scatterlist *sg; 638 struct scatterlist *sg;
639 struct page *page; 639 struct page *page;
640 u32 len, offset; 640 u32 len;
641 int pg_index; 641 int pg_index;
642 dma_addr_t addr; 642 dma_addr_t addr;
643 643
644 pg_index = 0; 644 pg_index = 0;
645 for_each_sg(sgt->sgl, sg, sgt->nents, count) { 645 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
646 len = sg->length; 646 len = sg->length;
647 offset = sg->offset;
648 page = sg_page(sg); 647 page = sg_page(sg);
649 addr = sg_dma_address(sg); 648 addr = sg_dma_address(sg);
650 649
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 39d864576be4..26055abf94ee 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -254,70 +254,6 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
254 return 0; 254 return 0;
255} 255}
256 256
257int drm_fill_in_dev(struct drm_device *dev,
258 const struct pci_device_id *ent,
259 struct drm_driver *driver)
260{
261 int retcode;
262
263 INIT_LIST_HEAD(&dev->filelist);
264 INIT_LIST_HEAD(&dev->ctxlist);
265 INIT_LIST_HEAD(&dev->vmalist);
266 INIT_LIST_HEAD(&dev->maplist);
267 INIT_LIST_HEAD(&dev->vblank_event_list);
268
269 spin_lock_init(&dev->count_lock);
270 spin_lock_init(&dev->event_lock);
271 mutex_init(&dev->struct_mutex);
272 mutex_init(&dev->ctxlist_mutex);
273
274 if (drm_ht_create(&dev->map_hash, 12)) {
275 return -ENOMEM;
276 }
277
278 /* the DRM has 6 basic counters */
279 dev->counters = 6;
280 dev->types[0] = _DRM_STAT_LOCK;
281 dev->types[1] = _DRM_STAT_OPENS;
282 dev->types[2] = _DRM_STAT_CLOSES;
283 dev->types[3] = _DRM_STAT_IOCTLS;
284 dev->types[4] = _DRM_STAT_LOCKS;
285 dev->types[5] = _DRM_STAT_UNLOCKS;
286
287 dev->driver = driver;
288
289 if (dev->driver->bus->agp_init) {
290 retcode = dev->driver->bus->agp_init(dev);
291 if (retcode)
292 goto error_out_unreg;
293 }
294
295
296
297 retcode = drm_ctxbitmap_init(dev);
298 if (retcode) {
299 DRM_ERROR("Cannot allocate memory for context bitmap.\n");
300 goto error_out_unreg;
301 }
302
303 if (driver->driver_features & DRIVER_GEM) {
304 retcode = drm_gem_init(dev);
305 if (retcode) {
306 DRM_ERROR("Cannot initialize graphics execution "
307 "manager (GEM)\n");
308 goto error_out_unreg;
309 }
310 }
311
312 return 0;
313
314 error_out_unreg:
315 drm_lastclose(dev);
316 return retcode;
317}
318EXPORT_SYMBOL(drm_fill_in_dev);
319
320
321/** 257/**
322 * Get a secondary minor number. 258 * Get a secondary minor number.
323 * 259 *
@@ -427,66 +363,237 @@ static void drm_unplug_minor(struct drm_minor *minor)
427 */ 363 */
428void drm_put_dev(struct drm_device *dev) 364void drm_put_dev(struct drm_device *dev)
429{ 365{
430 struct drm_driver *driver;
431 struct drm_map_list *r_list, *list_temp;
432
433 DRM_DEBUG("\n"); 366 DRM_DEBUG("\n");
434 367
435 if (!dev) { 368 if (!dev) {
436 DRM_ERROR("cleanup called no dev\n"); 369 DRM_ERROR("cleanup called no dev\n");
437 return; 370 return;
438 } 371 }
439 driver = dev->driver;
440 372
441 drm_lastclose(dev); 373 drm_dev_unregister(dev);
374 drm_dev_free(dev);
375}
376EXPORT_SYMBOL(drm_put_dev);
442 377
443 if (dev->driver->unload) 378void drm_unplug_dev(struct drm_device *dev)
444 dev->driver->unload(dev); 379{
380 /* for a USB device */
381 if (drm_core_check_feature(dev, DRIVER_MODESET))
382 drm_unplug_minor(dev->control);
383 if (dev->render)
384 drm_unplug_minor(dev->render);
385 drm_unplug_minor(dev->primary);
445 386
446 if (dev->driver->bus->agp_destroy) 387 mutex_lock(&drm_global_mutex);
447 dev->driver->bus->agp_destroy(dev);
448 388
449 drm_vblank_cleanup(dev); 389 drm_device_set_unplugged(dev);
450 390
451 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) 391 if (dev->open_count == 0) {
452 drm_rmmap(dev, r_list->map); 392 drm_put_dev(dev);
453 drm_ht_remove(&dev->map_hash); 393 }
394 mutex_unlock(&drm_global_mutex);
395}
396EXPORT_SYMBOL(drm_unplug_dev);
454 397
455 drm_ctxbitmap_cleanup(dev); 398/**
399 * drm_dev_alloc - Allocate new drm device
400 * @driver: DRM driver to allocate device for
401 * @parent: Parent device object
402 *
403 * Allocate and initialize a new DRM device. No device registration is done.
 404 * Call drm_dev_register() to advertise the device to user space and register it
405 * with other core subsystems.
406 *
407 * RETURNS:
408 * Pointer to new DRM device, or NULL if out of memory.
409 */
410struct drm_device *drm_dev_alloc(struct drm_driver *driver,
411 struct device *parent)
412{
413 struct drm_device *dev;
414 int ret;
456 415
457 if (drm_core_check_feature(dev, DRIVER_MODESET)) 416 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
458 drm_put_minor(&dev->control); 417 if (!dev)
418 return NULL;
459 419
460 if (dev->render) 420 dev->dev = parent;
461 drm_put_minor(&dev->render); 421 dev->driver = driver;
422
423 INIT_LIST_HEAD(&dev->filelist);
424 INIT_LIST_HEAD(&dev->ctxlist);
425 INIT_LIST_HEAD(&dev->vmalist);
426 INIT_LIST_HEAD(&dev->maplist);
427 INIT_LIST_HEAD(&dev->vblank_event_list);
428
429 spin_lock_init(&dev->count_lock);
430 spin_lock_init(&dev->event_lock);
431 mutex_init(&dev->struct_mutex);
432 mutex_init(&dev->ctxlist_mutex);
433
434 if (drm_ht_create(&dev->map_hash, 12))
435 goto err_free;
462 436
463 if (driver->driver_features & DRIVER_GEM) 437 ret = drm_ctxbitmap_init(dev);
438 if (ret) {
439 DRM_ERROR("Cannot allocate memory for context bitmap.\n");
440 goto err_ht;
441 }
442
443 if (driver->driver_features & DRIVER_GEM) {
444 ret = drm_gem_init(dev);
445 if (ret) {
446 DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
447 goto err_ctxbitmap;
448 }
449 }
450
451 return dev;
452
453err_ctxbitmap:
454 drm_ctxbitmap_cleanup(dev);
455err_ht:
456 drm_ht_remove(&dev->map_hash);
457err_free:
458 kfree(dev);
459 return NULL;
460}
461EXPORT_SYMBOL(drm_dev_alloc);
462
463/**
464 * drm_dev_free - Free DRM device
465 * @dev: DRM device to free
466 *
467 * Free a DRM device that has previously been allocated via drm_dev_alloc().
468 * You must not use kfree() instead or you will leak memory.
469 *
 470 * This must not be called once the device has been registered. Use drm_put_dev()
471 * instead, which then calls drm_dev_free().
472 */
473void drm_dev_free(struct drm_device *dev)
474{
475 if (dev->driver->driver_features & DRIVER_GEM)
464 drm_gem_destroy(dev); 476 drm_gem_destroy(dev);
465 477
466 drm_put_minor(&dev->primary); 478 drm_ctxbitmap_cleanup(dev);
479 drm_ht_remove(&dev->map_hash);
467 480
468 list_del(&dev->driver_item);
469 kfree(dev->devname); 481 kfree(dev->devname);
470 kfree(dev); 482 kfree(dev);
471} 483}
472EXPORT_SYMBOL(drm_put_dev); 484EXPORT_SYMBOL(drm_dev_free);
473 485
474void drm_unplug_dev(struct drm_device *dev) 486/**
487 * drm_dev_register - Register DRM device
488 * @dev: Device to register
489 *
 490 * Register the DRM device @dev with the system, advertise the device to user-space
491 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
492 * previously.
493 *
494 * Never call this twice on any device!
495 *
496 * RETURNS:
497 * 0 on success, negative error code on failure.
498 */
499int drm_dev_register(struct drm_device *dev, unsigned long flags)
475{ 500{
476 /* for a USB device */ 501 int ret;
477 if (drm_core_check_feature(dev, DRIVER_MODESET))
478 drm_unplug_minor(dev->control);
479 if (dev->render)
480 drm_unplug_minor(dev->render);
481 drm_unplug_minor(dev->primary);
482 502
483 mutex_lock(&drm_global_mutex); 503 mutex_lock(&drm_global_mutex);
484 504
485 drm_device_set_unplugged(dev); 505 if (dev->driver->bus->agp_init) {
506 ret = dev->driver->bus->agp_init(dev);
507 if (ret)
508 goto out_unlock;
509 }
486 510
487 if (dev->open_count == 0) { 511 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
488 drm_put_dev(dev); 512 ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
513 if (ret)
514 goto err_agp;
489 } 515 }
516
517 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
518 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
519 if (ret)
520 goto err_control_node;
521 }
522
523 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
524 if (ret)
525 goto err_render_node;
526
527 if (dev->driver->load) {
528 ret = dev->driver->load(dev, flags);
529 if (ret)
530 goto err_primary_node;
531 }
532
533 /* setup grouping for legacy outputs */
534 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
535 ret = drm_mode_group_init_legacy_group(dev,
536 &dev->primary->mode_group);
537 if (ret)
538 goto err_unload;
539 }
540
541 list_add_tail(&dev->driver_item, &dev->driver->device_list);
542
543 ret = 0;
544 goto out_unlock;
545
546err_unload:
547 if (dev->driver->unload)
548 dev->driver->unload(dev);
549err_primary_node:
550 drm_put_minor(&dev->primary);
551err_render_node:
552 if (dev->render)
553 drm_put_minor(&dev->render);
554err_control_node:
555 if (dev->control)
556 drm_put_minor(&dev->control);
557err_agp:
558 if (dev->driver->bus->agp_destroy)
559 dev->driver->bus->agp_destroy(dev);
560out_unlock:
490 mutex_unlock(&drm_global_mutex); 561 mutex_unlock(&drm_global_mutex);
562 return ret;
491} 563}
492EXPORT_SYMBOL(drm_unplug_dev); 564EXPORT_SYMBOL(drm_dev_register);
565
566/**
567 * drm_dev_unregister - Unregister DRM device
568 * @dev: Device to unregister
569 *
570 * Unregister the DRM device from the system. This does the reverse of
571 * drm_dev_register() but does not deallocate the device. The caller must call
572 * drm_dev_free() to free all resources.
573 */
574void drm_dev_unregister(struct drm_device *dev)
575{
576 struct drm_map_list *r_list, *list_temp;
577
578 drm_lastclose(dev);
579
580 if (dev->driver->unload)
581 dev->driver->unload(dev);
582
583 if (dev->driver->bus->agp_destroy)
584 dev->driver->bus->agp_destroy(dev);
585
586 drm_vblank_cleanup(dev);
587
588 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
589 drm_rmmap(dev, r_list->map);
590
591 if (dev->control)
592 drm_put_minor(&dev->control);
593 if (dev->render)
594 drm_put_minor(&dev->render);
595 drm_put_minor(&dev->primary);
596
597 list_del(&dev->driver_item);
598}
599EXPORT_SYMBOL(drm_dev_unregister);
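
Taken together the new entry points give every bus the same four-step lifecycle: drm_dev_alloc(), drm_dev_register(), drm_dev_unregister() (normally via drm_put_dev()) and drm_dev_free(). A minimal probe sketch under the same assumptions as the converted PCI/platform/USB code above; foo_bus_probe() is hypothetical:

static int foo_bus_probe(struct device *parent, struct drm_driver *driver)
{
	struct drm_device *dev;
	int ret;

	dev = drm_dev_alloc(driver, parent);
	if (!dev)
		return -ENOMEM;

	ret = drm_dev_register(dev, 0);
	if (ret)
		drm_dev_free(dev);	/* never registered: plain free */

	return ret;
}
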
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 2290b3b73832..dae42c79154f 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -22,8 +22,8 @@
22#include <drm/drm_core.h> 22#include <drm/drm_core.h>
23#include <drm/drmP.h> 23#include <drm/drmP.h>
24 24
25#define to_drm_minor(d) container_of(d, struct drm_minor, kdev) 25#define to_drm_minor(d) dev_get_drvdata(d)
26#define to_drm_connector(d) container_of(d, struct drm_connector, kdev) 26#define to_drm_connector(d) dev_get_drvdata(d)
27 27
28static struct device_type drm_sysfs_device_minor = { 28static struct device_type drm_sysfs_device_minor = {
29 .name = "drm_minor" 29 .name = "drm_minor"
@@ -162,20 +162,6 @@ void drm_sysfs_destroy(void)
162 drm_class = NULL; 162 drm_class = NULL;
163} 163}
164 164
165/**
166 * drm_sysfs_device_release - do nothing
167 * @dev: Linux device
168 *
169 * Normally, this would free the DRM device associated with @dev, along
170 * with cleaning up any other stuff. But we do that in the DRM core, so
171 * this function can just return and hope that the core does its job.
172 */
173static void drm_sysfs_device_release(struct device *dev)
174{
175 memset(dev, 0, sizeof(struct device));
176 return;
177}
178
179/* 165/*
180 * Connector properties 166 * Connector properties
181 */ 167 */
@@ -394,29 +380,26 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
394 int i; 380 int i;
395 int ret; 381 int ret;
396 382
397 /* We shouldn't get called more than once for the same connector */ 383 if (connector->kdev)
398 BUG_ON(device_is_registered(&connector->kdev)); 384 return 0;
399
400 connector->kdev.parent = &dev->primary->kdev;
401 connector->kdev.class = drm_class;
402 connector->kdev.release = drm_sysfs_device_release;
403 385
386 /* We shouldn't get called more than once for the same connector */
387 connector->kdev = device_create(drm_class, dev->primary->kdev,
388 0, connector, "card%d-%s",
389 dev->primary->index, drm_get_connector_name(connector));
404 DRM_DEBUG("adding \"%s\" to sysfs\n", 390 DRM_DEBUG("adding \"%s\" to sysfs\n",
405 drm_get_connector_name(connector)); 391 drm_get_connector_name(connector));
406 392
407 dev_set_name(&connector->kdev, "card%d-%s", 393 if (IS_ERR(connector->kdev)) {
408 dev->primary->index, drm_get_connector_name(connector)); 394 DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
409 ret = device_register(&connector->kdev); 395 ret = PTR_ERR(connector->kdev);
410
411 if (ret) {
412 DRM_ERROR("failed to register connector device: %d\n", ret);
413 goto out; 396 goto out;
414 } 397 }
415 398
416 /* Standard attributes */ 399 /* Standard attributes */
417 400
418 for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) { 401 for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
419 ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]); 402 ret = device_create_file(connector->kdev, &connector_attrs[attr_cnt]);
420 if (ret) 403 if (ret)
421 goto err_out_files; 404 goto err_out_files;
422 } 405 }
@@ -433,7 +416,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
433 case DRM_MODE_CONNECTOR_Component: 416 case DRM_MODE_CONNECTOR_Component:
434 case DRM_MODE_CONNECTOR_TV: 417 case DRM_MODE_CONNECTOR_TV:
435 for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) { 418 for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
436 ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]); 419 ret = device_create_file(connector->kdev, &connector_attrs_opt1[opt_cnt]);
437 if (ret) 420 if (ret)
438 goto err_out_files; 421 goto err_out_files;
439 } 422 }
@@ -442,7 +425,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
442 break; 425 break;
443 } 426 }
444 427
445 ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr); 428 ret = sysfs_create_bin_file(&connector->kdev->kobj, &edid_attr);
446 if (ret) 429 if (ret)
447 goto err_out_files; 430 goto err_out_files;
448 431
@@ -453,10 +436,11 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
453 436
454err_out_files: 437err_out_files:
455 for (i = 0; i < opt_cnt; i++) 438 for (i = 0; i < opt_cnt; i++)
456 device_remove_file(&connector->kdev, &connector_attrs_opt1[i]); 439 device_remove_file(connector->kdev, &connector_attrs_opt1[i]);
457 for (i = 0; i < attr_cnt; i++) 440 for (i = 0; i < attr_cnt; i++)
458 device_remove_file(&connector->kdev, &connector_attrs[i]); 441 device_remove_file(connector->kdev, &connector_attrs[i]);
459 device_unregister(&connector->kdev); 442 put_device(connector->kdev);
443 device_unregister(connector->kdev);
460 444
461out: 445out:
462 return ret; 446 return ret;
@@ -480,16 +464,17 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
480{ 464{
481 int i; 465 int i;
482 466
483 if (!connector->kdev.parent) 467 if (!connector->kdev)
484 return; 468 return;
485 DRM_DEBUG("removing \"%s\" from sysfs\n", 469 DRM_DEBUG("removing \"%s\" from sysfs\n",
486 drm_get_connector_name(connector)); 470 drm_get_connector_name(connector));
487 471
488 for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) 472 for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
489 device_remove_file(&connector->kdev, &connector_attrs[i]); 473 device_remove_file(connector->kdev, &connector_attrs[i]);
490 sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr); 474 sysfs_remove_bin_file(&connector->kdev->kobj, &edid_attr);
491 device_unregister(&connector->kdev); 475 put_device(connector->kdev);
492 connector->kdev.parent = NULL; 476 device_unregister(connector->kdev);
477 connector->kdev = NULL;
493} 478}
494EXPORT_SYMBOL(drm_sysfs_connector_remove); 479EXPORT_SYMBOL(drm_sysfs_connector_remove);
495 480
@@ -508,7 +493,7 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
508 493
509 DRM_DEBUG("generating hotplug event\n"); 494 DRM_DEBUG("generating hotplug event\n");
510 495
511 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp); 496 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
512} 497}
513EXPORT_SYMBOL(drm_sysfs_hotplug_event); 498EXPORT_SYMBOL(drm_sysfs_hotplug_event);
514 499
@@ -523,15 +508,8 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
523 */ 508 */
524int drm_sysfs_device_add(struct drm_minor *minor) 509int drm_sysfs_device_add(struct drm_minor *minor)
525{ 510{
526 int err;
527 char *minor_str; 511 char *minor_str;
528 512
529 minor->kdev.parent = minor->dev->dev;
530
531 minor->kdev.class = drm_class;
532 minor->kdev.release = drm_sysfs_device_release;
533 minor->kdev.devt = minor->device;
534 minor->kdev.type = &drm_sysfs_device_minor;
535 if (minor->type == DRM_MINOR_CONTROL) 513 if (minor->type == DRM_MINOR_CONTROL)
536 minor_str = "controlD%d"; 514 minor_str = "controlD%d";
537 else if (minor->type == DRM_MINOR_RENDER) 515 else if (minor->type == DRM_MINOR_RENDER)
@@ -539,18 +517,14 @@ int drm_sysfs_device_add(struct drm_minor *minor)
539 else 517 else
540 minor_str = "card%d"; 518 minor_str = "card%d";
541 519
542 dev_set_name(&minor->kdev, minor_str, minor->index); 520 minor->kdev = device_create(drm_class, minor->dev->dev,
543 521 MKDEV(DRM_MAJOR, minor->index),
544 err = device_register(&minor->kdev); 522 minor, minor_str, minor->index);
545 if (err) { 523 if (IS_ERR(minor->kdev)) {
546 DRM_ERROR("device add failed: %d\n", err); 524 DRM_ERROR("device create failed %ld\n", PTR_ERR(minor->kdev));
547 goto err_out; 525 return PTR_ERR(minor->kdev);
548 } 526 }
549
550 return 0; 527 return 0;
551
552err_out:
553 return err;
554} 528}
555 529
556/** 530/**
@@ -562,9 +536,9 @@ err_out:
562 */ 536 */
563void drm_sysfs_device_remove(struct drm_minor *minor) 537void drm_sysfs_device_remove(struct drm_minor *minor)
564{ 538{
565 if (minor->kdev.parent) 539 if (minor->kdev)
566 device_unregister(&minor->kdev); 540 device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index));
567 minor->kdev.parent = NULL; 541 minor->kdev = NULL;
568} 542}
569 543
570 544
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 87664723b9ce..b179b70e7853 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -7,57 +7,20 @@ int drm_get_usb_dev(struct usb_interface *interface,
7 struct drm_driver *driver) 7 struct drm_driver *driver)
8{ 8{
9 struct drm_device *dev; 9 struct drm_device *dev;
10 struct usb_device *usbdev;
11 int ret; 10 int ret;
12 11
13 DRM_DEBUG("\n"); 12 DRM_DEBUG("\n");
14 13
15 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 14 dev = drm_dev_alloc(driver, &interface->dev);
16 if (!dev) 15 if (!dev)
17 return -ENOMEM; 16 return -ENOMEM;
18 17
19 usbdev = interface_to_usbdev(interface); 18 dev->usbdev = interface_to_usbdev(interface);
20 dev->usbdev = usbdev;
21 dev->dev = &interface->dev;
22
23 mutex_lock(&drm_global_mutex);
24
25 ret = drm_fill_in_dev(dev, NULL, driver);
26 if (ret) {
27 printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
28 goto err_g1;
29 }
30
31 usb_set_intfdata(interface, dev); 19 usb_set_intfdata(interface, dev);
32 ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
33 if (ret)
34 goto err_g1;
35
36 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
37 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
38 if (ret)
39 goto err_g11;
40 }
41 20
42 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY); 21 ret = drm_dev_register(dev, 0);
43 if (ret) 22 if (ret)
44 goto err_g2; 23 goto err_free;
45
46 if (dev->driver->load) {
47 ret = dev->driver->load(dev, 0);
48 if (ret)
49 goto err_g3;
50 }
51
52 /* setup the grouping for the legacy output */
53 ret = drm_mode_group_init_legacy_group(dev,
54 &dev->primary->mode_group);
55 if (ret)
56 goto err_g3;
57
58 list_add_tail(&dev->driver_item, &driver->device_list);
59
60 mutex_unlock(&drm_global_mutex);
61 24
62 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", 25 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
63 driver->name, driver->major, driver->minor, driver->patchlevel, 26 driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -65,16 +28,8 @@ int drm_get_usb_dev(struct usb_interface *interface,
65 28
66 return 0; 29 return 0;
67 30
68err_g3: 31err_free:
69 drm_put_minor(&dev->primary); 32 drm_dev_free(dev);
70err_g2:
71 if (dev->render)
72 drm_put_minor(&dev->render);
73err_g11:
74 drm_put_minor(&dev->control);
75err_g1:
76 kfree(dev);
77 mutex_unlock(&drm_global_mutex);
78 return ret; 33 return ret;
79 34
80} 35}
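drm_get_usb_dev() shows the new core helpers at work: drm_dev_alloc() plus drm_dev_register() absorb the open-coded kzalloc()/drm_fill_in_dev() setup, all three drm_get_minor() calls, the driver->load() invocation, the legacy mode-group setup, the driver device-list bookkeeping, and the drm_global_mutex handling, while drm_dev_free() collapses the four-label error ladder into a single unwind. The new body, reassembled from the right-hand column (the unchanged DRM_INFO banner is elided):

	struct drm_device *dev;
	int ret;

	dev = drm_dev_alloc(driver, &interface->dev);
	if (!dev)
		return -ENOMEM;

	dev->usbdev = interface_to_usbdev(interface);
	usb_set_intfdata(interface, dev);

	ret = drm_dev_register(dev, 0);	/* minors, driver->load(), mode group */
	if (ret)
		goto err_free;

	return 0;

err_free:
	drm_dev_free(dev);
	return ret;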
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index bb82ef78ca85..3a1e6d9b25f7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -264,7 +264,6 @@ static struct drm_driver exynos_drm_driver = {
264 .get_vblank_counter = drm_vblank_count, 264 .get_vblank_counter = drm_vblank_count,
265 .enable_vblank = exynos_drm_crtc_enable_vblank, 265 .enable_vblank = exynos_drm_crtc_enable_vblank,
266 .disable_vblank = exynos_drm_crtc_disable_vblank, 266 .disable_vblank = exynos_drm_crtc_disable_vblank,
267 .gem_init_object = exynos_drm_gem_init_object,
268 .gem_free_object = exynos_drm_gem_free_object, 267 .gem_free_object = exynos_drm_gem_free_object,
269 .gem_vm_ops = &exynos_drm_gem_vm_ops, 268 .gem_vm_ops = &exynos_drm_gem_vm_ops,
270 .dumb_create = exynos_drm_gem_dumb_create, 269 .dumb_create = exynos_drm_gem_dumb_create,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 868a14d52995..23da72b5eae9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -716,20 +716,20 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
716{ 716{
717 /* 717 /*
718 * enable drm irq mode. 718 * enable drm irq mode.
719 * - with irq_enabled = 1, we can use the vblank feature. 719 * - with irq_enabled = true, we can use the vblank feature.
720 * 720 *
721 * P.S. note that we wouldn't use drm irq handler but 721 * P.S. note that we wouldn't use drm irq handler but
722 * just specific driver own one instead because 722 * just specific driver own one instead because
723 * drm framework supports only one irq handler. 723 * drm framework supports only one irq handler.
724 */ 724 */
725 drm_dev->irq_enabled = 1; 725 drm_dev->irq_enabled = true;
726 726
727 /* 727 /*
728 * with vblank_disable_allowed = 1, vblank interrupt will be disabled 728 * with vblank_disable_allowed = true, vblank interrupt will be disabled
729 * by drm timer once a current process gives up ownership of 729 * by drm timer once a current process gives up ownership of
730 * vblank event.(after drm_vblank_put function is called) 730 * vblank event.(after drm_vblank_put function is called)
731 */ 731 */
732 drm_dev->vblank_disable_allowed = 1; 732 drm_dev->vblank_disable_allowed = true;
733 733
734 /* attach this sub driver to iommu mapping if supported. */ 734 /* attach this sub driver to iommu mapping if supported. */
735 if (is_drm_iommu_supported(drm_dev)) 735 if (is_drm_iommu_supported(drm_dev))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 49f9cd232757..1ade191d84f4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -630,11 +630,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
630 dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir); 630 dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
631} 631}
632 632
633int exynos_drm_gem_init_object(struct drm_gem_object *obj)
634{
635 return 0;
636}
637
638void exynos_drm_gem_free_object(struct drm_gem_object *obj) 633void exynos_drm_gem_free_object(struct drm_gem_object *obj)
639{ 634{
640 struct exynos_drm_gem_obj *exynos_gem_obj; 635 struct exynos_drm_gem_obj *exynos_gem_obj;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 09555afdfe9c..702ec3abe85c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -135,9 +135,6 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
135 unsigned int gem_handle, 135 unsigned int gem_handle,
136 struct drm_file *file_priv); 136 struct drm_file *file_priv);
137 137
138/* initialize gem object. */
139int exynos_drm_gem_init_object(struct drm_gem_object *obj);
140
141/* free gem object. */ 138/* free gem object. */
142void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj); 139void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
143 140
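The ->gem_init_object driver hook is deleted here and again in the gma500 and i915 hunks below. None of the implementations did real work (exynos and i915 returned 0 unconditionally, gma500 returned -EINVAL), so the core has evidently stopped calling the hook altogether, and the stubs and their header prototypes can go with it.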
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 4400330e4449..ddaaedde173d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -101,7 +101,6 @@ static struct edid *vidi_get_edid(struct device *dev,
101{ 101{
102 struct vidi_context *ctx = get_vidi_context(dev); 102 struct vidi_context *ctx = get_vidi_context(dev);
103 struct edid *edid; 103 struct edid *edid;
104 int edid_len;
105 104
106 /* 105 /*
107 * the edid data comes from user side and it would be set 106 * the edid data comes from user side and it would be set
@@ -112,8 +111,7 @@ static struct edid *vidi_get_edid(struct device *dev,
112 return ERR_PTR(-EFAULT); 111 return ERR_PTR(-EFAULT);
113 } 112 }
114 113
115 edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH; 114 edid = drm_edid_duplicate(ctx->raw_edid);
116 edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
117 if (!edid) { 115 if (!edid) {
118 DRM_DEBUG_KMS("failed to allocate edid\n"); 116 DRM_DEBUG_KMS("failed to allocate edid\n");
119 return ERR_PTR(-ENOMEM); 117 return ERR_PTR(-ENOMEM);
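Both open-coded EDID copies in the vidi driver become calls to the new drm_edid_duplicate() helper. Judging from the code it replaces, the helper is in all likelihood just the same kmemdup sized by the extension count; a sketch under that assumption:

	/* presumed shape of drm_edid_duplicate(), inferred from the
	 * open-coded copy it replaces here; not taken from the core patch */
	struct edid *drm_edid_duplicate(const struct edid *edid)
	{
		return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH,
			       GFP_KERNEL);
	}

Centralizing the length computation removes one easy way to get the extension-block math wrong.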
@@ -385,20 +383,20 @@ static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
385{ 383{
386 /* 384 /*
387 * enable drm irq mode. 385 * enable drm irq mode.
388 * - with irq_enabled = 1, we can use the vblank feature. 386 * - with irq_enabled = true, we can use the vblank feature.
389 * 387 *
390 * P.S. note that we wouldn't use drm irq handler but 388 * P.S. note that we wouldn't use drm irq handler but
391 * just specific driver own one instead because 389 * just specific driver own one instead because
392 * drm framework supports only one irq handler. 390 * drm framework supports only one irq handler.
393 */ 391 */
394 drm_dev->irq_enabled = 1; 392 drm_dev->irq_enabled = true;
395 393
396 /* 394 /*
397 * with vblank_disable_allowed = 1, vblank interrupt will be disabled 395 * with vblank_disable_allowed = true, vblank interrupt will be disabled
398 * by drm timer once a current process gives up ownership of 396 * by drm timer once a current process gives up ownership of
399 * vblank event.(after drm_vblank_put function is called) 397 * vblank event.(after drm_vblank_put function is called)
400 */ 398 */
401 drm_dev->vblank_disable_allowed = 1; 399 drm_dev->vblank_disable_allowed = true;
402 400
403 return 0; 401 return 0;
404} 402}
@@ -485,7 +483,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
485 struct exynos_drm_manager *manager; 483 struct exynos_drm_manager *manager;
486 struct exynos_drm_display_ops *display_ops; 484 struct exynos_drm_display_ops *display_ops;
487 struct drm_exynos_vidi_connection *vidi = data; 485 struct drm_exynos_vidi_connection *vidi = data;
488 int edid_len;
489 486
490 if (!vidi) { 487 if (!vidi) {
491 DRM_DEBUG_KMS("user data for vidi is null.\n"); 488 DRM_DEBUG_KMS("user data for vidi is null.\n");
@@ -524,8 +521,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
524 DRM_DEBUG_KMS("edid data is invalid.\n"); 521 DRM_DEBUG_KMS("edid data is invalid.\n");
525 return -EINVAL; 522 return -EINVAL;
526 } 523 }
527 edid_len = (1 + raw_edid->extensions) * EDID_LENGTH; 524 ctx->raw_edid = drm_edid_duplicate(raw_edid);
528 ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL);
529 if (!ctx->raw_edid) { 525 if (!ctx->raw_edid) {
530 DRM_DEBUG_KMS("failed to allocate raw_edid.\n"); 526 DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
531 return -ENOMEM; 527 return -ENOMEM;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index f4eb43573cad..f88a1815d87c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -666,7 +666,7 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector,
666 strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); 666 strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
667 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; 667 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
668 intel_dp->adapter.algo_data = &intel_dp->algo; 668 intel_dp->adapter.algo_data = &intel_dp->algo;
669 intel_dp->adapter.dev.parent = &connector->base.kdev; 669 intel_dp->adapter.dev.parent = connector->base.kdev;
670 670
671 if (is_edp(encoder)) 671 if (is_edp(encoder))
672 cdv_intel_edp_panel_vdd_on(encoder); 672 cdv_intel_edp_panel_vdd_on(encoder);
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 10ae8c52d06f..e2db48a81ed0 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -29,11 +29,6 @@
29#include <drm/drm_vma_manager.h> 29#include <drm/drm_vma_manager.h>
30#include "psb_drv.h" 30#include "psb_drv.h"
31 31
32int psb_gem_init_object(struct drm_gem_object *obj)
33{
34 return -EINVAL;
35}
36
37void psb_gem_free_object(struct drm_gem_object *obj) 32void psb_gem_free_object(struct drm_gem_object *obj)
38{ 33{
39 struct gtt_range *gtt = container_of(obj, struct gtt_range, gem); 34 struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index fcb4e9ff1f20..dd607f820a26 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -359,7 +359,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
359 359
360 drm_irq_install(dev); 360 drm_irq_install(dev);
361 361
362 dev->vblank_disable_allowed = 1; 362 dev->vblank_disable_allowed = true;
363 363
364 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 364 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
365 365
@@ -646,7 +646,6 @@ static struct drm_driver driver = {
646 .preclose = psb_driver_preclose, 646 .preclose = psb_driver_preclose,
647 .postclose = psb_driver_close, 647 .postclose = psb_driver_close,
648 648
649 .gem_init_object = psb_gem_init_object,
650 .gem_free_object = psb_gem_free_object, 649 .gem_free_object = psb_gem_free_object,
651 .gem_vm_ops = &psb_gem_vm_ops, 650 .gem_vm_ops = &psb_gem_vm_ops,
652 .dumb_create = psb_gem_dumb_create, 651 .dumb_create = psb_gem_dumb_create,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 4535ac7708f8..0bab46bd73d2 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -44,10 +44,10 @@ enum {
44 CHIP_MFLD_0130 = 3, /* Medfield */ 44 CHIP_MFLD_0130 = 3, /* Medfield */
45}; 45};
46 46
47#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108) 47#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108)
48#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100) 48#define IS_MRST(dev) (((dev)->pdev->device & 0xfffc) == 0x4100)
49#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130) 49#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
50#define IS_CDV(dev) (((dev)->pci_device & 0xfff0) == 0x0be0) 50#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
51 51
52/* 52/*
53 * Driver definitions 53 * Driver definitions
@@ -837,7 +837,6 @@ extern const struct drm_connector_helper_funcs
837extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs; 837extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
838 838
839/* gem.c */ 839/* gem.c */
840extern int psb_gem_init_object(struct drm_gem_object *obj);
841extern void psb_gem_free_object(struct drm_gem_object *obj); 840extern void psb_gem_free_object(struct drm_gem_object *obj);
842extern int psb_gem_get_aperture(struct drm_device *dev, void *data, 841extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
843 struct drm_file *file); 842 struct drm_file *file);
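The IS_PSB()/IS_MRST()/IS_MFLD()/IS_CDV() macros stop reading the drm_device-level pci_device copy and take the id straight from the underlying struct pci_dev (dev->pdev->device). The i915_getparam hunk below makes the same substitution for I915_PARAM_CHIPSET_ID, which suggests the cached field is being removed from struct drm_device rather than merely deprecated.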
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 029eccf30137..ba4830342d34 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -271,15 +271,15 @@ void psb_irq_preinstall(struct drm_device *dev)
271 271
272 if (gma_power_is_on(dev)) 272 if (gma_power_is_on(dev))
273 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); 273 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
274 if (dev->vblank_enabled[0]) 274 if (dev->vblank[0].enabled)
275 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG; 275 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
276 if (dev->vblank_enabled[1]) 276 if (dev->vblank[1].enabled)
277 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG; 277 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
278 278
279 /* FIXME: Handle Medfield irq mask 279 /* FIXME: Handle Medfield irq mask
280 if (dev->vblank_enabled[1]) 280 if (dev->vblank[1].enabled)
281 dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG; 281 dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
282 if (dev->vblank_enabled[2]) 282 if (dev->vblank[2].enabled)
283 dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG; 283 dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
284 */ 284 */
285 285
@@ -305,17 +305,17 @@ int psb_irq_postinstall(struct drm_device *dev)
305 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); 305 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
306 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); 306 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
307 307
308 if (dev->vblank_enabled[0]) 308 if (dev->vblank[0].enabled)
309 psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); 309 psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
310 else 310 else
311 psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); 311 psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
312 312
313 if (dev->vblank_enabled[1]) 313 if (dev->vblank[1].enabled)
314 psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); 314 psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
315 else 315 else
316 psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); 316 psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
317 317
318 if (dev->vblank_enabled[2]) 318 if (dev->vblank[2].enabled)
319 psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); 319 psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
320 else 320 else
321 psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); 321 psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
@@ -339,13 +339,13 @@ void psb_irq_uninstall(struct drm_device *dev)
339 339
340 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); 340 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
341 341
342 if (dev->vblank_enabled[0]) 342 if (dev->vblank[0].enabled)
343 psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); 343 psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
344 344
345 if (dev->vblank_enabled[1]) 345 if (dev->vblank[1].enabled)
346 psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); 346 psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
347 347
348 if (dev->vblank_enabled[2]) 348 if (dev->vblank[2].enabled)
349 psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); 349 psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
350 350
351 dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG | 351 dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
@@ -456,7 +456,7 @@ static int psb_vblank_do_wait(struct drm_device *dev,
456{ 456{
457 unsigned int cur_vblank; 457 unsigned int cur_vblank;
458 int ret = 0; 458 int ret = 0;
459 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 459 DRM_WAIT_ON(ret, dev->vblank.queue, 3 * DRM_HZ,
460 (((cur_vblank = atomic_read(counter)) 460 (((cur_vblank = atomic_read(counter))
461 - *sequence) <= (1 << 23))); 461 - *sequence) <= (1 << 23)));
462 *sequence = cur_vblank; 462 *sequence = cur_vblank;
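psb_irq.c is converted from the core's old parallel arrays to a per-CRTC vblank structure: dev->vblank_enabled[pipe] becomes dev->vblank[pipe].enabled, and the old vbl_queue wait queue is folded into the same structure (the unindexed dev->vblank.queue reference in psb_vblank_do_wait mirrors the unindexed dev->vbl_queue it replaces). A minimal sketch of what the new core struct must at least carry, reconstructed from this driver's usage alone:

	/* hedged sketch, limited to the fields psb touches; the core
	 * version surely carries counters, refcounts and timestamps too */
	struct drm_vblank_crtc {
		wait_queue_head_t queue;	/* vblank wait queue */
		bool enabled;			/* vblank irq enabled? */
	};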
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index ab1892eb1074..249fdff305c6 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -944,8 +944,6 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
944 dma->buflist[vertex->idx], 944 dma->buflist[vertex->idx],
945 vertex->discard, vertex->used); 945 vertex->discard, vertex->used);
946 946
947 atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
948 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
949 sarea_priv->last_enqueue = dev_priv->counter - 1; 947 sarea_priv->last_enqueue = dev_priv->counter - 1;
950 sarea_priv->last_dispatch = (int)hw_status[5]; 948 sarea_priv->last_dispatch = (int)hw_status[5];
951 949
@@ -1105,8 +1103,6 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
1105 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used, 1103 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
1106 mc->last_render); 1104 mc->last_render);
1107 1105
1108 atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
1109 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
1110 sarea_priv->last_enqueue = dev_priv->counter - 1; 1106 sarea_priv->last_enqueue = dev_priv->counter - 1;
1111 sarea_priv->last_dispatch = (int)hw_status[5]; 1107 sarea_priv->last_dispatch = (int)hw_status[5];
1112 1108
@@ -1197,13 +1193,6 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
1197 1193
1198int i810_driver_load(struct drm_device *dev, unsigned long flags) 1194int i810_driver_load(struct drm_device *dev, unsigned long flags)
1199{ 1195{
1200 /* i810 has 4 more counters */
1201 dev->counters += 4;
1202 dev->types[6] = _DRM_STAT_IRQ;
1203 dev->types[7] = _DRM_STAT_PRIMARY;
1204 dev->types[8] = _DRM_STAT_SECONDARY;
1205 dev->types[9] = _DRM_STAT_DMA;
1206
1207 pci_set_master(dev->pdev); 1196 pci_set_master(dev->pdev);
1208 1197
1209 return 0; 1198 return 0;
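The legacy _DRM_STAT accounting is being removed from the core, so i810 stops bumping the secondary/DMA counters in its dispatch paths and drops the four extra counter registrations from its load hook; i915_driver_load below sheds the identical block.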
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b8449a84a0dc..65e60d26891b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -21,6 +21,9 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
21 intel_display.o \ 21 intel_display.o \
22 intel_crt.o \ 22 intel_crt.o \
23 intel_lvds.o \ 23 intel_lvds.o \
24 intel_dsi.o \
25 intel_dsi_cmd.o \
26 intel_dsi_pll.o \
24 intel_bios.o \ 27 intel_bios.o \
25 intel_ddi.o \ 28 intel_ddi.o \
26 intel_dp.o \ 29 intel_dp.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 33a62ad80100..312163379db9 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -77,17 +77,6 @@ struct intel_dvo_dev_ops {
77 struct drm_display_mode *mode); 77 struct drm_display_mode *mode);
78 78
79 /* 79 /*
80 * Callback to adjust the mode to be set in the CRTC.
81 *
82 * This allows an output to adjust the clock or even the entire set of
83 * timings, which is used for panels with fixed timings or for
84 * buses with clock limitations.
85 */
86 bool (*mode_fixup)(struct intel_dvo_device *dvo,
87 const struct drm_display_mode *mode,
88 struct drm_display_mode *adjusted_mode);
89
90 /*
91 * Callback for preparing mode changes on an output 80 * Callback for preparing mode changes on an output
92 */ 81 */
93 void (*prepare)(struct intel_dvo_device *dvo); 82 void (*prepare)(struct intel_dvo_device *dvo);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a6f4cb5af185..61fd61969e21 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -145,6 +145,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
145 seq_printf(m, " (%s)", obj->ring->name); 145 seq_printf(m, " (%s)", obj->ring->name);
146} 146}
147 147
148static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
149{
150 seq_putc(m, ctx->is_initialized ? 'I' : 'i');
151 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
152 seq_putc(m, ' ');
153}
154
148static int i915_gem_object_list_info(struct seq_file *m, void *data) 155static int i915_gem_object_list_info(struct seq_file *m, void *data)
149{ 156{
150 struct drm_info_node *node = (struct drm_info_node *) m->private; 157 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1442,6 +1449,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
1442 struct drm_device *dev = node->minor->dev; 1449 struct drm_device *dev = node->minor->dev;
1443 drm_i915_private_t *dev_priv = dev->dev_private; 1450 drm_i915_private_t *dev_priv = dev->dev_private;
1444 struct intel_ring_buffer *ring; 1451 struct intel_ring_buffer *ring;
1452 struct i915_hw_context *ctx;
1445 int ret, i; 1453 int ret, i;
1446 1454
1447 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1455 ret = mutex_lock_interruptible(&dev->mode_config.mutex);
@@ -1460,12 +1468,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
1460 seq_putc(m, '\n'); 1468 seq_putc(m, '\n');
1461 } 1469 }
1462 1470
1463 for_each_ring(ring, dev_priv, i) { 1471 list_for_each_entry(ctx, &dev_priv->context_list, link) {
1464 if (ring->default_context) { 1472 seq_puts(m, "HW context ");
1465 seq_printf(m, "HW default context %s ring ", ring->name); 1473 describe_ctx(m, ctx);
1466 describe_obj(m, ring->default_context->obj); 1474 for_each_ring(ring, dev_priv, i)
1467 seq_putc(m, '\n'); 1475 if (ring->default_context == ctx)
1468 } 1476 seq_printf(m, "(default context %s) ", ring->name);
1477
1478 describe_obj(m, ctx->obj);
1479 seq_putc(m, '\n');
1469 } 1480 }
1470 1481
1471 mutex_unlock(&dev->mode_config.mutex); 1482 mutex_unlock(&dev->mode_config.mutex);
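i915_context_status now walks the new dev_priv->context_list (the list_head link added to i915_hw_context in the i915_drv.h hunks below) instead of only peeking at each ring's default context, so user-created contexts finally show up in the dump. The describe_ctx() helper added above prints the two per-context flags as upper/lower-case characters: 'I' for is_initialized and 'R' for the new remap_slice, which ties into the per-slice L3 remapping fields added to intel_l3_parity.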
@@ -1610,27 +1621,27 @@ static int i915_dpio_info(struct seq_file *m, void *data)
1610 seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL)); 1621 seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
1611 1622
1612 seq_printf(m, "DPIO_DIV_A: 0x%08x\n", 1623 seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
1613 vlv_dpio_read(dev_priv, _DPIO_DIV_A)); 1624 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
1614 seq_printf(m, "DPIO_DIV_B: 0x%08x\n", 1625 seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
1615 vlv_dpio_read(dev_priv, _DPIO_DIV_B)); 1626 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));
1616 1627
1617 seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n", 1628 seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
1618 vlv_dpio_read(dev_priv, _DPIO_REFSFR_A)); 1629 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
1619 seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n", 1630 seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
1620 vlv_dpio_read(dev_priv, _DPIO_REFSFR_B)); 1631 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));
1621 1632
1622 seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n", 1633 seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
1623 vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A)); 1634 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
1624 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n", 1635 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
1625 vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B)); 1636 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));
1626 1637
1627 seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n", 1638 seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
1628 vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A)); 1639 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
1629 seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n", 1640 seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
1630 vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B)); 1641 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));
1631 1642
1632 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", 1643 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
1633 vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE)); 1644 vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));
1634 1645
1635 mutex_unlock(&dev_priv->dpio_lock); 1646 mutex_unlock(&dev_priv->dpio_lock);
1636 1647
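Every vlv_dpio_read() in the DPIO dump gains a leading PIPE_A argument, so the accessor now selects which DPIO channel to address instead of assuming a single one:

	/* before: vlv_dpio_read(dev_priv, reg)
	 * after:  vlv_dpio_read(dev_priv, pipe, reg) */
	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));

The debugfs file keeps its old single-pipe behaviour by hardcoding PIPE_A throughout.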
@@ -1655,126 +1666,20 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
1655 struct drm_info_node *node = m->private; 1666 struct drm_info_node *node = m->private;
1656 struct drm_device *dev = node->minor->dev; 1667 struct drm_device *dev = node->minor->dev;
1657 struct drm_i915_private *dev_priv = dev->dev_private; 1668 struct drm_i915_private *dev_priv = dev->dev_private;
1658 u32 psrstat, psrperf; 1669 u32 psrperf = 0;
1659 1670 bool enabled = false;
1660 if (!IS_HASWELL(dev)) {
1661 seq_puts(m, "PSR not supported on this platform\n");
1662 } else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
1663 seq_puts(m, "PSR enabled\n");
1664 } else {
1665 seq_puts(m, "PSR disabled: ");
1666 switch (dev_priv->no_psr_reason) {
1667 case PSR_NO_SOURCE:
1668 seq_puts(m, "not supported on this platform");
1669 break;
1670 case PSR_NO_SINK:
1671 seq_puts(m, "not supported by panel");
1672 break;
1673 case PSR_MODULE_PARAM:
1674 seq_puts(m, "disabled by flag");
1675 break;
1676 case PSR_CRTC_NOT_ACTIVE:
1677 seq_puts(m, "crtc not active");
1678 break;
1679 case PSR_PWR_WELL_ENABLED:
1680 seq_puts(m, "power well enabled");
1681 break;
1682 case PSR_NOT_TILED:
1683 seq_puts(m, "not tiled");
1684 break;
1685 case PSR_SPRITE_ENABLED:
1686 seq_puts(m, "sprite enabled");
1687 break;
1688 case PSR_S3D_ENABLED:
1689 seq_puts(m, "stereo 3d enabled");
1690 break;
1691 case PSR_INTERLACED_ENABLED:
1692 seq_puts(m, "interlaced enabled");
1693 break;
1694 case PSR_HSW_NOT_DDIA:
1695 seq_puts(m, "HSW ties PSR to DDI A (eDP)");
1696 break;
1697 default:
1698 seq_puts(m, "unknown reason");
1699 }
1700 seq_puts(m, "\n");
1701 return 0;
1702 }
1703
1704 psrstat = I915_READ(EDP_PSR_STATUS_CTL);
1705
1706 seq_puts(m, "PSR Current State: ");
1707 switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
1708 case EDP_PSR_STATUS_STATE_IDLE:
1709 seq_puts(m, "Reset state\n");
1710 break;
1711 case EDP_PSR_STATUS_STATE_SRDONACK:
1712 seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
1713 break;
1714 case EDP_PSR_STATUS_STATE_SRDENT:
1715 seq_puts(m, "SRD entry\n");
1716 break;
1717 case EDP_PSR_STATUS_STATE_BUFOFF:
1718 seq_puts(m, "Wait for buffer turn off\n");
1719 break;
1720 case EDP_PSR_STATUS_STATE_BUFON:
1721 seq_puts(m, "Wait for buffer turn on\n");
1722 break;
1723 case EDP_PSR_STATUS_STATE_AUXACK:
1724 seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
1725 break;
1726 case EDP_PSR_STATUS_STATE_SRDOFFACK:
1727 seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
1728 break;
1729 default:
1730 seq_puts(m, "Unknown\n");
1731 break;
1732 }
1733
1734 seq_puts(m, "Link Status: ");
1735 switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
1736 case EDP_PSR_STATUS_LINK_FULL_OFF:
1737 seq_puts(m, "Link is fully off\n");
1738 break;
1739 case EDP_PSR_STATUS_LINK_FULL_ON:
1740 seq_puts(m, "Link is fully on\n");
1741 break;
1742 case EDP_PSR_STATUS_LINK_STANDBY:
1743 seq_puts(m, "Link is in standby\n");
1744 break;
1745 default:
1746 seq_puts(m, "Unknown\n");
1747 break;
1748 }
1749
1750 seq_printf(m, "PSR Entry Count: %u\n",
1751 psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
1752 EDP_PSR_STATUS_COUNT_MASK);
1753
1754 seq_printf(m, "Max Sleep Timer Counter: %u\n",
1755 psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
1756 EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);
1757
1758 seq_printf(m, "Had AUX error: %s\n",
1759 yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));
1760
1761 seq_printf(m, "Sending AUX: %s\n",
1762 yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));
1763
1764 seq_printf(m, "Sending Idle: %s\n",
1765 yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));
1766
1767 seq_printf(m, "Sending TP2 TP3: %s\n",
1768 yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));
1769 1671
1770 seq_printf(m, "Sending TP1: %s\n", 1672 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
1771 yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1)); 1673 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
1772 1674
1773 seq_printf(m, "Idle Count: %u\n", 1675 enabled = HAS_PSR(dev) &&
1774 psrstat & EDP_PSR_STATUS_IDLE_MASK); 1676 I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1677 seq_printf(m, "Enabled: %s\n", yesno(enabled));
1775 1678
1776 psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK; 1679 if (HAS_PSR(dev))
1777 seq_printf(m, "Performance Counter: %u\n", psrperf); 1680 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
1681 EDP_PSR_PERF_CNT_MASK;
1682 seq_printf(m, "Performance_Counter: %u\n", psrperf);
1778 1683
1779 return 0; 1684 return 0;
1780} 1685}
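The Haswell-only PSR decoder, with its two large switch statements over EDP_PSR_STATUS_CTL, is replaced by a four-line summary driven by new dev_priv->psr state. Note that EDP_PSR_CTL and EDP_PSR_PERF_CNT are now macros taking dev, implying the register offsets differ across the platforms HAS_PSR() covers. The new body, reassembled from the right-hand column:

	u32 psrperf = 0;
	bool enabled = false;

	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));

	enabled = HAS_PSR(dev) &&
		  I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
	seq_printf(m, "Enabled: %s\n", yesno(enabled));

	if (HAS_PSR(dev))
		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
			  EDP_PSR_PERF_CNT_MASK;
	seq_printf(m, "Performance_Counter: %u\n", psrperf);

The enum no_psr_reason removal in i915_drv.h below is the other half of this change: the dozen disable reasons collapse into the two booleans printed here.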
@@ -1885,6 +1790,72 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
1885 i915_ring_stop_get, i915_ring_stop_set, 1790 i915_ring_stop_get, i915_ring_stop_set,
1886 "0x%08llx\n"); 1791 "0x%08llx\n");
1887 1792
1793static int
1794i915_ring_missed_irq_get(void *data, u64 *val)
1795{
1796 struct drm_device *dev = data;
1797 struct drm_i915_private *dev_priv = dev->dev_private;
1798
1799 *val = dev_priv->gpu_error.missed_irq_rings;
1800 return 0;
1801}
1802
1803static int
1804i915_ring_missed_irq_set(void *data, u64 val)
1805{
1806 struct drm_device *dev = data;
1807 struct drm_i915_private *dev_priv = dev->dev_private;
1808 int ret;
1809
1810 /* Lock against concurrent debugfs callers */
1811 ret = mutex_lock_interruptible(&dev->struct_mutex);
1812 if (ret)
1813 return ret;
1814 dev_priv->gpu_error.missed_irq_rings = val;
1815 mutex_unlock(&dev->struct_mutex);
1816
1817 return 0;
1818}
1819
1820DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
1821 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
1822 "0x%08llx\n");
1823
1824static int
1825i915_ring_test_irq_get(void *data, u64 *val)
1826{
1827 struct drm_device *dev = data;
1828 struct drm_i915_private *dev_priv = dev->dev_private;
1829
1830 *val = dev_priv->gpu_error.test_irq_rings;
1831
1832 return 0;
1833}
1834
1835static int
1836i915_ring_test_irq_set(void *data, u64 val)
1837{
1838 struct drm_device *dev = data;
1839 struct drm_i915_private *dev_priv = dev->dev_private;
1840 int ret;
1841
1842 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
1843
1844 /* Lock against concurrent debugfs callers */
1845 ret = mutex_lock_interruptible(&dev->struct_mutex);
1846 if (ret)
1847 return ret;
1848
1849 dev_priv->gpu_error.test_irq_rings = val;
1850 mutex_unlock(&dev->struct_mutex);
1851
1852 return 0;
1853}
1854
1855DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
1856 i915_ring_test_irq_get, i915_ring_test_irq_set,
1857 "0x%08llx\n");
1858
1888#define DROP_UNBOUND 0x1 1859#define DROP_UNBOUND 0x1
1889#define DROP_BOUND 0x2 1860#define DROP_BOUND 0x2
1890#define DROP_RETIRE 0x4 1861#define DROP_RETIRE 0x4
@@ -2145,7 +2116,7 @@ drm_add_fake_info_node(struct drm_minor *minor,
2145{ 2116{
2146 struct drm_info_node *node; 2117 struct drm_info_node *node;
2147 2118
2148 node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); 2119 node = kmalloc(sizeof(*node), GFP_KERNEL);
2149 if (node == NULL) { 2120 if (node == NULL) {
2150 debugfs_remove(ent); 2121 debugfs_remove(ent);
2151 return -ENOMEM; 2122 return -ENOMEM;
@@ -2278,6 +2249,8 @@ static struct i915_debugfs_files {
2278 {"i915_min_freq", &i915_min_freq_fops}, 2249 {"i915_min_freq", &i915_min_freq_fops},
2279 {"i915_cache_sharing", &i915_cache_sharing_fops}, 2250 {"i915_cache_sharing", &i915_cache_sharing_fops},
2280 {"i915_ring_stop", &i915_ring_stop_fops}, 2251 {"i915_ring_stop", &i915_ring_stop_fops},
2252 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
2253 {"i915_ring_test_irq", &i915_ring_test_irq_fops},
2281 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 2254 {"i915_gem_drop_caches", &i915_drop_caches_fops},
2282 {"i915_error_state", &i915_error_state_fops}, 2255 {"i915_error_state", &i915_error_state_fops},
2283 {"i915_next_seqno", &i915_next_seqno_fops}, 2256 {"i915_next_seqno", &i915_next_seqno_fops},
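The two new entries pair with the i915_ring_missed_irq_fops/i915_ring_test_irq_fops attributes added above: writing a ring bitmask to i915_ring_test_irq masks interrupt delivery on those rings (per the DRM_DEBUG message in the setter), presumably so the test suite can exercise the hangcheck's missed-interrupt fallback on demand, while i915_ring_missed_irq exposes gpu_error.missed_irq_rings, the record of rings on which that fallback has actually fired. Both setters take struct_mutex only to serialize concurrent debugfs writers.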
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index c27a21034a5e..b3873c945d1b 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -52,7 +52,7 @@
52 intel_ring_emit(LP_RING(dev_priv), x) 52 intel_ring_emit(LP_RING(dev_priv), x)
53 53
54#define ADVANCE_LP_RING() \ 54#define ADVANCE_LP_RING() \
55 intel_ring_advance(LP_RING(dev_priv)) 55 __intel_ring_advance(LP_RING(dev_priv))
56 56
57/** 57/**
58 * Lock test for when it's just for synchronization of ring access. 58 * Lock test for when it's just for synchronization of ring access.
@@ -641,7 +641,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
641 641
642 if (batch->num_cliprects) { 642 if (batch->num_cliprects) {
643 cliprects = kcalloc(batch->num_cliprects, 643 cliprects = kcalloc(batch->num_cliprects,
644 sizeof(struct drm_clip_rect), 644 sizeof(*cliprects),
645 GFP_KERNEL); 645 GFP_KERNEL);
646 if (cliprects == NULL) 646 if (cliprects == NULL)
647 return -ENOMEM; 647 return -ENOMEM;
@@ -703,7 +703,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
703 703
704 if (cmdbuf->num_cliprects) { 704 if (cmdbuf->num_cliprects) {
705 cliprects = kcalloc(cmdbuf->num_cliprects, 705 cliprects = kcalloc(cmdbuf->num_cliprects,
706 sizeof(struct drm_clip_rect), GFP_KERNEL); 706 sizeof(*cliprects), GFP_KERNEL);
707 if (cliprects == NULL) { 707 if (cliprects == NULL) {
708 ret = -ENOMEM; 708 ret = -ENOMEM;
709 goto fail_batch_free; 709 goto fail_batch_free;
@@ -931,7 +931,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
931 value = READ_BREADCRUMB(dev_priv); 931 value = READ_BREADCRUMB(dev_priv);
932 break; 932 break;
933 case I915_PARAM_CHIPSET_ID: 933 case I915_PARAM_CHIPSET_ID:
934 value = dev->pci_device; 934 value = dev->pdev->device;
935 break; 935 break;
936 case I915_PARAM_HAS_GEM: 936 case I915_PARAM_HAS_GEM:
937 value = 1; 937 value = 1;
@@ -1314,13 +1314,18 @@ static int i915_load_modeset_init(struct drm_device *dev)
1314 if (ret) 1314 if (ret)
1315 goto cleanup_gem_stolen; 1315 goto cleanup_gem_stolen;
1316 1316
1317 intel_init_power_well(dev);
1318
1319 /* Keep VGA alive until i915_disable_vga_mem() */
1320 intel_display_power_get(dev, POWER_DOMAIN_VGA);
1321
1317 /* Important: The output setup functions called by modeset_init need 1322 /* Important: The output setup functions called by modeset_init need
1318 * working irqs for e.g. gmbus and dp aux transfers. */ 1323 * working irqs for e.g. gmbus and dp aux transfers. */
1319 intel_modeset_init(dev); 1324 intel_modeset_init(dev);
1320 1325
1321 ret = i915_gem_init(dev); 1326 ret = i915_gem_init(dev);
1322 if (ret) 1327 if (ret)
1323 goto cleanup_irq; 1328 goto cleanup_power;
1324 1329
1325 INIT_WORK(&dev_priv->console_resume_work, intel_console_resume); 1330 INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
1326 1331
@@ -1328,9 +1333,11 @@ static int i915_load_modeset_init(struct drm_device *dev)
1328 1333
1329 /* Always safe in the mode setting case. */ 1334 /* Always safe in the mode setting case. */
1330 /* FIXME: do pre/post-mode set stuff in core KMS code */ 1335 /* FIXME: do pre/post-mode set stuff in core KMS code */
1331 dev->vblank_disable_allowed = 1; 1336 dev->vblank_disable_allowed = true;
1332 if (INTEL_INFO(dev)->num_pipes == 0) 1337 if (INTEL_INFO(dev)->num_pipes == 0) {
1338 intel_display_power_put(dev, POWER_DOMAIN_VGA);
1333 return 0; 1339 return 0;
1340 }
1334 1341
1335 ret = intel_fbdev_init(dev); 1342 ret = intel_fbdev_init(dev);
1336 if (ret) 1343 if (ret)
@@ -1356,6 +1363,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
1356 * vgacon_save_screen() works during the handover. 1363 * vgacon_save_screen() works during the handover.
1357 */ 1364 */
1358 i915_disable_vga_mem(dev); 1365 i915_disable_vga_mem(dev);
1366 intel_display_power_put(dev, POWER_DOMAIN_VGA);
1359 1367
1360 /* Only enable hotplug handling once the fbdev is fully set up. */ 1368 /* Only enable hotplug handling once the fbdev is fully set up. */
1361 dev_priv->enable_hotplug_processing = true; 1369 dev_priv->enable_hotplug_processing = true;
@@ -1371,7 +1379,8 @@ cleanup_gem:
1371 mutex_unlock(&dev->struct_mutex); 1379 mutex_unlock(&dev->struct_mutex);
1372 i915_gem_cleanup_aliasing_ppgtt(dev); 1380 i915_gem_cleanup_aliasing_ppgtt(dev);
1373 drm_mm_takedown(&dev_priv->gtt.base.mm); 1381 drm_mm_takedown(&dev_priv->gtt.base.mm);
1374cleanup_irq: 1382cleanup_power:
1383 intel_display_power_put(dev, POWER_DOMAIN_VGA);
1375 drm_irq_uninstall(dev); 1384 drm_irq_uninstall(dev);
1376cleanup_gem_stolen: 1385cleanup_gem_stolen:
1377 i915_gem_cleanup_stolen(dev); 1386 i915_gem_cleanup_stolen(dev);
@@ -1471,14 +1480,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1471 if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) 1480 if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
1472 return -ENODEV; 1481 return -ENODEV;
1473 1482
1474 /* i915 has 4 more counters */ 1483 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
1475 dev->counters += 4;
1476 dev->types[6] = _DRM_STAT_IRQ;
1477 dev->types[7] = _DRM_STAT_PRIMARY;
1478 dev->types[8] = _DRM_STAT_SECONDARY;
1479 dev->types[9] = _DRM_STAT_DMA;
1480
1481 dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
1482 if (dev_priv == NULL) 1484 if (dev_priv == NULL)
1483 return -ENOMEM; 1485 return -ENOMEM;
1484 1486
@@ -1552,7 +1554,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1552 1554
1553 ret = i915_gem_gtt_init(dev); 1555 ret = i915_gem_gtt_init(dev);
1554 if (ret) 1556 if (ret)
1555 goto put_bridge; 1557 goto out_regs;
1556 1558
1557 if (drm_core_check_feature(dev, DRIVER_MODESET)) 1559 if (drm_core_check_feature(dev, DRIVER_MODESET))
1558 i915_kick_out_firmware_fb(dev_priv); 1560 i915_kick_out_firmware_fb(dev_priv);
@@ -1581,7 +1583,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1581 aperture_size); 1583 aperture_size);
1582 if (dev_priv->gtt.mappable == NULL) { 1584 if (dev_priv->gtt.mappable == NULL) {
1583 ret = -EIO; 1585 ret = -EIO;
1584 goto out_rmmap; 1586 goto out_gtt;
1585 } 1587 }
1586 1588
1587 dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base, 1589 dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
@@ -1655,7 +1657,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1655 ret = i915_load_modeset_init(dev); 1657 ret = i915_load_modeset_init(dev);
1656 if (ret < 0) { 1658 if (ret < 0) {
1657 DRM_ERROR("failed to init modeset\n"); 1659 DRM_ERROR("failed to init modeset\n");
1658 goto out_gem_unload; 1660 goto out_power_well;
1659 } 1661 }
1660 } else { 1662 } else {
1661 /* Start out suspended in ums mode. */ 1663 /* Start out suspended in ums mode. */
@@ -1675,6 +1677,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1675 1677
1676 return 0; 1678 return 0;
1677 1679
1680out_power_well:
1681 if (HAS_POWER_WELL(dev))
1682 i915_remove_power_well(dev);
1683 drm_vblank_cleanup(dev);
1678out_gem_unload: 1684out_gem_unload:
1679 if (dev_priv->mm.inactive_shrinker.scan_objects) 1685 if (dev_priv->mm.inactive_shrinker.scan_objects)
1680 unregister_shrinker(&dev_priv->mm.inactive_shrinker); 1686 unregister_shrinker(&dev_priv->mm.inactive_shrinker);
@@ -1688,12 +1694,17 @@ out_gem_unload:
1688out_mtrrfree: 1694out_mtrrfree:
1689 arch_phys_wc_del(dev_priv->gtt.mtrr); 1695 arch_phys_wc_del(dev_priv->gtt.mtrr);
1690 io_mapping_free(dev_priv->gtt.mappable); 1696 io_mapping_free(dev_priv->gtt.mappable);
1697out_gtt:
1698 list_del(&dev_priv->gtt.base.global_link);
1699 drm_mm_takedown(&dev_priv->gtt.base.mm);
1691 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); 1700 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
1692out_rmmap: 1701out_regs:
1693 pci_iounmap(dev->pdev, dev_priv->regs); 1702 pci_iounmap(dev->pdev, dev_priv->regs);
1694put_bridge: 1703put_bridge:
1695 pci_dev_put(dev_priv->bridge_dev); 1704 pci_dev_put(dev_priv->bridge_dev);
1696free_priv: 1705free_priv:
1706 if (dev_priv->slab)
1707 kmem_cache_destroy(dev_priv->slab);
1697 kfree(dev_priv); 1708 kfree(dev_priv);
1698 return ret; 1709 return ret;
1699} 1710}
@@ -1783,8 +1794,8 @@ int i915_driver_unload(struct drm_device *dev)
1783 list_del(&dev_priv->gtt.base.global_link); 1794 list_del(&dev_priv->gtt.base.global_link);
1784 WARN_ON(!list_empty(&dev_priv->vm_list)); 1795 WARN_ON(!list_empty(&dev_priv->vm_list));
1785 drm_mm_takedown(&dev_priv->gtt.base.mm); 1796 drm_mm_takedown(&dev_priv->gtt.base.mm);
1786 if (dev_priv->regs != NULL) 1797
1787 pci_iounmap(dev->pdev, dev_priv->regs); 1798 drm_vblank_cleanup(dev);
1788 1799
1789 intel_teardown_gmbus(dev); 1800 intel_teardown_gmbus(dev);
1790 intel_teardown_mchbar(dev); 1801 intel_teardown_mchbar(dev);
@@ -1794,6 +1805,10 @@ int i915_driver_unload(struct drm_device *dev)
1794 1805
1795 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); 1806 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
1796 1807
1808 intel_uncore_fini(dev);
1809 if (dev_priv->regs != NULL)
1810 pci_iounmap(dev->pdev, dev_priv->regs);
1811
1797 if (dev_priv->slab) 1812 if (dev_priv->slab)
1798 kmem_cache_destroy(dev_priv->slab); 1813 kmem_cache_destroy(dev_priv->slab);
1799 1814
@@ -1805,19 +1820,11 @@ int i915_driver_unload(struct drm_device *dev)
1805 1820
1806int i915_driver_open(struct drm_device *dev, struct drm_file *file) 1821int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1807{ 1822{
1808 struct drm_i915_file_private *file_priv; 1823 int ret;
1809
1810 DRM_DEBUG_DRIVER("\n");
1811 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
1812 if (!file_priv)
1813 return -ENOMEM;
1814
1815 file->driver_priv = file_priv;
1816
1817 spin_lock_init(&file_priv->mm.lock);
1818 INIT_LIST_HEAD(&file_priv->mm.request_list);
1819 1824
1820 idr_init(&file_priv->context_idr); 1825 ret = i915_gem_open(dev, file);
1826 if (ret)
1827 return ret;
1821 1828
1822 return 0; 1829 return 0;
1823} 1830}
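i915_driver_open() shrinks to a call into the new i915_gem_open(). Going by the code it displaces and by the drm_i915_file_private changes at the end of this diff (the new dev_priv back-pointer and mm.idle_work), the helper presumably looks something like:

	/* hedged reconstruction from the removed open-coded version; the
	 * real i915_gem_open() lives in i915_gem.c and may do more */
	int i915_gem_open(struct drm_device *dev, struct drm_file *file)
	{
		struct drm_i915_file_private *file_priv;

		file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
		if (!file_priv)
			return -ENOMEM;

		file->driver_priv = file_priv;
		file_priv->dev_priv = dev->dev_private;

		spin_lock_init(&file_priv->mm.lock);
		INIT_LIST_HEAD(&file_priv->mm.request_list);
		idr_init(&file_priv->context_idr);

		return 0;
	}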
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 69d8ed5416c3..96f230497cbe 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -416,7 +416,7 @@ void intel_detect_pch(struct drm_device *dev)
416 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { 416 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
417 /* PantherPoint is CPT compatible */ 417 /* PantherPoint is CPT compatible */
418 dev_priv->pch_type = PCH_CPT; 418 dev_priv->pch_type = PCH_CPT;
419 DRM_DEBUG_KMS("Found PatherPoint PCH\n"); 419 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
420 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); 420 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
421 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 421 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
422 dev_priv->pch_type = PCH_LPT; 422 dev_priv->pch_type = PCH_LPT;
@@ -576,11 +576,24 @@ static void intel_resume_hotplug(struct drm_device *dev)
576 drm_helper_hpd_irq_event(dev); 576 drm_helper_hpd_irq_event(dev);
577} 577}
578 578
579static int __i915_drm_thaw(struct drm_device *dev) 579static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
580{ 580{
581 struct drm_i915_private *dev_priv = dev->dev_private; 581 struct drm_i915_private *dev_priv = dev->dev_private;
582 int error = 0; 582 int error = 0;
583 583
584 intel_uncore_early_sanitize(dev);
585
586 intel_uncore_sanitize(dev);
587
588 if (drm_core_check_feature(dev, DRIVER_MODESET) &&
589 restore_gtt_mappings) {
590 mutex_lock(&dev->struct_mutex);
591 i915_gem_restore_gtt_mappings(dev);
592 mutex_unlock(&dev->struct_mutex);
593 }
594
595 intel_init_power_well(dev);
596
584 i915_restore_state(dev); 597 i915_restore_state(dev);
585 intel_opregion_setup(dev); 598 intel_opregion_setup(dev);
586 599
@@ -640,19 +653,7 @@ static int __i915_drm_thaw(struct drm_device *dev)
640 653
641static int i915_drm_thaw(struct drm_device *dev) 654static int i915_drm_thaw(struct drm_device *dev)
642{ 655{
643 int error = 0; 656 return __i915_drm_thaw(dev, true);
644
645 intel_uncore_sanitize(dev);
646
647 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
648 mutex_lock(&dev->struct_mutex);
649 i915_gem_restore_gtt_mappings(dev);
650 mutex_unlock(&dev->struct_mutex);
651 }
652
653 __i915_drm_thaw(dev);
654
655 return error;
656} 657}
657 658
658int i915_resume(struct drm_device *dev) 659int i915_resume(struct drm_device *dev)
@@ -668,20 +669,12 @@ int i915_resume(struct drm_device *dev)
668 669
669 pci_set_master(dev->pdev); 670 pci_set_master(dev->pdev);
670 671
671 intel_uncore_sanitize(dev);
672
673 /* 672 /*
674 * Platforms with opregion should have sane BIOS, older ones (gen3 and 673 * Platforms with opregion should have sane BIOS, older ones (gen3 and
675 * earlier) need this since the BIOS might clear all our scratch PTEs. 674 * earlier) need to restore the GTT mappings since the BIOS might clear
675 * all our scratch PTEs.
676 */ 676 */
677 if (drm_core_check_feature(dev, DRIVER_MODESET) && 677 ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
678 !dev_priv->opregion.header) {
679 mutex_lock(&dev->struct_mutex);
680 i915_gem_restore_gtt_mappings(dev);
681 mutex_unlock(&dev->struct_mutex);
682 }
683
684 ret = __i915_drm_thaw(dev);
685 if (ret) 678 if (ret)
686 return ret; 679 return ret;
687 680
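The thaw paths are folded together: __i915_drm_thaw() grows a restore_gtt_mappings flag and takes over the uncore sanitize calls and intel_init_power_well(), so the only difference between its two callers is whether the GTT PTEs need rewriting. i915_drm_thaw() always restores; i915_resume() restores only when there is no opregion, since per the updated comment only gen3-and-earlier BIOSes are expected to clobber the scratch PTEs:

	/* resume from the pci layer: trust the BIOS iff opregion is present */
	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);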
@@ -719,24 +712,19 @@ int i915_reset(struct drm_device *dev)
719 712
720 simulated = dev_priv->gpu_error.stop_rings != 0; 713 simulated = dev_priv->gpu_error.stop_rings != 0;
721 714
722 if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) { 715 ret = intel_gpu_reset(dev);
723 DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); 716
724 ret = -ENODEV; 717 /* Also reset the gpu hangman. */
725 } else { 718 if (simulated) {
726 ret = intel_gpu_reset(dev); 719 DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
727 720 dev_priv->gpu_error.stop_rings = 0;
728 /* Also reset the gpu hangman. */ 721 if (ret == -ENODEV) {
729 if (simulated) { 722 DRM_ERROR("Reset not implemented, but ignoring "
730 DRM_INFO("Simulated gpu hang, resetting stop_rings\n"); 723 "error for simulated gpu hangs\n");
731 dev_priv->gpu_error.stop_rings = 0; 724 ret = 0;
732 if (ret == -ENODEV) { 725 }
733 DRM_ERROR("Reset not implemented, but ignoring "
734 "error for simulated gpu hangs\n");
735 ret = 0;
736 }
737 } else
738 dev_priv->gpu_error.last_reset = get_seconds();
739 } 726 }
727
740 if (ret) { 728 if (ret) {
741 DRM_ERROR("Failed to reset chip.\n"); 729 DRM_ERROR("Failed to reset chip.\n");
742 mutex_unlock(&dev->struct_mutex); 730 mutex_unlock(&dev->struct_mutex);
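i915_reset() loses the global throttle that declared the GPU wedged (-ENODEV) when two resets landed within five seconds of each other, along with gpu_error.last_reset. Responsibility for "hanging too fast" evidently moves from the device to the offending context: see the guilty_ts/banned fields and the DRM_I915_CTX_BAN_PERIOD window added to i915_drv.h below.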
@@ -799,6 +787,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
799 struct intel_device_info *intel_info = 787 struct intel_device_info *intel_info =
800 (struct intel_device_info *) ent->driver_data; 788 (struct intel_device_info *) ent->driver_data;
801 789
790 if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
791 DRM_INFO("This hardware requires preliminary hardware support.\n"
792 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
793 return -ENODEV;
794 }
795
802 /* Only bind to function 0 of the device. Early generations 796 /* Only bind to function 0 of the device. Early generations
803 * used function 1 as a placeholder for multi-head. This causes 797 * used function 1 as a placeholder for multi-head. This causes
804 * us confusion instead, especially on the systems where both 798 * us confusion instead, especially on the systems where both
@@ -946,7 +940,6 @@ static struct drm_driver driver = {
946 .debugfs_init = i915_debugfs_init, 940 .debugfs_init = i915_debugfs_init,
947 .debugfs_cleanup = i915_debugfs_cleanup, 941 .debugfs_cleanup = i915_debugfs_cleanup,
948#endif 942#endif
949 .gem_init_object = i915_gem_init_object,
950 .gem_free_object = i915_gem_free_object, 943 .gem_free_object = i915_gem_free_object,
951 .gem_vm_ops = &i915_gem_vm_ops, 944 .gem_vm_ops = &i915_gem_vm_ops,
952 945
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 35874b3a86dc..6a5b7ab0c3fa 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -99,6 +99,7 @@ enum intel_display_power_domain {
99 POWER_DOMAIN_TRANSCODER_B, 99 POWER_DOMAIN_TRANSCODER_B,
100 POWER_DOMAIN_TRANSCODER_C, 100 POWER_DOMAIN_TRANSCODER_C,
101 POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF, 101 POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
102 POWER_DOMAIN_VGA,
102}; 103};
103 104
104#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) 105#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
@@ -225,6 +226,8 @@ struct intel_opregion {
225 struct opregion_header __iomem *header; 226 struct opregion_header __iomem *header;
226 struct opregion_acpi __iomem *acpi; 227 struct opregion_acpi __iomem *acpi;
227 struct opregion_swsci __iomem *swsci; 228 struct opregion_swsci __iomem *swsci;
229 u32 swsci_gbda_sub_functions;
230 u32 swsci_sbcb_sub_functions;
228 struct opregion_asle __iomem *asle; 231 struct opregion_asle __iomem *asle;
229 void __iomem *vbt; 232 void __iomem *vbt;
230 u32 __iomem *lid_state; 233 u32 __iomem *lid_state;
@@ -321,11 +324,13 @@ struct drm_i915_error_state {
321 u32 dirty:1; 324 u32 dirty:1;
322 u32 purgeable:1; 325 u32 purgeable:1;
323 s32 ring:4; 326 s32 ring:4;
324 u32 cache_level:2; 327 u32 cache_level:3;
325 } **active_bo, **pinned_bo; 328 } **active_bo, **pinned_bo;
326 u32 *active_bo_count, *pinned_bo_count; 329 u32 *active_bo_count, *pinned_bo_count;
327 struct intel_overlay_error_state *overlay; 330 struct intel_overlay_error_state *overlay;
328 struct intel_display_error_state *display; 331 struct intel_display_error_state *display;
332 int hangcheck_score[I915_NUM_RINGS];
333 enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
329}; 334};
330 335
331struct intel_crtc_config; 336struct intel_crtc_config;
@@ -357,7 +362,7 @@ struct drm_i915_display_funcs {
357 int target, int refclk, 362 int target, int refclk,
358 struct dpll *match_clock, 363 struct dpll *match_clock,
359 struct dpll *best_clock); 364 struct dpll *best_clock);
360 void (*update_wm)(struct drm_device *dev); 365 void (*update_wm)(struct drm_crtc *crtc);
361 void (*update_sprite_wm)(struct drm_plane *plane, 366 void (*update_sprite_wm)(struct drm_plane *plane,
362 struct drm_crtc *crtc, 367 struct drm_crtc *crtc,
363 uint32_t sprite_width, int pixel_size, 368 uint32_t sprite_width, int pixel_size,
@@ -367,7 +372,6 @@ struct drm_i915_display_funcs {
367 * fills out the pipe-config with the hw state. */ 372 * fills out the pipe-config with the hw state. */
368 bool (*get_pipe_config)(struct intel_crtc *, 373 bool (*get_pipe_config)(struct intel_crtc *,
369 struct intel_crtc_config *); 374 struct intel_crtc_config *);
370 void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
371 int (*crtc_mode_set)(struct drm_crtc *crtc, 375 int (*crtc_mode_set)(struct drm_crtc *crtc,
372 int x, int y, 376 int x, int y,
373 struct drm_framebuffer *old_fb); 377 struct drm_framebuffer *old_fb);
@@ -404,6 +408,8 @@ struct intel_uncore {
404 408
405 unsigned fifo_count; 409 unsigned fifo_count;
406 unsigned forcewake_count; 410 unsigned forcewake_count;
411
412 struct delayed_work force_wake_work;
407}; 413};
408 414
409#define DEV_INFO_FOR_EACH_FLAG(func, sep) \ 415#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -420,6 +426,7 @@ struct intel_uncore {
420 func(is_ivybridge) sep \ 426 func(is_ivybridge) sep \
421 func(is_valleyview) sep \ 427 func(is_valleyview) sep \
422 func(is_haswell) sep \ 428 func(is_haswell) sep \
429 func(is_preliminary) sep \
423 func(has_force_wake) sep \ 430 func(has_force_wake) sep \
424 func(has_fbc) sep \ 431 func(has_fbc) sep \
425 func(has_pipe_cxsr) sep \ 432 func(has_pipe_cxsr) sep \
@@ -568,6 +575,13 @@ struct i915_vma {
568 /** This vma's place in the batchbuffer or on the eviction list */ 575 /** This vma's place in the batchbuffer or on the eviction list */
569 struct list_head exec_list; 576 struct list_head exec_list;
570 577
578 /**
579 * Used for performing relocations during execbuffer insertion.
580 */
581 struct hlist_node exec_node;
582 unsigned long exec_handle;
583 struct drm_i915_gem_exec_object2 *exec_entry;
584
571}; 585};
572 586
573struct i915_ctx_hang_stats { 587struct i915_ctx_hang_stats {
@@ -576,6 +590,12 @@ struct i915_ctx_hang_stats {
576 590
577 /* This context had batch active when hang was declared */ 591 /* This context had batch active when hang was declared */
578 unsigned batch_active; 592 unsigned batch_active;
593
594 /* Time when this context was last blamed for a GPU reset */
595 unsigned long guilty_ts;
596
597 /* This context is banned to submit more work */
598 bool banned;
579}; 599};
580 600
581/* This must match up with the value previously used for execbuf2.rsvd1. */ 601/* This must match up with the value previously used for execbuf2.rsvd1. */
@@ -584,10 +604,13 @@ struct i915_hw_context {
584 struct kref ref; 604 struct kref ref;
585 int id; 605 int id;
586 bool is_initialized; 606 bool is_initialized;
607 uint8_t remap_slice;
587 struct drm_i915_file_private *file_priv; 608 struct drm_i915_file_private *file_priv;
588 struct intel_ring_buffer *ring; 609 struct intel_ring_buffer *ring;
589 struct drm_i915_gem_object *obj; 610 struct drm_i915_gem_object *obj;
590 struct i915_ctx_hang_stats hang_stats; 611 struct i915_ctx_hang_stats hang_stats;
612
613 struct list_head link;
591}; 614};
592 615
593struct i915_fbc { 616struct i915_fbc {
@@ -621,17 +644,9 @@ struct i915_fbc {
621 } no_fbc_reason; 644 } no_fbc_reason;
622}; 645};
623 646
624enum no_psr_reason { 647struct i915_psr {
625 PSR_NO_SOURCE, /* Not supported on platform */ 648 bool sink_support;
626 PSR_NO_SINK, /* Not supported by panel */ 649 bool source_ok;
627 PSR_MODULE_PARAM,
628 PSR_CRTC_NOT_ACTIVE,
629 PSR_PWR_WELL_ENABLED,
630 PSR_NOT_TILED,
631 PSR_SPRITE_ENABLED,
632 PSR_S3D_ENABLED,
633 PSR_INTERLACED_ENABLED,
634 PSR_HSW_NOT_DDIA,
635}; 650};
636 651
637enum intel_pch { 652enum intel_pch {
@@ -821,17 +836,19 @@ struct intel_gen6_power_mgmt {
821 struct work_struct work; 836 struct work_struct work;
822 u32 pm_iir; 837 u32 pm_iir;
823 838
824 /* On vlv we need to manually drop to Vmin with a delayed work. */
825 struct delayed_work vlv_work;
826
827 /* The below variables an all the rps hw state are protected by 839 /* The below variables an all the rps hw state are protected by
828 * dev->struct mutext. */ 840 * dev->struct mutext. */
829 u8 cur_delay; 841 u8 cur_delay;
830 u8 min_delay; 842 u8 min_delay;
831 u8 max_delay; 843 u8 max_delay;
832 u8 rpe_delay; 844 u8 rpe_delay;
845 u8 rp1_delay;
846 u8 rp0_delay;
833 u8 hw_max; 847 u8 hw_max;
834 848
849 int last_adj;
850 enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
851
835 struct delayed_work delayed_resume_work; 852 struct delayed_work delayed_resume_work;
836 853
837 /* 854 /*
@@ -900,9 +917,11 @@ struct i915_ums_state {
900 int mm_suspended; 917 int mm_suspended;
901}; 918};
902 919
920#define MAX_L3_SLICES 2
903struct intel_l3_parity { 921struct intel_l3_parity {
904 u32 *remap_info; 922 u32 *remap_info[MAX_L3_SLICES];
905 struct work_struct error_work; 923 struct work_struct error_work;
924 int which_slice;
906}; 925};
907 926
908struct i915_gem_mm { 927struct i915_gem_mm {
@@ -940,6 +959,15 @@ struct i915_gem_mm {
940 struct delayed_work retire_work; 959 struct delayed_work retire_work;
941 960
942 /** 961 /**
962 * When we detect an idle GPU, we want to turn on
963 * powersaving features. So once we see that there
964 * are no more requests outstanding and no more
965 * arrive within a small period of time, we fire
966 * off the idle_work.
967 */
968 struct delayed_work idle_work;
969
970 /**
943 * Are we in a non-interruptible section of code like 971 * Are we in a non-interruptible section of code like
944 * modesetting? 972 * modesetting?
945 */ 973 */
@@ -977,6 +1005,9 @@ struct i915_gpu_error {
977 /* For hangcheck timer */ 1005 /* For hangcheck timer */
978#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ 1006#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
979#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) 1007#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
1008 /* Hang gpu twice in this window and your context gets banned */
1009#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
1010
980 struct timer_list hangcheck_timer; 1011 struct timer_list hangcheck_timer;
981 1012
982 /* For reset and error_state handling. */ 1013 /* For reset and error_state handling. */
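DRM_I915_CTX_BAN_PERIOD works out to 12 seconds (8 hangcheck periods of 1500 ms, rounded up), and together with i915_ctx_hang_stats.guilty_ts/banned it replaces the global five-second rule deleted from i915_reset() above. A hedged sketch of how the pieces presumably combine (the real check lives in the hangcheck/reset code, which is not in this diff):

	/* hedged sketch, not from this diff: how guilty_ts/banned and
	 * DRM_I915_CTX_BAN_PERIOD might combine into a ban decision */
	static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
	{
		if (hs->banned)
			return true;

		/* "Hang gpu twice in this window and your context gets banned" */
		return get_seconds() - hs->guilty_ts <= DRM_I915_CTX_BAN_PERIOD;
	}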
@@ -985,7 +1016,8 @@ struct i915_gpu_error {
985 struct drm_i915_error_state *first_error; 1016 struct drm_i915_error_state *first_error;
986 struct work_struct work; 1017 struct work_struct work;
987 1018
988 unsigned long last_reset; 1019
1020 unsigned long missed_irq_rings;
989 1021
990 /** 1022 /**
991 * State variable and reset counter controlling the reset flow 1023 * State variable and reset counter controlling the reset flow
@@ -1025,6 +1057,9 @@ struct i915_gpu_error {
1025 1057
1026 /* For gpu hang simulation. */ 1058 /* For gpu hang simulation. */
1027 unsigned int stop_rings; 1059 unsigned int stop_rings;
1060
1061 /* For missed irq/seqno simulation. */
1062 unsigned int test_irq_rings;
1028}; 1063};
1029 1064
1030enum modeset_restore { 1065enum modeset_restore {
@@ -1033,6 +1068,14 @@ enum modeset_restore {
1033 MODESET_SUSPENDED, 1068 MODESET_SUSPENDED,
1034}; 1069};
1035 1070
1071struct ddi_vbt_port_info {
1072 uint8_t hdmi_level_shift;
1073
1074 uint8_t supports_dvi:1;
1075 uint8_t supports_hdmi:1;
1076 uint8_t supports_dp:1;
1077};
1078
1036struct intel_vbt_data { 1079struct intel_vbt_data {
1037 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 1080 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
1038 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ 1081 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -1058,10 +1101,17 @@ struct intel_vbt_data {
1058 int edp_bpp; 1101 int edp_bpp;
1059 struct edp_power_seq edp_pps; 1102 struct edp_power_seq edp_pps;
1060 1103
1104 /* MIPI DSI */
1105 struct {
1106 u16 panel_id;
1107 } dsi;
1108
1061 int crt_ddc_pin; 1109 int crt_ddc_pin;
1062 1110
1063 int child_dev_num; 1111 int child_dev_num;
1064 struct child_device_config *child_dev; 1112 union child_device_config *child_dev;
1113
1114 struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
1065}; 1115};
1066 1116
1067enum intel_ddb_partitioning { 1117enum intel_ddb_partitioning {
@@ -1298,7 +1348,7 @@ typedef struct drm_i915_private {
1298 /* Haswell power well */ 1348 /* Haswell power well */
1299 struct i915_power_well power_well; 1349 struct i915_power_well power_well;
1300 1350
1301 enum no_psr_reason no_psr_reason; 1351 struct i915_psr psr;
1302 1352
1303 struct i915_gpu_error gpu_error; 1353 struct i915_gpu_error gpu_error;
1304 1354
@@ -1318,6 +1368,7 @@ typedef struct drm_i915_private {
1318 1368
1319 bool hw_contexts_disabled; 1369 bool hw_contexts_disabled;
1320 uint32_t hw_context_size; 1370 uint32_t hw_context_size;
1371 struct list_head context_list;
1321 1372
1322 u32 fdi_rx_config; 1373 u32 fdi_rx_config;
1323 1374
@@ -1398,8 +1449,6 @@ struct drm_i915_gem_object {
1398 struct list_head ring_list; 1449 struct list_head ring_list;
1399 /** Used in execbuf to temporarily hold a ref */ 1450 /** Used in execbuf to temporarily hold a ref */
1400 struct list_head obj_exec_link; 1451 struct list_head obj_exec_link;
1401 /** This object's place in the batchbuffer or on the eviction list */
1402 struct list_head exec_list;
1403 1452
1404 /** 1453 /**
1405 * This is set if the object is on the active lists (has pending 1454 * This is set if the object is on the active lists (has pending
@@ -1485,13 +1534,6 @@ struct drm_i915_gem_object {
1485 void *dma_buf_vmapping; 1534 void *dma_buf_vmapping;
1486 int vmapping_count; 1535 int vmapping_count;
1487 1536
1488 /**
1489 * Used for performing relocations during execbuffer insertion.
1490 */
1491 struct hlist_node exec_node;
1492 unsigned long exec_handle;
1493 struct drm_i915_gem_exec_object2 *exec_entry;
1494
1495 struct intel_ring_buffer *ring; 1537 struct intel_ring_buffer *ring;
1496 1538
1497 /** Breadcrumb of last rendering to the buffer. */ 1539 /** Breadcrumb of last rendering to the buffer. */
@@ -1558,48 +1600,55 @@ struct drm_i915_gem_request {
1558}; 1600};
1559 1601
1560struct drm_i915_file_private { 1602struct drm_i915_file_private {
1603 struct drm_i915_private *dev_priv;
1604
1561 struct { 1605 struct {
1562 spinlock_t lock; 1606 spinlock_t lock;
1563 struct list_head request_list; 1607 struct list_head request_list;
1608 struct delayed_work idle_work;
1564 } mm; 1609 } mm;
1565 struct idr context_idr; 1610 struct idr context_idr;
1566 1611
1567 struct i915_ctx_hang_stats hang_stats; 1612 struct i915_ctx_hang_stats hang_stats;
1613 atomic_t rps_wait_boost;
1568}; 1614};
1569 1615
1570#define INTEL_INFO(dev) (to_i915(dev)->info) 1616#define INTEL_INFO(dev) (to_i915(dev)->info)
1571 1617
1572#define IS_I830(dev) ((dev)->pci_device == 0x3577) 1618#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
1573#define IS_845G(dev) ((dev)->pci_device == 0x2562) 1619#define IS_845G(dev) ((dev)->pdev->device == 0x2562)
1574#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) 1620#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
1575#define IS_I865G(dev) ((dev)->pci_device == 0x2572) 1621#define IS_I865G(dev) ((dev)->pdev->device == 0x2572)
1576#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 1622#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
1577#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) 1623#define IS_I915GM(dev) ((dev)->pdev->device == 0x2592)
1578#define IS_I945G(dev) ((dev)->pci_device == 0x2772) 1624#define IS_I945G(dev) ((dev)->pdev->device == 0x2772)
1579#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) 1625#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
1580#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) 1626#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
1581#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) 1627#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
1582#define IS_GM45(dev) ((dev)->pci_device == 0x2A42) 1628#define IS_GM45(dev) ((dev)->pdev->device == 0x2A42)
1583#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) 1629#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
1584#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) 1630#define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001)
1585#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) 1631#define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011)
1586#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) 1632#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
1587#define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 1633#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
1588#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 1634#define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046)
1589#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) 1635#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
1590#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \ 1636#define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \
1591 (dev)->pci_device == 0x0152 || \ 1637 (dev)->pdev->device == 0x0152 || \
1592 (dev)->pci_device == 0x015a) 1638 (dev)->pdev->device == 0x015a)
1593#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \ 1639#define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \
1594 (dev)->pci_device == 0x0106 || \ 1640 (dev)->pdev->device == 0x0106 || \
1595 (dev)->pci_device == 0x010A) 1641 (dev)->pdev->device == 0x010A)
1596#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 1642#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
1597#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 1643#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
1598#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 1644#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1599#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ 1645#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
1600 ((dev)->pci_device & 0xFF00) == 0x0C00) 1646 ((dev)->pdev->device & 0xFF00) == 0x0C00)
1601#define IS_ULT(dev) (IS_HASWELL(dev) && \ 1647#define IS_ULT(dev) (IS_HASWELL(dev) && \
1602 ((dev)->pci_device & 0xFF00) == 0x0A00) 1648 ((dev)->pdev->device & 0xFF00) == 0x0A00)
1649#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
1650 ((dev)->pdev->device & 0x00F0) == 0x0020)
1651#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
1603 1652
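These ID-mask checks classify parts by nibble: IS_HSW_GT3() matches any Haswell device whose ID has 0x0020 under the 0x00F0 mask, so, for example, 0x0A26 qualifies (0x0A26 & 0x00F0 == 0x0020) while the GT2 part 0x0A16 does not.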
1604/* 1653/*
1605 * The genX designation typically refers to the render engine, so render 1654 * The genX designation typically refers to the render engine, so render
@@ -1638,7 +1687,6 @@ struct drm_i915_file_private {
1638#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) 1687#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
1639#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) 1688#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
1640#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) 1689#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
1641#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
1642#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) 1690#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
1643#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) 1691#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
1644 1692
@@ -1651,6 +1699,7 @@ struct drm_i915_file_private {
1651#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) 1699#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
1652#define HAS_POWER_WELL(dev) (IS_HASWELL(dev)) 1700#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
1653#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) 1701#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
1702#define HAS_PSR(dev) (IS_HASWELL(dev))
1654 1703
1655#define INTEL_PCH_DEVICE_ID_MASK 0xff00 1704#define INTEL_PCH_DEVICE_ID_MASK 0xff00
1656#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 1705#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -1668,7 +1717,9 @@ struct drm_i915_file_private {
1668 1717
1669#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) 1718#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
1670 1719
1671#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 1720/* DPF == dynamic parity feature */
1721#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1722#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
1672 1723
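NUM_L3_SLICES() leans on HAS_L3_DPF() evaluating to 1 or 0: Haswell GT3 reports two slices, any other DPF-capable part one, and everything else zero. That also makes the later `for (i = 0; i < NUM_L3_SLICES(dev); i++)` remap loop in i915_gem_init_hw() a natural no-op on hardware without the feature.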
1673#define GT_FREQUENCY_MULTIPLIER 50 1724#define GT_FREQUENCY_MULTIPLIER 50
1674 1725
@@ -1765,6 +1816,7 @@ extern void intel_uncore_early_sanitize(struct drm_device *dev);
1765extern void intel_uncore_init(struct drm_device *dev); 1816extern void intel_uncore_init(struct drm_device *dev);
1766extern void intel_uncore_clear_errors(struct drm_device *dev); 1817extern void intel_uncore_clear_errors(struct drm_device *dev);
1767extern void intel_uncore_check_errors(struct drm_device *dev); 1818extern void intel_uncore_check_errors(struct drm_device *dev);
1819extern void intel_uncore_fini(struct drm_device *dev);
1768 1820
1769void 1821void
1770i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1822i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1822,14 +1874,11 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
1822void i915_gem_load(struct drm_device *dev); 1874void i915_gem_load(struct drm_device *dev);
1823void *i915_gem_object_alloc(struct drm_device *dev); 1875void *i915_gem_object_alloc(struct drm_device *dev);
1824void i915_gem_object_free(struct drm_i915_gem_object *obj); 1876void i915_gem_object_free(struct drm_i915_gem_object *obj);
1825int i915_gem_init_object(struct drm_gem_object *obj);
1826void i915_gem_object_init(struct drm_i915_gem_object *obj, 1877void i915_gem_object_init(struct drm_i915_gem_object *obj,
1827 const struct drm_i915_gem_object_ops *ops); 1878 const struct drm_i915_gem_object_ops *ops);
1828struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 1879struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
1829 size_t size); 1880 size_t size);
1830void i915_gem_free_object(struct drm_gem_object *obj); 1881void i915_gem_free_object(struct drm_gem_object *obj);
1831struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
1832 struct i915_address_space *vm);
1833void i915_gem_vma_destroy(struct i915_vma *vma); 1882void i915_gem_vma_destroy(struct i915_vma *vma);
1834 1883
1835int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, 1884int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
@@ -1868,9 +1917,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
1868int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 1917int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
1869int i915_gem_object_sync(struct drm_i915_gem_object *obj, 1918int i915_gem_object_sync(struct drm_i915_gem_object *obj,
1870 struct intel_ring_buffer *to); 1919 struct intel_ring_buffer *to);
1871void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, 1920void i915_vma_move_to_active(struct i915_vma *vma,
1872 struct intel_ring_buffer *ring); 1921 struct intel_ring_buffer *ring);
1873
1874int i915_gem_dumb_create(struct drm_file *file_priv, 1922int i915_gem_dumb_create(struct drm_file *file_priv,
1875 struct drm_device *dev, 1923 struct drm_device *dev,
1876 struct drm_mode_create_dumb *args); 1924 struct drm_mode_create_dumb *args);
@@ -1911,7 +1959,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
1911 } 1959 }
1912} 1960}
1913 1961
1914void i915_gem_retire_requests(struct drm_device *dev); 1962bool i915_gem_retire_requests(struct drm_device *dev);
1915void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); 1963void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
1916int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, 1964int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
1917 bool interruptible); 1965 bool interruptible);
@@ -1931,7 +1979,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
1931int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); 1979int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
1932int __must_check i915_gem_init(struct drm_device *dev); 1980int __must_check i915_gem_init(struct drm_device *dev);
1933int __must_check i915_gem_init_hw(struct drm_device *dev); 1981int __must_check i915_gem_init_hw(struct drm_device *dev);
1934void i915_gem_l3_remap(struct drm_device *dev); 1982int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
1935void i915_gem_init_swizzling(struct drm_device *dev); 1983void i915_gem_init_swizzling(struct drm_device *dev);
1936void i915_gem_cleanup_ringbuffer(struct drm_device *dev); 1984void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
1937int __must_check i915_gpu_idle(struct drm_device *dev); 1985int __must_check i915_gpu_idle(struct drm_device *dev);
@@ -1962,6 +2010,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
1962void i915_gem_detach_phys_object(struct drm_device *dev, 2010void i915_gem_detach_phys_object(struct drm_device *dev,
1963 struct drm_i915_gem_object *obj); 2011 struct drm_i915_gem_object *obj);
1964void i915_gem_free_all_phys_object(struct drm_device *dev); 2012void i915_gem_free_all_phys_object(struct drm_device *dev);
2013int i915_gem_open(struct drm_device *dev, struct drm_file *file);
1965void i915_gem_release(struct drm_device *dev, struct drm_file *file); 2014void i915_gem_release(struct drm_device *dev, struct drm_file *file);
1966 2015
1967uint32_t 2016uint32_t
@@ -1993,6 +2042,9 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
1993struct i915_vma * 2042struct i915_vma *
1994i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, 2043i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
1995 struct i915_address_space *vm); 2044 struct i915_address_space *vm);
2045
2046struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
2047
1996/* Some GGTT VM helpers */ 2048/* Some GGTT VM helpers */
1997#define obj_to_ggtt(obj) \ 2049#define obj_to_ggtt(obj) \
1998 (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base) 2050 (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
@@ -2029,7 +2081,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
2029 return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, 2081 return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
2030 map_and_fenceable, nonblocking); 2082 map_and_fenceable, nonblocking);
2031} 2083}
2032#undef obj_to_ggtt
2033 2084
2034/* i915_gem_context.c */ 2085/* i915_gem_context.c */
2035void i915_gem_context_init(struct drm_device *dev); 2086void i915_gem_context_init(struct drm_device *dev);
@@ -2090,6 +2141,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
2090 unsigned cache_level, 2141 unsigned cache_level,
2091 bool mappable, 2142 bool mappable,
2092 bool nonblock); 2143 bool nonblock);
2144int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
2093int i915_gem_evict_everything(struct drm_device *dev); 2145int i915_gem_evict_everything(struct drm_device *dev);
2094 2146
2095/* i915_gem_stolen.c */ 2147/* i915_gem_stolen.c */
@@ -2182,15 +2234,30 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
2182extern void intel_i2c_reset(struct drm_device *dev); 2234extern void intel_i2c_reset(struct drm_device *dev);
2183 2235
2184/* intel_opregion.c */ 2236/* intel_opregion.c */
2237struct intel_encoder;
2185extern int intel_opregion_setup(struct drm_device *dev); 2238extern int intel_opregion_setup(struct drm_device *dev);
2186#ifdef CONFIG_ACPI 2239#ifdef CONFIG_ACPI
2187extern void intel_opregion_init(struct drm_device *dev); 2240extern void intel_opregion_init(struct drm_device *dev);
2188extern void intel_opregion_fini(struct drm_device *dev); 2241extern void intel_opregion_fini(struct drm_device *dev);
2189extern void intel_opregion_asle_intr(struct drm_device *dev); 2242extern void intel_opregion_asle_intr(struct drm_device *dev);
2243extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
2244 bool enable);
2245extern int intel_opregion_notify_adapter(struct drm_device *dev,
2246 pci_power_t state);
2190#else 2247#else
2191static inline void intel_opregion_init(struct drm_device *dev) { return; } 2248static inline void intel_opregion_init(struct drm_device *dev) { return; }
2192static inline void intel_opregion_fini(struct drm_device *dev) { return; } 2249static inline void intel_opregion_fini(struct drm_device *dev) { return; }
2193static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } 2250static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
2251static inline int
2252intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
2253{
2254 return 0;
2255}
2256static inline int
2257intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
2258{
2259 return 0;
2260}
2194#endif 2261#endif
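As with the existing CONFIG_ACPI hooks, the new notify entry points gain static-inline stubs returning 0, so callers can invoke intel_opregion_notify_encoder()/intel_opregion_notify_adapter() unconditionally instead of wrapping every call site in #ifdef CONFIG_ACPI.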
2195 2262
2196/* intel_acpi.c */ 2263/* intel_acpi.c */
@@ -2252,8 +2319,16 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
2252u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr); 2319u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
2253void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val); 2320void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
2254u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr); 2321u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
2255u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg); 2322u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
2256void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val); 2323void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
2324u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
2325void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
2326u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
2327void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
2328u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
2329void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
2330u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
2331void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
2257u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, 2332u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
2258 enum intel_sbi_destination destination); 2333 enum intel_sbi_destination destination);
2259void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, 2334void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cdfb9da0e4ce..13c885d66383 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -41,6 +41,9 @@ static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *o
41static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj, 41static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
42 bool force); 42 bool force);
43static __must_check int 43static __must_check int
44i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
45 bool readonly);
46static __must_check int
44i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, 47i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
45 struct i915_address_space *vm, 48 struct i915_address_space *vm,
46 unsigned alignment, 49 unsigned alignment,
@@ -432,11 +435,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
432 * optimizes for the case when the gpu will dirty the data 435 * optimizes for the case when the gpu will dirty the data
433 * anyway again before the next pread happens. */ 436 * anyway again before the next pread happens. */
434 needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level); 437 needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
435 if (i915_gem_obj_bound_any(obj)) { 438 ret = i915_gem_object_wait_rendering(obj, true);
436 ret = i915_gem_object_set_to_gtt_domain(obj, false); 439 if (ret)
437 if (ret) 440 return ret;
438 return ret;
439 }
440 } 441 }
441 442
442 ret = i915_gem_object_get_pages(obj); 443 ret = i915_gem_object_get_pages(obj);
@@ -748,11 +749,9 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
748 * optimizes for the case when the gpu will use the data 749 * optimizes for the case when the gpu will use the data
749 * right away and we therefore have to clflush anyway. */ 750 * right away and we therefore have to clflush anyway. */
750 needs_clflush_after = cpu_write_needs_clflush(obj); 751 needs_clflush_after = cpu_write_needs_clflush(obj);
751 if (i915_gem_obj_bound_any(obj)) { 752 ret = i915_gem_object_wait_rendering(obj, false);
752 ret = i915_gem_object_set_to_gtt_domain(obj, true); 753 if (ret)
753 if (ret) 754 return ret;
754 return ret;
755 }
756 } 755 }
757 /* Same trick applies to invalidate partially written cachelines read 756 /* Same trick applies to invalidate partially written cachelines read
758 * before writing. */ 757 * before writing. */
@@ -966,12 +965,31 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
966 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex)); 965 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
967 966
968 ret = 0; 967 ret = 0;
969 if (seqno == ring->outstanding_lazy_request) 968 if (seqno == ring->outstanding_lazy_seqno)
970 ret = i915_add_request(ring, NULL); 969 ret = i915_add_request(ring, NULL);
971 970
972 return ret; 971 return ret;
973} 972}
974 973
974static void fake_irq(unsigned long data)
975{
976 wake_up_process((struct task_struct *)data);
977}
978
979static bool missed_irq(struct drm_i915_private *dev_priv,
980 struct intel_ring_buffer *ring)
981{
982 return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
983}
984
985static bool can_wait_boost(struct drm_i915_file_private *file_priv)
986{
987 if (file_priv == NULL)
988 return true;
989
990 return !atomic_xchg(&file_priv->rps_wait_boost, true);
991}
992
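can_wait_boost() uses atomic_xchg() as a test-and-set: it unconditionally writes true and returns the previous value, so only the first waiter from a given file since the flag was last cleared gets to boost the RPS frequency (the per-file idle work added later in this patch is what clears it). The same idiom in isolation, as a hypothetical one-shot latch rather than the driver's struct:

    #include <linux/atomic.h>

    static atomic_t boosted = ATOMIC_INIT(0);

    static bool try_boost_once(void)
    {
            /* atomic_xchg() hands back the old value, so this is
             * true only for the first caller after a reset. */
            return !atomic_xchg(&boosted, 1);
    }

    static void reset_boost(void)
    {
            atomic_set(&boosted, 0);
    }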
975/** 993/**
976 * __wait_seqno - wait until execution of seqno has finished 994 * __wait_seqno - wait until execution of seqno has finished
977 * @ring: the ring expected to report seqno 995 * @ring: the ring expected to report seqno
@@ -992,13 +1010,14 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
992 */ 1010 */
993static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, 1011static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
994 unsigned reset_counter, 1012 unsigned reset_counter,
995 bool interruptible, struct timespec *timeout) 1013 bool interruptible,
1014 struct timespec *timeout,
1015 struct drm_i915_file_private *file_priv)
996{ 1016{
997 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1017 drm_i915_private_t *dev_priv = ring->dev->dev_private;
998 struct timespec before, now, wait_time={1,0}; 1018 struct timespec before, now;
999 unsigned long timeout_jiffies; 1019 DEFINE_WAIT(wait);
1000 long end; 1020 long timeout_jiffies;
1001 bool wait_forever = true;
1002 int ret; 1021 int ret;
1003 1022
1004 WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n"); 1023 WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
@@ -1006,51 +1025,79 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1006 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) 1025 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1007 return 0; 1026 return 0;
1008 1027
1009 trace_i915_gem_request_wait_begin(ring, seqno); 1028 timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
1010 1029
1011 if (timeout != NULL) { 1030 if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
1012 wait_time = *timeout; 1031 gen6_rps_boost(dev_priv);
1013 wait_forever = false; 1032 if (file_priv)
1033 mod_delayed_work(dev_priv->wq,
1034 &file_priv->mm.idle_work,
1035 msecs_to_jiffies(100));
1014 } 1036 }
1015 1037
1016 timeout_jiffies = timespec_to_jiffies_timeout(&wait_time); 1038 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
1017 1039 WARN_ON(!ring->irq_get(ring)))
1018 if (WARN_ON(!ring->irq_get(ring)))
1019 return -ENODEV; 1040 return -ENODEV;
1020 1041
1021 /* Record current time in case interrupted by signal, or wedged * */ 1042 /* Record current time in case interrupted by signal, or wedged */
1043 trace_i915_gem_request_wait_begin(ring, seqno);
1022 getrawmonotonic(&before); 1044 getrawmonotonic(&before);
1045 for (;;) {
1046 struct timer_list timer;
1047 unsigned long expire;
1023 1048
1024#define EXIT_COND \ 1049 prepare_to_wait(&ring->irq_queue, &wait,
1025 (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ 1050 interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
1026 i915_reset_in_progress(&dev_priv->gpu_error) || \
1027 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1028 do {
1029 if (interruptible)
1030 end = wait_event_interruptible_timeout(ring->irq_queue,
1031 EXIT_COND,
1032 timeout_jiffies);
1033 else
1034 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1035 timeout_jiffies);
1036 1051
1037 /* We need to check whether any gpu reset happened in between 1052 /* We need to check whether any gpu reset happened in between
1038 * the caller grabbing the seqno and now ... */ 1053 * the caller grabbing the seqno and now ... */
1039 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) 1054 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
1040 end = -EAGAIN; 1055 /* ... but upgrade the -EAGAIN to an -EIO if the gpu
 1056 * is truly gone. */
1057 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1058 if (ret == 0)
1059 ret = -EAGAIN;
1060 break;
1061 }
1041 1062
 1042 /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly 1063 if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
1043 * gone. */ 1064 ret = 0;
1044 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible); 1065 break;
1045 if (ret) 1066 }
1046 end = ret; 1067
1047 } while (end == 0 && wait_forever); 1068 if (interruptible && signal_pending(current)) {
1069 ret = -ERESTARTSYS;
1070 break;
1071 }
1072
1073 if (timeout_jiffies <= 0) {
1074 ret = -ETIME;
1075 break;
1076 }
1077
1078 timer.function = NULL;
1079 if (timeout || missed_irq(dev_priv, ring)) {
1080 setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
 1081 expire = jiffies + (missed_irq(dev_priv, ring) ? 1 : timeout_jiffies);
1082 mod_timer(&timer, expire);
1083 }
1084
1085 schedule();
1048 1086
1087 if (timeout)
1088 timeout_jiffies = expire - jiffies;
1089
1090 if (timer.function) {
1091 del_singleshot_timer_sync(&timer);
1092 destroy_timer_on_stack(&timer);
1093 }
1094 }
1049 getrawmonotonic(&now); 1095 getrawmonotonic(&now);
1096 trace_i915_gem_request_wait_end(ring, seqno);
1050 1097
1051 ring->irq_put(ring); 1098 ring->irq_put(ring);
1052 trace_i915_gem_request_wait_end(ring, seqno); 1099
1053#undef EXIT_COND 1100 finish_wait(&ring->irq_queue, &wait);
1054 1101
1055 if (timeout) { 1102 if (timeout) {
1056 struct timespec sleep_time = timespec_sub(now, before); 1103 struct timespec sleep_time = timespec_sub(now, before);
@@ -1059,17 +1106,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1059 set_normalized_timespec(timeout, 0, 0); 1106 set_normalized_timespec(timeout, 0, 0);
1060 } 1107 }
1061 1108
1062 switch (end) { 1109 return ret;
1063 case -EIO:
1064 case -EAGAIN: /* Wedged */
1065 case -ERESTARTSYS: /* Signal */
1066 return (int)end;
1067 case 0: /* Timeout */
1068 return -ETIME;
1069 default: /* Completed */
1070 WARN_ON(end < 0); /* We're not aware of other errors */
1071 return 0;
1072 }
1073} 1110}
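
The rewrite above trades wait_event*_timeout() for an open-coded prepare_to_wait()/schedule() loop so the driver can interleave its own checks (reset counter, missed-IRQ fallback timer) between wakeups. The skeleton of that pattern, stripped of the i915 specifics, with done_condition() standing in for the seqno test:

    #include <linux/wait.h>
    #include <linux/sched.h>

    static bool done_condition(void);       /* stand-in for the seqno check */

    static int wait_for_condition(wait_queue_head_t *wq, bool interruptible)
    {
            DEFINE_WAIT(wait);
            int ret = 0;

            for (;;) {
                    prepare_to_wait(wq, &wait, interruptible ?
                                    TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

                    if (done_condition())
                            break;          /* re-check after arming the wait */

                    if (interruptible && signal_pending(current)) {
                            ret = -ERESTARTSYS;
                            break;
                    }

                    schedule();             /* sleep until wake_up(wq) or a signal */
            }
            finish_wait(wq, &wait);
            return ret;
    }

prepare_to_wait() must run before the final condition check so a wake_up() racing with the test cannot be lost; the on-stack timer in __wait_seqno() plays the role of a surrogate wake_up() whenever an interrupt is suspected to have been missed.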
1074 1111
1075/** 1112/**
@@ -1097,7 +1134,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1097 1134
1098 return __wait_seqno(ring, seqno, 1135 return __wait_seqno(ring, seqno,
1099 atomic_read(&dev_priv->gpu_error.reset_counter), 1136 atomic_read(&dev_priv->gpu_error.reset_counter),
1100 interruptible, NULL); 1137 interruptible, NULL, NULL);
1101} 1138}
1102 1139
1103static int 1140static int
@@ -1147,6 +1184,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1147 */ 1184 */
1148static __must_check int 1185static __must_check int
1149i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, 1186i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1187 struct drm_file *file,
1150 bool readonly) 1188 bool readonly)
1151{ 1189{
1152 struct drm_device *dev = obj->base.dev; 1190 struct drm_device *dev = obj->base.dev;
@@ -1173,7 +1211,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1173 1211
1174 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 1212 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1175 mutex_unlock(&dev->struct_mutex); 1213 mutex_unlock(&dev->struct_mutex);
1176 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL); 1214 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
1177 mutex_lock(&dev->struct_mutex); 1215 mutex_lock(&dev->struct_mutex);
1178 if (ret) 1216 if (ret)
1179 return ret; 1217 return ret;
@@ -1222,7 +1260,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1222 * We will repeat the flush holding the lock in the normal manner 1260 * We will repeat the flush holding the lock in the normal manner
1223 * to catch cases where we are gazumped. 1261 * to catch cases where we are gazumped.
1224 */ 1262 */
1225 ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain); 1263 ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
1226 if (ret) 1264 if (ret)
1227 goto unref; 1265 goto unref;
1228 1266
@@ -1918,7 +1956,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1918 return 0; 1956 return 0;
1919} 1957}
1920 1958
1921void 1959static void
1922i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, 1960i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1923 struct intel_ring_buffer *ring) 1961 struct intel_ring_buffer *ring)
1924{ 1962{
@@ -1957,6 +1995,13 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1957 } 1995 }
1958} 1996}
1959 1997
1998void i915_vma_move_to_active(struct i915_vma *vma,
1999 struct intel_ring_buffer *ring)
2000{
2001 list_move_tail(&vma->mm_list, &vma->vm->active_list);
2002 return i915_gem_object_move_to_active(vma->obj, ring);
2003}
2004
1960static void 2005static void
1961i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) 2006i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1962{ 2007{
@@ -2078,11 +2123,10 @@ int __i915_add_request(struct intel_ring_buffer *ring,
2078 if (ret) 2123 if (ret)
2079 return ret; 2124 return ret;
2080 2125
2081 request = kmalloc(sizeof(*request), GFP_KERNEL); 2126 request = ring->preallocated_lazy_request;
2082 if (request == NULL) 2127 if (WARN_ON(request == NULL))
2083 return -ENOMEM; 2128 return -ENOMEM;
2084 2129
2085
2086 /* Record the position of the start of the request so that 2130 /* Record the position of the start of the request so that
2087 * should we detect the updated seqno part-way through the 2131 * should we detect the updated seqno part-way through the
2088 * GPU processing the request, we never over-estimate the 2132 * GPU processing the request, we never over-estimate the
@@ -2091,17 +2135,13 @@ int __i915_add_request(struct intel_ring_buffer *ring,
2091 request_ring_position = intel_ring_get_tail(ring); 2135 request_ring_position = intel_ring_get_tail(ring);
2092 2136
2093 ret = ring->add_request(ring); 2137 ret = ring->add_request(ring);
2094 if (ret) { 2138 if (ret)
2095 kfree(request);
2096 return ret; 2139 return ret;
2097 }
2098 2140
2099 request->seqno = intel_ring_get_seqno(ring); 2141 request->seqno = intel_ring_get_seqno(ring);
2100 request->ring = ring; 2142 request->ring = ring;
2101 request->head = request_start; 2143 request->head = request_start;
2102 request->tail = request_ring_position; 2144 request->tail = request_ring_position;
2103 request->ctx = ring->last_context;
2104 request->batch_obj = obj;
2105 2145
2106 /* Whilst this request exists, batch_obj will be on the 2146 /* Whilst this request exists, batch_obj will be on the
2107 * active_list, and so will hold the active reference. Only when this 2147 * active_list, and so will hold the active reference. Only when this
@@ -2109,7 +2149,12 @@ int __i915_add_request(struct intel_ring_buffer *ring,
2109 * inactive_list and lose its active reference. Hence we do not need 2149 * inactive_list and lose its active reference. Hence we do not need
2110 * to explicitly hold another reference here. 2150 * to explicitly hold another reference here.
2111 */ 2151 */
2152 request->batch_obj = obj;
2112 2153
2154 /* Hold a reference to the current context so that we can inspect
2155 * it later in case a hangcheck error event fires.
2156 */
2157 request->ctx = ring->last_context;
2113 if (request->ctx) 2158 if (request->ctx)
2114 i915_gem_context_reference(request->ctx); 2159 i915_gem_context_reference(request->ctx);
2115 2160
@@ -2129,12 +2174,14 @@ int __i915_add_request(struct intel_ring_buffer *ring,
2129 } 2174 }
2130 2175
2131 trace_i915_gem_request_add(ring, request->seqno); 2176 trace_i915_gem_request_add(ring, request->seqno);
2132 ring->outstanding_lazy_request = 0; 2177 ring->outstanding_lazy_seqno = 0;
2178 ring->preallocated_lazy_request = NULL;
2133 2179
2134 if (!dev_priv->ums.mm_suspended) { 2180 if (!dev_priv->ums.mm_suspended) {
2135 i915_queue_hangcheck(ring->dev); 2181 i915_queue_hangcheck(ring->dev);
2136 2182
2137 if (was_empty) { 2183 if (was_empty) {
2184 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2138 queue_delayed_work(dev_priv->wq, 2185 queue_delayed_work(dev_priv->wq,
2139 &dev_priv->mm.retire_work, 2186 &dev_priv->mm.retire_work,
2140 round_jiffies_up_relative(HZ)); 2187 round_jiffies_up_relative(HZ));
@@ -2156,10 +2203,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2156 return; 2203 return;
2157 2204
2158 spin_lock(&file_priv->mm.lock); 2205 spin_lock(&file_priv->mm.lock);
2159 if (request->file_priv) { 2206 list_del(&request->client_list);
2160 list_del(&request->client_list); 2207 request->file_priv = NULL;
2161 request->file_priv = NULL;
2162 }
2163 spin_unlock(&file_priv->mm.lock); 2208 spin_unlock(&file_priv->mm.lock);
2164} 2209}
2165 2210
@@ -2224,6 +2269,21 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request,
2224 return false; 2269 return false;
2225} 2270}
2226 2271
2272static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
2273{
2274 const unsigned long elapsed = get_seconds() - hs->guilty_ts;
2275
2276 if (hs->banned)
2277 return true;
2278
2279 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
2280 DRM_ERROR("context hanging too fast, declaring banned!\n");
2281 return true;
2282 }
2283
2284 return false;
2285}
2286
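Concretely: a context found guilty at t = 0 s has guilty_ts recorded; found guilty again at t = 10 s, the elapsed 10 seconds fall inside the 12-second DRM_I915_CTX_BAN_PERIOD and the context is banned, while a second hang at t = 20 s merely restarts the window.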
2227static void i915_set_reset_status(struct intel_ring_buffer *ring, 2287static void i915_set_reset_status(struct intel_ring_buffer *ring,
2228 struct drm_i915_gem_request *request, 2288 struct drm_i915_gem_request *request,
2229 u32 acthd) 2289 u32 acthd)
@@ -2260,10 +2320,13 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
2260 hs = &request->file_priv->hang_stats; 2320 hs = &request->file_priv->hang_stats;
2261 2321
2262 if (hs) { 2322 if (hs) {
2263 if (guilty) 2323 if (guilty) {
2324 hs->banned = i915_context_is_banned(hs);
2264 hs->batch_active++; 2325 hs->batch_active++;
2265 else 2326 hs->guilty_ts = get_seconds();
2327 } else {
2266 hs->batch_pending++; 2328 hs->batch_pending++;
2329 }
2267 } 2330 }
2268} 2331}
2269 2332
@@ -2405,57 +2468,53 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2405 WARN_ON(i915_verify_lists(ring->dev)); 2468 WARN_ON(i915_verify_lists(ring->dev));
2406} 2469}
2407 2470
2408void 2471bool
2409i915_gem_retire_requests(struct drm_device *dev) 2472i915_gem_retire_requests(struct drm_device *dev)
2410{ 2473{
2411 drm_i915_private_t *dev_priv = dev->dev_private; 2474 drm_i915_private_t *dev_priv = dev->dev_private;
2412 struct intel_ring_buffer *ring; 2475 struct intel_ring_buffer *ring;
2476 bool idle = true;
2413 int i; 2477 int i;
2414 2478
2415 for_each_ring(ring, dev_priv, i) 2479 for_each_ring(ring, dev_priv, i) {
2416 i915_gem_retire_requests_ring(ring); 2480 i915_gem_retire_requests_ring(ring);
2481 idle &= list_empty(&ring->request_list);
2482 }
2483
2484 if (idle)
2485 mod_delayed_work(dev_priv->wq,
2486 &dev_priv->mm.idle_work,
2487 msecs_to_jiffies(100));
2488
2489 return idle;
2417} 2490}
2418 2491
2419static void 2492static void
2420i915_gem_retire_work_handler(struct work_struct *work) 2493i915_gem_retire_work_handler(struct work_struct *work)
2421{ 2494{
2422 drm_i915_private_t *dev_priv; 2495 struct drm_i915_private *dev_priv =
2423 struct drm_device *dev; 2496 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2424 struct intel_ring_buffer *ring; 2497 struct drm_device *dev = dev_priv->dev;
2425 bool idle; 2498 bool idle;
2426 int i;
2427
2428 dev_priv = container_of(work, drm_i915_private_t,
2429 mm.retire_work.work);
2430 dev = dev_priv->dev;
2431 2499
2432 /* Come back later if the device is busy... */ 2500 /* Come back later if the device is busy... */
2433 if (!mutex_trylock(&dev->struct_mutex)) { 2501 idle = false;
2434 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 2502 if (mutex_trylock(&dev->struct_mutex)) {
2435 round_jiffies_up_relative(HZ)); 2503 idle = i915_gem_retire_requests(dev);
2436 return; 2504 mutex_unlock(&dev->struct_mutex);
2437 }
2438
2439 i915_gem_retire_requests(dev);
2440
2441 /* Send a periodic flush down the ring so we don't hold onto GEM
2442 * objects indefinitely.
2443 */
2444 idle = true;
2445 for_each_ring(ring, dev_priv, i) {
2446 if (ring->gpu_caches_dirty)
2447 i915_add_request(ring, NULL);
2448
2449 idle &= list_empty(&ring->request_list);
2450 } 2505 }
2451 2506 if (!idle)
2452 if (!dev_priv->ums.mm_suspended && !idle)
2453 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 2507 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2454 round_jiffies_up_relative(HZ)); 2508 round_jiffies_up_relative(HZ));
2455 if (idle) 2509}
2456 intel_mark_idle(dev);
2457 2510
2458 mutex_unlock(&dev->struct_mutex); 2511static void
2512i915_gem_idle_work_handler(struct work_struct *work)
2513{
2514 struct drm_i915_private *dev_priv =
2515 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2516
2517 intel_mark_idle(dev_priv->dev);
2459} 2518}
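
The restructure splits the old single handler in two: retire_work stays periodic and simply requeues itself via the !idle path above whenever mutex_trylock() fails or requests remain, while the new one-shot idle_work, armed from i915_gem_retire_requests(), is now the only path into intel_mark_idle(), keeping the idle transition out of the periodic timer.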
2460 2519
2461/** 2520/**
@@ -2553,7 +2612,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2553 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 2612 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2554 mutex_unlock(&dev->struct_mutex); 2613 mutex_unlock(&dev->struct_mutex);
2555 2614
2556 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout); 2615 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
2557 if (timeout) 2616 if (timeout)
2558 args->timeout_ns = timespec_to_ns(timeout); 2617 args->timeout_ns = timespec_to_ns(timeout);
2559 return ret; 2618 return ret;
@@ -2600,6 +2659,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
2600 if (ret) 2659 if (ret)
2601 return ret; 2660 return ret;
2602 2661
2662 trace_i915_gem_ring_sync_to(from, to, seqno);
2603 ret = to->sync_to(to, from, seqno); 2663 ret = to->sync_to(to, from, seqno);
2604 if (!ret) 2664 if (!ret)
2605 /* We use last_read_seqno because sync_to() 2665 /* We use last_read_seqno because sync_to()
@@ -2641,11 +2701,17 @@ int i915_vma_unbind(struct i915_vma *vma)
2641 drm_i915_private_t *dev_priv = obj->base.dev->dev_private; 2701 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2642 int ret; 2702 int ret;
2643 2703
2704 /* For now we only ever use 1 vma per object */
2705 WARN_ON(!list_is_singular(&obj->vma_list));
2706
2644 if (list_empty(&vma->vma_link)) 2707 if (list_empty(&vma->vma_link))
2645 return 0; 2708 return 0;
2646 2709
2647 if (!drm_mm_node_allocated(&vma->node)) 2710 if (!drm_mm_node_allocated(&vma->node)) {
2648 goto destroy; 2711 i915_gem_vma_destroy(vma);
2712
2713 return 0;
2714 }
2649 2715
2650 if (obj->pin_count) 2716 if (obj->pin_count)
2651 return -EBUSY; 2717 return -EBUSY;
@@ -2685,13 +2751,10 @@ int i915_vma_unbind(struct i915_vma *vma)
2685 2751
2686 drm_mm_remove_node(&vma->node); 2752 drm_mm_remove_node(&vma->node);
2687 2753
2688destroy:
2689 i915_gem_vma_destroy(vma); 2754 i915_gem_vma_destroy(vma);
2690 2755
2691 /* Since the unbound list is global, only move to that list if 2756 /* Since the unbound list is global, only move to that list if
2692 * no more VMAs exist. 2757 * no more VMAs exist. */
2693 * NB: Until we have real VMAs there will only ever be one */
2694 WARN_ON(!list_empty(&obj->vma_list));
2695 if (list_empty(&obj->vma_list)) 2758 if (list_empty(&obj->vma_list))
2696 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); 2759 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2697 2760
@@ -3389,8 +3452,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3389 3452
3390 /* And bump the LRU for this access */ 3453 /* And bump the LRU for this access */
3391 if (i915_gem_object_is_inactive(obj)) { 3454 if (i915_gem_object_is_inactive(obj)) {
3392 struct i915_vma *vma = i915_gem_obj_to_vma(obj, 3455 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3393 &dev_priv->gtt.base);
3394 if (vma) 3456 if (vma)
3395 list_move_tail(&vma->mm_list, 3457 list_move_tail(&vma->mm_list,
3396 &dev_priv->gtt.base.inactive_list); 3458 &dev_priv->gtt.base.inactive_list);
@@ -3761,7 +3823,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3761 if (seqno == 0) 3823 if (seqno == 0)
3762 return 0; 3824 return 0;
3763 3825
3764 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL); 3826 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
3765 if (ret == 0) 3827 if (ret == 0)
3766 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); 3828 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3767 3829
@@ -4015,7 +4077,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
4015{ 4077{
4016 INIT_LIST_HEAD(&obj->global_list); 4078 INIT_LIST_HEAD(&obj->global_list);
4017 INIT_LIST_HEAD(&obj->ring_list); 4079 INIT_LIST_HEAD(&obj->ring_list);
4018 INIT_LIST_HEAD(&obj->exec_list);
4019 INIT_LIST_HEAD(&obj->obj_exec_link); 4080 INIT_LIST_HEAD(&obj->obj_exec_link);
4020 INIT_LIST_HEAD(&obj->vma_list); 4081 INIT_LIST_HEAD(&obj->vma_list);
4021 4082
@@ -4087,13 +4148,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4087 return obj; 4148 return obj;
4088} 4149}
4089 4150
4090int i915_gem_init_object(struct drm_gem_object *obj)
4091{
4092 BUG();
4093
4094 return 0;
4095}
4096
4097void i915_gem_free_object(struct drm_gem_object *gem_obj) 4151void i915_gem_free_object(struct drm_gem_object *gem_obj)
4098{ 4152{
4099 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 4153 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
@@ -4147,9 +4201,20 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
4147 i915_gem_object_free(obj); 4201 i915_gem_object_free(obj);
4148} 4202}
4149 4203
4150struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj, 4204struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4151 struct i915_address_space *vm) 4205 struct i915_address_space *vm)
4152{ 4206{
4207 struct i915_vma *vma;
4208 list_for_each_entry(vma, &obj->vma_list, vma_link)
4209 if (vma->vm == vm)
4210 return vma;
4211
4212 return NULL;
4213}
4214
4215static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
4216 struct i915_address_space *vm)
4217{
4153 struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); 4218 struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
4154 if (vma == NULL) 4219 if (vma == NULL)
4155 return ERR_PTR(-ENOMEM); 4220 return ERR_PTR(-ENOMEM);
@@ -4169,10 +4234,29 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
4169 return vma; 4234 return vma;
4170} 4235}
4171 4236
4237struct i915_vma *
4238i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
4239 struct i915_address_space *vm)
4240{
4241 struct i915_vma *vma;
4242
4243 vma = i915_gem_obj_to_vma(obj, vm);
4244 if (!vma)
4245 vma = __i915_gem_vma_create(obj, vm);
4246
4247 return vma;
4248}
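
Callers treat the lookup-or-create helper as an infallible lookup plus a fallible allocation: __i915_gem_vma_create() returns ERR_PTR(-ENOMEM), so the result is checked with the ERR_PTR machinery rather than against NULL. A typical (hypothetical) call site:

    struct i915_vma *vma;

    vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
    if (IS_ERR(vma))
            return PTR_ERR(vma);    /* allocation failed, no partial state */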
4249
4172void i915_gem_vma_destroy(struct i915_vma *vma) 4250void i915_gem_vma_destroy(struct i915_vma *vma)
4173{ 4251{
4174 WARN_ON(vma->node.allocated); 4252 WARN_ON(vma->node.allocated);
4253
4254 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4255 if (!list_empty(&vma->exec_list))
4256 return;
4257
4175 list_del(&vma->vma_link); 4258 list_del(&vma->vma_link);
4259
4176 kfree(vma); 4260 kfree(vma);
4177} 4261}
4178 4262
@@ -4182,16 +4266,13 @@ i915_gem_idle(struct drm_device *dev)
4182 drm_i915_private_t *dev_priv = dev->dev_private; 4266 drm_i915_private_t *dev_priv = dev->dev_private;
4183 int ret; 4267 int ret;
4184 4268
4185 if (dev_priv->ums.mm_suspended) { 4269 if (dev_priv->ums.mm_suspended)
4186 mutex_unlock(&dev->struct_mutex);
4187 return 0; 4270 return 0;
4188 }
4189 4271
4190 ret = i915_gpu_idle(dev); 4272 ret = i915_gpu_idle(dev);
4191 if (ret) { 4273 if (ret)
4192 mutex_unlock(&dev->struct_mutex);
4193 return ret; 4274 return ret;
4194 } 4275
4195 i915_gem_retire_requests(dev); 4276 i915_gem_retire_requests(dev);
4196 4277
4197 /* Under UMS, be paranoid and evict. */ 4278 /* Under UMS, be paranoid and evict. */
@@ -4205,40 +4286,40 @@ i915_gem_idle(struct drm_device *dev)
4205 4286
4206 /* Cancel the retire work handler, which should be idle now. */ 4287 /* Cancel the retire work handler, which should be idle now. */
4207 cancel_delayed_work_sync(&dev_priv->mm.retire_work); 4288 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4289 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
4208 4290
4209 return 0; 4291 return 0;
4210} 4292}
4211 4293
4212void i915_gem_l3_remap(struct drm_device *dev) 4294int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
4213{ 4295{
4296 struct drm_device *dev = ring->dev;
4214 drm_i915_private_t *dev_priv = dev->dev_private; 4297 drm_i915_private_t *dev_priv = dev->dev_private;
4215 u32 misccpctl; 4298 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4216 int i; 4299 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4300 int i, ret;
4217 4301
4218 if (!HAS_L3_GPU_CACHE(dev)) 4302 if (!HAS_L3_DPF(dev) || !remap_info)
4219 return; 4303 return 0;
4220
4221 if (!dev_priv->l3_parity.remap_info)
4222 return;
4223 4304
4224 misccpctl = I915_READ(GEN7_MISCCPCTL); 4305 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4225 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 4306 if (ret)
4226 POSTING_READ(GEN7_MISCCPCTL); 4307 return ret;
4227 4308
4309 /*
4310 * Note: We do not worry about the concurrent register cacheline hang
4311 * here because no other code should access these registers other than
4312 * at initialization time.
4313 */
4228 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) { 4314 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4229 u32 remap = I915_READ(GEN7_L3LOG_BASE + i); 4315 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4230 if (remap && remap != dev_priv->l3_parity.remap_info[i/4]) 4316 intel_ring_emit(ring, reg_base + i);
4231 DRM_DEBUG("0x%x was already programmed to %x\n", 4317 intel_ring_emit(ring, remap_info[i/4]);
4232 GEN7_L3LOG_BASE + i, remap);
4233 if (remap && !dev_priv->l3_parity.remap_info[i/4])
4234 DRM_DEBUG_DRIVER("Clearing remapped register\n");
4235 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
4236 } 4318 }
4237 4319
4238 /* Make sure all the writes land before disabling dop clock gating */ 4320 intel_ring_advance(ring);
4239 POSTING_READ(GEN7_L3LOG_BASE);
4240 4321
4241 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 4322 return ret;
4242} 4323}
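
Moving the L3 remap onto the ring replaces direct MMIO pokes with MI_LOAD_REGISTER_IMM commands, so the GPU replays the remap itself in submission order: three dwords per register (LRI header, register offset, value), hence the GEN7_L3LOG_SIZE / 4 * 3 reservation above. The emission pattern in isolation, a sketch assuming the era's intel_ring_begin/emit/advance API:

    static int emit_lri_one(struct intel_ring_buffer *ring,
                            u32 reg_offset, u32 value)
    {
            int ret;

            ret = intel_ring_begin(ring, 4);        /* 3 dwords + pad */
            if (ret)
                    return ret;

            intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); /* header: 1 reg */
            intel_ring_emit(ring, reg_offset);              /* which register */
            intel_ring_emit(ring, value);                   /* what to write */
            intel_ring_emit(ring, MI_NOOP);                 /* qword padding */
            intel_ring_advance(ring);
            return 0;
    }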
4243 4324
4244void i915_gem_init_swizzling(struct drm_device *dev) 4325void i915_gem_init_swizzling(struct drm_device *dev)
@@ -4330,7 +4411,7 @@ int
4330i915_gem_init_hw(struct drm_device *dev) 4411i915_gem_init_hw(struct drm_device *dev)
4331{ 4412{
4332 drm_i915_private_t *dev_priv = dev->dev_private; 4413 drm_i915_private_t *dev_priv = dev->dev_private;
4333 int ret; 4414 int ret, i;
4334 4415
4335 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) 4416 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4336 return -EIO; 4417 return -EIO;
@@ -4338,20 +4419,26 @@ i915_gem_init_hw(struct drm_device *dev)
4338 if (dev_priv->ellc_size) 4419 if (dev_priv->ellc_size)
4339 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); 4420 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4340 4421
4422 if (IS_HSW_GT3(dev))
4423 I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
4424 else
4425 I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
4426
4341 if (HAS_PCH_NOP(dev)) { 4427 if (HAS_PCH_NOP(dev)) {
4342 u32 temp = I915_READ(GEN7_MSG_CTL); 4428 u32 temp = I915_READ(GEN7_MSG_CTL);
4343 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); 4429 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4344 I915_WRITE(GEN7_MSG_CTL, temp); 4430 I915_WRITE(GEN7_MSG_CTL, temp);
4345 } 4431 }
4346 4432
4347 i915_gem_l3_remap(dev);
4348
4349 i915_gem_init_swizzling(dev); 4433 i915_gem_init_swizzling(dev);
4350 4434
4351 ret = i915_gem_init_rings(dev); 4435 ret = i915_gem_init_rings(dev);
4352 if (ret) 4436 if (ret)
4353 return ret; 4437 return ret;
4354 4438
4439 for (i = 0; i < NUM_L3_SLICES(dev); i++)
4440 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4441
4355 /* 4442 /*
4356 * XXX: There was some w/a described somewhere suggesting loading 4443 * XXX: There was some w/a described somewhere suggesting loading
4357 * contexts before PPGTT. 4444 * contexts before PPGTT.
@@ -4523,6 +4610,7 @@ i915_gem_load(struct drm_device *dev)
4523 INIT_LIST_HEAD(&dev_priv->vm_list); 4610 INIT_LIST_HEAD(&dev_priv->vm_list);
4524 i915_init_vm(dev_priv, &dev_priv->gtt.base); 4611 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4525 4612
4613 INIT_LIST_HEAD(&dev_priv->context_list);
4526 INIT_LIST_HEAD(&dev_priv->mm.unbound_list); 4614 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4527 INIT_LIST_HEAD(&dev_priv->mm.bound_list); 4615 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4528 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4616 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4532,6 +4620,8 @@ i915_gem_load(struct drm_device *dev)
4532 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 4620 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4533 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 4621 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4534 i915_gem_retire_work_handler); 4622 i915_gem_retire_work_handler);
4623 INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4624 i915_gem_idle_work_handler);
4535 init_waitqueue_head(&dev_priv->gpu_error.reset_queue); 4625 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4536 4626
4537 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 4627 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
@@ -4582,7 +4672,7 @@ static int i915_gem_init_phys_object(struct drm_device *dev,
4582 if (dev_priv->mm.phys_objs[id - 1] || !size) 4672 if (dev_priv->mm.phys_objs[id - 1] || !size)
4583 return 0; 4673 return 0;
4584 4674
4585 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL); 4675 phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
4586 if (!phys_obj) 4676 if (!phys_obj)
4587 return -ENOMEM; 4677 return -ENOMEM;
4588 4678
@@ -4756,6 +4846,8 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4756{ 4846{
4757 struct drm_i915_file_private *file_priv = file->driver_priv; 4847 struct drm_i915_file_private *file_priv = file->driver_priv;
4758 4848
4849 cancel_delayed_work_sync(&file_priv->mm.idle_work);
4850
4759 /* Clean up our request list when the client is going away, so that 4851 /* Clean up our request list when the client is going away, so that
4760 * later retire_requests won't dereference our soon-to-be-gone 4852 * later retire_requests won't dereference our soon-to-be-gone
4761 * file_priv. 4853 * file_priv.
@@ -4773,6 +4865,38 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4773 spin_unlock(&file_priv->mm.lock); 4865 spin_unlock(&file_priv->mm.lock);
4774} 4866}
4775 4867
4868static void
4869i915_gem_file_idle_work_handler(struct work_struct *work)
4870{
4871 struct drm_i915_file_private *file_priv =
4872 container_of(work, typeof(*file_priv), mm.idle_work.work);
4873
4874 atomic_set(&file_priv->rps_wait_boost, false);
4875}
4876
4877int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4878{
4879 struct drm_i915_file_private *file_priv;
4880
4881 DRM_DEBUG_DRIVER("\n");
4882
4883 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4884 if (!file_priv)
4885 return -ENOMEM;
4886
4887 file->driver_priv = file_priv;
4888 file_priv->dev_priv = dev->dev_private;
4889
4890 spin_lock_init(&file_priv->mm.lock);
4891 INIT_LIST_HEAD(&file_priv->mm.request_list);
4892 INIT_DELAYED_WORK(&file_priv->mm.idle_work,
4893 i915_gem_file_idle_work_handler);
4894
4895 idr_init(&file_priv->context_idr);
4896
4897 return 0;
4898}
4899
4776static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) 4900static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4777{ 4901{
4778 if (!mutex_is_locked(mutex)) 4902 if (!mutex_is_locked(mutex))
@@ -4859,11 +4983,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4859 4983
4860bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) 4984bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
4861{ 4985{
4862 struct drm_i915_private *dev_priv = o->base.dev->dev_private; 4986 struct i915_vma *vma;
4863 struct i915_address_space *vm;
4864 4987
4865 list_for_each_entry(vm, &dev_priv->vm_list, global_link) 4988 list_for_each_entry(vma, &o->vma_list, vma_link)
4866 if (i915_gem_obj_bound(o, vm)) 4989 if (drm_mm_node_allocated(&vma->node))
4867 return true; 4990 return true;
4868 4991
4869 return false; 4992 return false;
@@ -4921,26 +5044,16 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
4921 return freed; 5044 return freed;
4922} 5045}
4923 5046
4924struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, 5047struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
4925 struct i915_address_space *vm)
4926{ 5048{
4927 struct i915_vma *vma; 5049 struct i915_vma *vma;
4928 list_for_each_entry(vma, &obj->vma_list, vma_link)
4929 if (vma->vm == vm)
4930 return vma;
4931
4932 return NULL;
4933}
4934 5050
4935struct i915_vma * 5051 if (WARN_ON(list_empty(&obj->vma_list)))
4936i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, 5052 return NULL;
4937 struct i915_address_space *vm)
4938{
4939 struct i915_vma *vma;
4940 5053
4941 vma = i915_gem_obj_to_vma(obj, vm); 5054 vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
4942 if (!vma) 5055 if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
4943 vma = i915_gem_vma_create(obj, vm); 5056 return NULL;
4944 5057
4945 return vma; 5058 return vma;
4946} 5059}
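
With the vma list hanging off the object, "is this object bound anywhere?" becomes a walk over the object's own vmas instead of a loop over every address space, and the GGTT vma is simply expected to be the first list entry. A condensed sketch of the list-walk half, with simplified types and assuming this era's drm_mm helpers:

	#include <drm/drm_mm.h>
	#include <linux/list.h>

	struct toy_vma {
		struct drm_mm_node node;
		struct list_head vma_link;	/* entry in the object's vma_list */
	};

	static bool toy_obj_bound_any(struct list_head *vma_list)
	{
		struct toy_vma *vma;

		/* Bound iff any vma actually owns a range in some VM. */
		list_for_each_entry(vma, vma_list, vma_link)
			if (drm_mm_node_allocated(&vma->node))
				return true;
		return false;
	}
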
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 403309c2a7d6..1a877a547290 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -73,7 +73,7 @@
73 * 73 *
74 * There are two confusing terms used above: 74 * There are two confusing terms used above:
75 * The "current context" means the context which is currently running on the 75 * The "current context" means the context which is currently running on the
76 * GPU. The GPU has loaded it's state already and has stored away the gtt 76 * GPU. The GPU has loaded its state already and has stored away the gtt
77 * offset of the BO. The GPU is not actively referencing the data at this 77 * offset of the BO. The GPU is not actively referencing the data at this
78 * offset, but it will on the next context switch. The only way to avoid this 78 * offset, but it will on the next context switch. The only way to avoid this
79 * is to do a GPU reset. 79 * is to do a GPU reset.
@@ -129,6 +129,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
129 struct i915_hw_context *ctx = container_of(ctx_ref, 129 struct i915_hw_context *ctx = container_of(ctx_ref,
130 typeof(*ctx), ref); 130 typeof(*ctx), ref);
131 131
132 list_del(&ctx->link);
132 drm_gem_object_unreference(&ctx->obj->base); 133 drm_gem_object_unreference(&ctx->obj->base);
133 kfree(ctx); 134 kfree(ctx);
134} 135}
@@ -147,6 +148,7 @@ create_hw_context(struct drm_device *dev,
147 148
148 kref_init(&ctx->ref); 149 kref_init(&ctx->ref);
149 ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size); 150 ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
151 INIT_LIST_HEAD(&ctx->link);
150 if (ctx->obj == NULL) { 152 if (ctx->obj == NULL) {
151 kfree(ctx); 153 kfree(ctx);
152 DRM_DEBUG_DRIVER("Context object allocation failed\n"); 154
@@ -166,6 +168,7 @@ create_hw_context(struct drm_device *dev,
166 * assertion in the context switch code. 168 * assertion in the context switch code.
167 */ 169 */
168 ctx->ring = &dev_priv->ring[RCS]; 170 ctx->ring = &dev_priv->ring[RCS];
171 list_add_tail(&ctx->link, &dev_priv->context_list);
169 172
170 /* Default context will never have a file_priv */ 173 /* Default context will never have a file_priv */
171 if (file_priv == NULL) 174 if (file_priv == NULL)
@@ -178,6 +181,10 @@ create_hw_context(struct drm_device *dev,
178 181
179 ctx->file_priv = file_priv; 182 ctx->file_priv = file_priv;
180 ctx->id = ret; 183 ctx->id = ret;
184 /* NB: Mark all slices as needing a remap so that when the context first
185 * loads it will restore whatever remap state already exists. If there
186 * is no remap info, it will be a NOP. */
187 ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
181 188
182 return ctx; 189 return ctx;
183 190
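
The remap_slice mask set at context creation is consumed lazily: do_switch() below clears one bit per slice it successfully remaps and leaves failed bits set for the next switch. The bit-consuming loop is easy to model standalone (the fake remap_slice() failure is purely illustrative):

	#include <stdio.h>

	#define NUM_SLICES 2

	/* Pretend the remap of slice 1 fails on the first attempt. */
	static int remap_slice(int slice, int pass)
	{
		return (slice == 1 && pass == 0) ? -1 : 0;
	}

	int main(void)
	{
		unsigned int pending = (1u << NUM_SLICES) - 1;	/* all dirty */
		int pass = 0;

		while (pending) {
			for (int i = 0; i < NUM_SLICES; i++) {
				if (!(pending & (1u << i)))
					continue;
				if (remap_slice(i, pass) == 0)
					pending &= ~(1u << i);
				/* on failure the bit stays set for later */
			}
			pass++;
		}
		printf("all slices remapped after %d pass(es)\n", pass);
		return 0;
	}
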
@@ -393,11 +400,11 @@ static int do_switch(struct i915_hw_context *to)
393 struct intel_ring_buffer *ring = to->ring; 400 struct intel_ring_buffer *ring = to->ring;
394 struct i915_hw_context *from = ring->last_context; 401 struct i915_hw_context *from = ring->last_context;
395 u32 hw_flags = 0; 402 u32 hw_flags = 0;
396 int ret; 403 int ret, i;
397 404
398 BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0); 405 BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
399 406
400 if (from == to) 407 if (from == to && !to->remap_slice)
401 return 0; 408 return 0;
402 409
403 ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false); 410 ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
@@ -420,8 +427,6 @@ static int do_switch(struct i915_hw_context *to)
420 427
421 if (!to->is_initialized || is_default_context(to)) 428 if (!to->is_initialized || is_default_context(to))
422 hw_flags |= MI_RESTORE_INHIBIT; 429 hw_flags |= MI_RESTORE_INHIBIT;
423 else if (WARN_ON_ONCE(from == to)) /* not yet expected */
424 hw_flags |= MI_FORCE_RESTORE;
425 430
426 ret = mi_set_context(ring, to, hw_flags); 431 ret = mi_set_context(ring, to, hw_flags);
427 if (ret) { 432 if (ret) {
@@ -429,6 +434,18 @@ static int do_switch(struct i915_hw_context *to)
429 return ret; 434 return ret;
430 } 435 }
431 436
437 for (i = 0; i < MAX_L3_SLICES; i++) {
438 if (!(to->remap_slice & (1<<i)))
439 continue;
440
441 ret = i915_gem_l3_remap(ring, i);
442 /* If it failed, try again next round */
443 if (ret)
444 DRM_DEBUG_DRIVER("L3 remapping failed\n");
445 else
446 to->remap_slice &= ~(1<<i);
447 }
448
432 /* The backing object for the context is done after switching to the 449 /* The backing object for the context is done after switching to the
433 * *next* context. Therefore we cannot retire the previous context until 450 * *next* context. Therefore we cannot retire the previous context until
434 * the next context has already started running. In fact, the below code 451 * the next context has already started running. In fact, the below code
@@ -436,11 +453,8 @@ static int do_switch(struct i915_hw_context *to)
436 * MI_SET_CONTEXT instead of when the next seqno has completed. 453 * MI_SET_CONTEXT instead of when the next seqno has completed.
437 */ 454 */
438 if (from != NULL) { 455 if (from != NULL) {
439 struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
440 struct i915_address_space *ggtt = &dev_priv->gtt.base;
441 from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; 456 from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
442 list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list); 457 i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
443 i915_gem_object_move_to_active(from->obj, ring);
444 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the 458 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
445 * whole damn pipeline, we don't need to explicitly mark the 459 * whole damn pipeline, we don't need to explicitly mark the
446 * object dirty. The only exception is that the context must be 460 * object dirty. The only exception is that the context must be
@@ -451,17 +465,7 @@ static int do_switch(struct i915_hw_context *to)
451 from->obj->dirty = 1; 465 from->obj->dirty = 1;
452 BUG_ON(from->obj->ring != ring); 466 BUG_ON(from->obj->ring != ring);
453 467
454 ret = i915_add_request(ring, NULL); 468 /* obj is kept alive until the next request by its active ref */
455 if (ret) {
456 /* Too late, we've already scheduled a context switch.
457 * Try to undo the change so that the hw state is
458 * consistent with our tracking. In case of emergency,
459 * scream.
460 */
461 WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
462 return ret;
463 }
464
465 i915_gem_object_unpin(from->obj); 469 i915_gem_object_unpin(from->obj);
466 i915_gem_context_unreference(from); 470 i915_gem_context_unreference(from);
467 } 471 }
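
The deleted i915_add_request() call relied on explicit request bookkeeping; after this change the outgoing context object is kept alive purely by the active reference that i915_vma_move_to_active() takes, so the unpin/unreference that follows cannot free memory the GPU is still reading. The underlying refcount discipline, roughly (illustrative names, not the driver's):

	#include <linux/kref.h>

	struct toy_obj {
		struct kref ref;
		bool active;
	};

	static void toy_free(struct kref *ref) { /* release backing pages */ }

	/* Marking the object active takes a reference owned by the GPU
	 * side; the caller may then drop its own reference immediately. */
	static void toy_move_to_active(struct toy_obj *obj)
	{
		kref_get(&obj->ref);
		obj->active = true;
	}

	/* Retiring past the last request drops the GPU-side reference. */
	static void toy_retire(struct toy_obj *obj)
	{
		obj->active = false;
		kref_put(&obj->ref, toy_free);
	}
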
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 91b700155850..b7376533633d 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -37,6 +37,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
37 if (vma->obj->pin_count) 37 if (vma->obj->pin_count)
38 return false; 38 return false;
39 39
40 if (WARN_ON(!list_empty(&vma->exec_list)))
41 return false;
42
40 list_add(&vma->exec_list, unwind); 43 list_add(&vma->exec_list, unwind);
41 return drm_mm_scan_add_block(&vma->node); 44 return drm_mm_scan_add_block(&vma->node);
42} 45}
@@ -113,7 +116,7 @@ none:
113 } 116 }
114 117
115 /* We expect the caller to unpin, evict all and try again, or give up. 118 /* We expect the caller to unpin, evict all and try again, or give up.
116 * So calling i915_gem_evict_everything() is unnecessary. 119 * So calling i915_gem_evict_vm() is unnecessary.
117 */ 120 */
118 return -ENOSPC; 121 return -ENOSPC;
119 122
@@ -152,12 +155,48 @@ found:
152 return ret; 155 return ret;
153} 156}
154 157
158/**
159 * i915_gem_evict_vm - Try to free up VM space
160 *
161 * @vm: Address space to evict from
162 * @do_idle: Boolean directing whether to idle first.
163 *
164 * VM eviction is about freeing up virtual address space. If one wants fine-
165 * grained eviction, see i915_gem_evict_something() for more details. In terms
166 * of freeing up actual system memory, this function may not accomplish the
167 * desired result. An object may be shared across multiple address spaces, and
168 * this function does not guarantee that such shared objects are freed.
169 *
170 * Using do_idle will result in a more complete eviction because it retires
171 * outstanding requests and inactivates the currently active BOs.
172 */
173int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
174{
175 struct i915_vma *vma, *next;
176 int ret;
177
178 trace_i915_gem_evict_vm(vm);
179
180 if (do_idle) {
181 ret = i915_gpu_idle(vm->dev);
182 if (ret)
183 return ret;
184
185 i915_gem_retire_requests(vm->dev);
186 }
187
188 list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
189 if (vma->obj->pin_count == 0)
190 WARN_ON(i915_vma_unbind(vma));
191
192 return 0;
193}
194
155int 195int
156i915_gem_evict_everything(struct drm_device *dev) 196i915_gem_evict_everything(struct drm_device *dev)
157{ 197{
158 drm_i915_private_t *dev_priv = dev->dev_private; 198 drm_i915_private_t *dev_priv = dev->dev_private;
159 struct i915_address_space *vm; 199 struct i915_address_space *vm;
160 struct i915_vma *vma, *next;
161 bool lists_empty = true; 200 bool lists_empty = true;
162 int ret; 201 int ret;
163 202
@@ -184,11 +223,8 @@ i915_gem_evict_everything(struct drm_device *dev)
184 i915_gem_retire_requests(dev); 223 i915_gem_retire_requests(dev);
185 224
186 /* Having flushed everything, unbind() should never raise an error */ 225 /* Having flushed everything, unbind() should never raise an error */
187 list_for_each_entry(vm, &dev_priv->vm_list, global_link) { 226 list_for_each_entry(vm, &dev_priv->vm_list, global_link)
188 list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list) 227 WARN_ON(i915_gem_evict_vm(vm, false));
189 if (vma->obj->pin_count == 0)
190 WARN_ON(i915_vma_unbind(vma));
191 }
192 228
193 return 0; 229 return 0;
194} 230}
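
With evict_everything reduced to a per-VM loop over i915_gem_evict_vm(), the interesting machinery left is the drm_mm scan protocol that mark_free() feeds: offer candidate nodes, stop once the scanner reports a usable hole, then remove every offered node again in LIFO order. A rough sketch against the scan API of this kernel era (simplified; a real implementation also unbinds the nodes that drm_mm_scan_remove_block() flags as evictable):

	#include <drm/drm_mm.h>
	#include <linux/list.h>

	struct toy_vma {
		struct drm_mm_node node;
		struct list_head mm_list;	/* entry in the VM's inactive list */
		struct list_head exec_list;	/* scratch link for the unwind */
	};

	static bool toy_scan_for_hole(struct drm_mm *mm, struct list_head *inactive)
	{
		struct toy_vma *vma, *next;
		LIST_HEAD(unwind);
		bool found = false;

		drm_mm_init_scan(mm, 4096, 0, 0);	/* size, align, color */
		list_for_each_entry(vma, inactive, mm_list) {
			list_add(&vma->exec_list, &unwind);	/* prepend: LIFO */
			if (drm_mm_scan_add_block(&vma->node)) {
				found = true;
				break;
			}
		}

		/* Every scanned block must be removed again, newest first. */
		list_for_each_entry_safe(vma, next, &unwind, exec_list) {
			drm_mm_scan_remove_block(&vma->node);
			list_del_init(&vma->exec_list);
		}
		return found;
	}
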
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index bf345777ae9f..0ce0d47e4b0f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,35 +33,35 @@
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include <linux/dma_remapping.h> 34#include <linux/dma_remapping.h>
35 35
36struct eb_objects { 36struct eb_vmas {
37 struct list_head objects; 37 struct list_head vmas;
38 int and; 38 int and;
39 union { 39 union {
40 struct drm_i915_gem_object *lut[0]; 40 struct i915_vma *lut[0];
41 struct hlist_head buckets[0]; 41 struct hlist_head buckets[0];
42 }; 42 };
43}; 43};
44 44
45static struct eb_objects * 45static struct eb_vmas *
46eb_create(struct drm_i915_gem_execbuffer2 *args) 46eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
47{ 47{
48 struct eb_objects *eb = NULL; 48 struct eb_vmas *eb = NULL;
49 49
50 if (args->flags & I915_EXEC_HANDLE_LUT) { 50 if (args->flags & I915_EXEC_HANDLE_LUT) {
51 int size = args->buffer_count; 51 unsigned size = args->buffer_count;
52 size *= sizeof(struct drm_i915_gem_object *); 52 size *= sizeof(struct i915_vma *);
53 size += sizeof(struct eb_objects); 53 size += sizeof(struct eb_vmas);
54 eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); 54 eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
55 } 55 }
56 56
57 if (eb == NULL) { 57 if (eb == NULL) {
58 int size = args->buffer_count; 58 unsigned size = args->buffer_count;
59 int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; 59 unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
60 BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head)); 60 BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
61 while (count > 2*size) 61 while (count > 2*size)
62 count >>= 1; 62 count >>= 1;
63 eb = kzalloc(count*sizeof(struct hlist_head) + 63 eb = kzalloc(count*sizeof(struct hlist_head) +
64 sizeof(struct eb_objects), 64 sizeof(struct eb_vmas),
65 GFP_TEMPORARY); 65 GFP_TEMPORARY);
66 if (eb == NULL) 66 if (eb == NULL)
67 return eb; 67 return eb;
@@ -70,64 +70,102 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
70 } else 70 } else
71 eb->and = -args->buffer_count; 71 eb->and = -args->buffer_count;
72 72
73 INIT_LIST_HEAD(&eb->objects); 73 INIT_LIST_HEAD(&eb->vmas);
74 return eb; 74 return eb;
75} 75}
76 76
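
The fallback path of eb_create() sizes its hash table by starting at half a page of hlist_heads and halving until the bucket count is no more than twice the buffer count, which keeps it a power of two so handle & eb->and can select a bucket. The arithmetic in isolation (the 4096/8 values assume a 64-bit build):

	#include <stdio.h>

	int main(void)
	{
		unsigned page_size = 4096, head_size = 8;	/* sizeof(struct hlist_head) */
		unsigned buffers = 50;				/* args->buffer_count */

		unsigned count = page_size / head_size / 2;	/* 256 buckets max */
		while (count > 2 * buffers)
			count >>= 1;				/* stays a power of two */

		/* eb->and is the mask: handle & (count - 1) picks a bucket */
		printf("%u buffers -> %u buckets, mask 0x%x\n",
		       buffers, count, count - 1);
		return 0;
	}
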
77static void 77static void
78eb_reset(struct eb_objects *eb) 78eb_reset(struct eb_vmas *eb)
79{ 79{
80 if (eb->and >= 0) 80 if (eb->and >= 0)
81 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); 81 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
82} 82}
83 83
84static int 84static int
85eb_lookup_objects(struct eb_objects *eb, 85eb_lookup_vmas(struct eb_vmas *eb,
86 struct drm_i915_gem_exec_object2 *exec, 86 struct drm_i915_gem_exec_object2 *exec,
87 const struct drm_i915_gem_execbuffer2 *args, 87 const struct drm_i915_gem_execbuffer2 *args,
88 struct drm_file *file) 88 struct i915_address_space *vm,
89 struct drm_file *file)
89{ 90{
90 int i; 91 struct drm_i915_gem_object *obj;
92 struct list_head objects;
93 int i, ret = 0;
91 94
95 INIT_LIST_HEAD(&objects);
92 spin_lock(&file->table_lock); 96 spin_lock(&file->table_lock);
97 /* Grab a reference to the object and release the lock so we can look up
98 * or create the VMA without using GFP_ATOMIC */
93 for (i = 0; i < args->buffer_count; i++) { 99 for (i = 0; i < args->buffer_count; i++) {
94 struct drm_i915_gem_object *obj;
95
96 obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle)); 100 obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
97 if (obj == NULL) { 101 if (obj == NULL) {
98 spin_unlock(&file->table_lock); 102 spin_unlock(&file->table_lock);
99 DRM_DEBUG("Invalid object handle %d at index %d\n", 103 DRM_DEBUG("Invalid object handle %d at index %d\n",
100 exec[i].handle, i); 104 exec[i].handle, i);
101 return -ENOENT; 105 ret = -ENOENT;
106 goto out;
102 } 107 }
103 108
104 if (!list_empty(&obj->exec_list)) { 109 if (!list_empty(&obj->obj_exec_link)) {
105 spin_unlock(&file->table_lock); 110 spin_unlock(&file->table_lock);
106 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", 111 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
107 obj, exec[i].handle, i); 112 obj, exec[i].handle, i);
108 return -EINVAL; 113 ret = -EINVAL;
114 goto out;
109 } 115 }
110 116
111 drm_gem_object_reference(&obj->base); 117 drm_gem_object_reference(&obj->base);
112 list_add_tail(&obj->exec_list, &eb->objects); 118 list_add_tail(&obj->obj_exec_link, &objects);
119 }
120 spin_unlock(&file->table_lock);
121
122 i = 0;
123 list_for_each_entry(obj, &objects, obj_exec_link) {
124 struct i915_vma *vma;
113 125
114 obj->exec_entry = &exec[i]; 126 /*
127 * NOTE: We can leak any vmas created here when something fails
128 * later on. But that's no issue since vma_unbind can deal with
129 * vmas which are not actually bound. And since only
130 * lookup_or_create exists as an interface to get at the vma
131 * from an (obj, vm) pair, we don't run the risk of creating
132 * duplicate vmas for the same vm.
133 */
134 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
135 if (IS_ERR(vma)) {
136 DRM_DEBUG("Failed to lookup VMA\n");
137 ret = PTR_ERR(vma);
138 goto out;
139 }
140
141 list_add_tail(&vma->exec_list, &eb->vmas);
142
143 vma->exec_entry = &exec[i];
115 if (eb->and < 0) { 144 if (eb->and < 0) {
116 eb->lut[i] = obj; 145 eb->lut[i] = vma;
117 } else { 146 } else {
118 uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle; 147 uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
119 obj->exec_handle = handle; 148 vma->exec_handle = handle;
120 hlist_add_head(&obj->exec_node, 149 hlist_add_head(&vma->exec_node,
121 &eb->buckets[handle & eb->and]); 150 &eb->buckets[handle & eb->and]);
122 } 151 }
152 ++i;
123 } 153 }
124 spin_unlock(&file->table_lock);
125 154
126 return 0; 155
156out:
157 while (!list_empty(&objects)) {
158 obj = list_first_entry(&objects,
159 struct drm_i915_gem_object,
160 obj_exec_link);
161 list_del_init(&obj->obj_exec_link);
162 if (ret)
163 drm_gem_object_unreference(&obj->base);
164 }
165 return ret;
127} 166}
128 167
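
eb_lookup_vmas() is now a two-phase lookup: under file->table_lock, a spinlock, it only resolves handles and takes references onto a local list; VMA lookup-or-create, which may sleep in the allocator, runs after the unlock so GFP_ATOMIC is never needed. The shape of that pattern, with hypothetical helpers standing in for the idr and vma code:

	#include <linux/errno.h>
	#include <linux/kref.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct toy_obj { struct kref ref; };
	struct toy_table { spinlock_t lock; };

	struct toy_obj *toy_find(struct toy_table *t, u32 handle);	/* no alloc */
	int toy_create_vma(struct toy_obj *obj, gfp_t gfp);		/* may sleep */

	static int toy_lookup(struct toy_table *t, struct toy_obj **objs,
			      u32 *handles, int n)
	{
		int i;

		spin_lock(&t->lock);
		for (i = 0; i < n; i++) {
			objs[i] = toy_find(t, handles[i]);
			if (!objs[i]) {
				spin_unlock(&t->lock);
				return -ENOENT;
			}
			kref_get(&objs[i]->ref);	/* pin past the unlock */
		}
		spin_unlock(&t->lock);

		/* GFP_KERNEL is legal only here, outside the spinlock. */
		for (i = 0; i < n; i++) {
			int ret = toy_create_vma(objs[i], GFP_KERNEL);
			if (ret)
				return ret;	/* caller drops the refs */
		}
		return 0;
	}
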
129static struct drm_i915_gem_object * 168static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
130eb_get_object(struct eb_objects *eb, unsigned long handle)
131{ 169{
132 if (eb->and < 0) { 170 if (eb->and < 0) {
133 if (handle >= -eb->and) 171 if (handle >= -eb->and)
@@ -139,34 +177,33 @@ eb_get_object(struct eb_objects *eb, unsigned long handle)
139 177
140 head = &eb->buckets[handle & eb->and]; 178 head = &eb->buckets[handle & eb->and];
141 hlist_for_each(node, head) { 179 hlist_for_each(node, head) {
142 struct drm_i915_gem_object *obj; 180 struct i915_vma *vma;
143 181
144 obj = hlist_entry(node, struct drm_i915_gem_object, exec_node); 182 vma = hlist_entry(node, struct i915_vma, exec_node);
145 if (obj->exec_handle == handle) 183 if (vma->exec_handle == handle)
146 return obj; 184 return vma;
147 } 185 }
148 return NULL; 186 return NULL;
149 } 187 }
150} 188}
151 189
152static void 190static void eb_destroy(struct eb_vmas *eb) {
153eb_destroy(struct eb_objects *eb) 191 while (!list_empty(&eb->vmas)) {
154{ 192 struct i915_vma *vma;
155 while (!list_empty(&eb->objects)) {
156 struct drm_i915_gem_object *obj;
157 193
158 obj = list_first_entry(&eb->objects, 194 vma = list_first_entry(&eb->vmas,
159 struct drm_i915_gem_object, 195 struct i915_vma,
160 exec_list); 196 exec_list);
161 list_del_init(&obj->exec_list); 197 list_del_init(&vma->exec_list);
162 drm_gem_object_unreference(&obj->base); 198 drm_gem_object_unreference(&vma->obj->base);
163 } 199 }
164 kfree(eb); 200 kfree(eb);
165} 201}
166 202
167static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) 203static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
168{ 204{
169 return (obj->base.write_domain == I915_GEM_DOMAIN_CPU || 205 return (HAS_LLC(obj->base.dev) ||
206 obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
170 !obj->map_and_fenceable || 207 !obj->map_and_fenceable ||
171 obj->cache_level != I915_CACHE_NONE); 208 obj->cache_level != I915_CACHE_NONE);
172} 209}
@@ -179,7 +216,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
179 char *vaddr; 216 char *vaddr;
180 int ret = -EINVAL; 217 int ret = -EINVAL;
181 218
182 ret = i915_gem_object_set_to_cpu_domain(obj, 1); 219 ret = i915_gem_object_set_to_cpu_domain(obj, true);
183 if (ret) 220 if (ret)
184 return ret; 221 return ret;
185 222
@@ -223,22 +260,24 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
223 260
224static int 261static int
225i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, 262i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
226 struct eb_objects *eb, 263 struct eb_vmas *eb,
227 struct drm_i915_gem_relocation_entry *reloc, 264 struct drm_i915_gem_relocation_entry *reloc,
228 struct i915_address_space *vm) 265 struct i915_address_space *vm)
229{ 266{
230 struct drm_device *dev = obj->base.dev; 267 struct drm_device *dev = obj->base.dev;
231 struct drm_gem_object *target_obj; 268 struct drm_gem_object *target_obj;
232 struct drm_i915_gem_object *target_i915_obj; 269 struct drm_i915_gem_object *target_i915_obj;
270 struct i915_vma *target_vma;
233 uint32_t target_offset; 271 uint32_t target_offset;
234 int ret = -EINVAL; 272 int ret = -EINVAL;
235 273
236 /* we already hold a reference to all valid objects */ 274
237 target_obj = &eb_get_object(eb, reloc->target_handle)->base; 275 target_vma = eb_get_vma(eb, reloc->target_handle);
238 if (unlikely(target_obj == NULL)) 276 if (unlikely(target_vma == NULL))
239 return -ENOENT; 277 return -ENOENT;
278 target_i915_obj = target_vma->obj;
279 target_obj = &target_vma->obj->base;
240 280
241 target_i915_obj = to_intel_bo(target_obj);
242 target_offset = i915_gem_obj_ggtt_offset(target_i915_obj); 281 target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
243 282
244 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and 283 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
@@ -320,14 +359,13 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
320} 359}
321 360
322static int 361static int
323i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, 362i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
324 struct eb_objects *eb, 363 struct eb_vmas *eb)
325 struct i915_address_space *vm)
326{ 364{
327#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) 365#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
328 struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)]; 366 struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
329 struct drm_i915_gem_relocation_entry __user *user_relocs; 367 struct drm_i915_gem_relocation_entry __user *user_relocs;
330 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 368 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
331 int remain, ret; 369 int remain, ret;
332 370
333 user_relocs = to_user_ptr(entry->relocs_ptr); 371 user_relocs = to_user_ptr(entry->relocs_ptr);
@@ -346,8 +384,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
346 do { 384 do {
347 u64 offset = r->presumed_offset; 385 u64 offset = r->presumed_offset;
348 386
349 ret = i915_gem_execbuffer_relocate_entry(obj, eb, r, 387 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
350 vm); 388 vma->vm);
351 if (ret) 389 if (ret)
352 return ret; 390 return ret;
353 391
@@ -368,17 +406,16 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
368} 406}
369 407
370static int 408static int
371i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, 409i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
372 struct eb_objects *eb, 410 struct eb_vmas *eb,
373 struct drm_i915_gem_relocation_entry *relocs, 411 struct drm_i915_gem_relocation_entry *relocs)
374 struct i915_address_space *vm)
375{ 412{
376 const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 413 const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
377 int i, ret; 414 int i, ret;
378 415
379 for (i = 0; i < entry->relocation_count; i++) { 416 for (i = 0; i < entry->relocation_count; i++) {
380 ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i], 417 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
381 vm); 418 vma->vm);
382 if (ret) 419 if (ret)
383 return ret; 420 return ret;
384 } 421 }
@@ -387,10 +424,10 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
387} 424}
388 425
389static int 426static int
390i915_gem_execbuffer_relocate(struct eb_objects *eb, 427i915_gem_execbuffer_relocate(struct eb_vmas *eb,
391 struct i915_address_space *vm) 428 struct i915_address_space *vm)
392{ 429{
393 struct drm_i915_gem_object *obj; 430 struct i915_vma *vma;
394 int ret = 0; 431 int ret = 0;
395 432
396 /* This is the fast path and we cannot handle a pagefault whilst 433 /* This is the fast path and we cannot handle a pagefault whilst
@@ -401,8 +438,8 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
401 * lockdep complains vehemently. 438 * lockdep complains vehemently.
402 */ 439 */
403 pagefault_disable(); 440 pagefault_disable();
404 list_for_each_entry(obj, &eb->objects, exec_list) { 441 list_for_each_entry(vma, &eb->vmas, exec_list) {
405 ret = i915_gem_execbuffer_relocate_object(obj, eb, vm); 442 ret = i915_gem_execbuffer_relocate_vma(vma, eb);
406 if (ret) 443 if (ret)
407 break; 444 break;
408 } 445 }
@@ -415,31 +452,32 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
415#define __EXEC_OBJECT_HAS_FENCE (1<<30) 452#define __EXEC_OBJECT_HAS_FENCE (1<<30)
416 453
417static int 454static int
418need_reloc_mappable(struct drm_i915_gem_object *obj) 455need_reloc_mappable(struct i915_vma *vma)
419{ 456{
420 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 457 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
421 return entry->relocation_count && !use_cpu_reloc(obj); 458 return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
459 i915_is_ggtt(vma->vm);
422} 460}
423 461
424static int 462static int
425i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, 463i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
426 struct intel_ring_buffer *ring, 464 struct intel_ring_buffer *ring,
427 struct i915_address_space *vm, 465 bool *need_reloc)
428 bool *need_reloc)
429{ 466{
430 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 467 struct drm_i915_private *dev_priv = ring->dev->dev_private;
431 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 468 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
432 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; 469 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
433 bool need_fence, need_mappable; 470 bool need_fence, need_mappable;
471 struct drm_i915_gem_object *obj = vma->obj;
434 int ret; 472 int ret;
435 473
436 need_fence = 474 need_fence =
437 has_fenced_gpu_access && 475 has_fenced_gpu_access &&
438 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 476 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
439 obj->tiling_mode != I915_TILING_NONE; 477 obj->tiling_mode != I915_TILING_NONE;
440 need_mappable = need_fence || need_reloc_mappable(obj); 478 need_mappable = need_fence || need_reloc_mappable(vma);
441 479
442 ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable, 480 ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
443 false); 481 false);
444 if (ret) 482 if (ret)
445 return ret; 483 return ret;
@@ -467,8 +505,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
467 obj->has_aliasing_ppgtt_mapping = 1; 505 obj->has_aliasing_ppgtt_mapping = 1;
468 } 506 }
469 507
470 if (entry->offset != i915_gem_obj_offset(obj, vm)) { 508 if (entry->offset != vma->node.start) {
471 entry->offset = i915_gem_obj_offset(obj, vm); 509 entry->offset = vma->node.start;
472 *need_reloc = true; 510 *need_reloc = true;
473 } 511 }
474 512
@@ -485,14 +523,15 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
485} 523}
486 524
487static void 525static void
488i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) 526i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
489{ 527{
490 struct drm_i915_gem_exec_object2 *entry; 528 struct drm_i915_gem_exec_object2 *entry;
529 struct drm_i915_gem_object *obj = vma->obj;
491 530
492 if (!i915_gem_obj_bound_any(obj)) 531 if (!drm_mm_node_allocated(&vma->node))
493 return; 532 return;
494 533
495 entry = obj->exec_entry; 534 entry = vma->exec_entry;
496 535
497 if (entry->flags & __EXEC_OBJECT_HAS_FENCE) 536 if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
498 i915_gem_object_unpin_fence(obj); 537 i915_gem_object_unpin_fence(obj);
@@ -505,41 +544,46 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
505 544
506static int 545static int
507i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, 546i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
508 struct list_head *objects, 547 struct list_head *vmas,
509 struct i915_address_space *vm,
510 bool *need_relocs) 548 bool *need_relocs)
511{ 549{
512 struct drm_i915_gem_object *obj; 550 struct drm_i915_gem_object *obj;
513 struct list_head ordered_objects; 551 struct i915_vma *vma;
552 struct i915_address_space *vm;
553 struct list_head ordered_vmas;
514 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; 554 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
515 int retry; 555 int retry;
516 556
517 INIT_LIST_HEAD(&ordered_objects); 557 if (list_empty(vmas))
518 while (!list_empty(objects)) { 558 return 0;
559
560 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
561
562 INIT_LIST_HEAD(&ordered_vmas);
563 while (!list_empty(vmas)) {
519 struct drm_i915_gem_exec_object2 *entry; 564 struct drm_i915_gem_exec_object2 *entry;
520 bool need_fence, need_mappable; 565 bool need_fence, need_mappable;
521 566
522 obj = list_first_entry(objects, 567 vma = list_first_entry(vmas, struct i915_vma, exec_list);
523 struct drm_i915_gem_object, 568 obj = vma->obj;
524 exec_list); 569 entry = vma->exec_entry;
525 entry = obj->exec_entry;
526 570
527 need_fence = 571 need_fence =
528 has_fenced_gpu_access && 572 has_fenced_gpu_access &&
529 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 573 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
530 obj->tiling_mode != I915_TILING_NONE; 574 obj->tiling_mode != I915_TILING_NONE;
531 need_mappable = need_fence || need_reloc_mappable(obj); 575 need_mappable = need_fence || need_reloc_mappable(vma);
532 576
533 if (need_mappable) 577 if (need_mappable)
534 list_move(&obj->exec_list, &ordered_objects); 578 list_move(&vma->exec_list, &ordered_vmas);
535 else 579 else
536 list_move_tail(&obj->exec_list, &ordered_objects); 580 list_move_tail(&vma->exec_list, &ordered_vmas);
537 581
538 obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND; 582 obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
539 obj->base.pending_write_domain = 0; 583 obj->base.pending_write_domain = 0;
540 obj->pending_fenced_gpu_access = false; 584 obj->pending_fenced_gpu_access = false;
541 } 585 }
542 list_splice(&ordered_objects, objects); 586 list_splice(&ordered_vmas, vmas);
543 587
544 /* Attempt to pin all of the buffers into the GTT. 588 /* Attempt to pin all of the buffers into the GTT.
545 * This is done in 3 phases: 589 * This is done in 3 phases:
@@ -558,52 +602,52 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
558 int ret = 0; 602 int ret = 0;
559 603
560 /* Unbind any ill-fitting objects or pin. */ 604 /* Unbind any ill-fitting objects or pin. */
561 list_for_each_entry(obj, objects, exec_list) { 605 list_for_each_entry(vma, vmas, exec_list) {
562 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 606 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
563 bool need_fence, need_mappable; 607 bool need_fence, need_mappable;
564 u32 obj_offset;
565 608
566 if (!i915_gem_obj_bound(obj, vm)) 609 obj = vma->obj;
610
611 if (!drm_mm_node_allocated(&vma->node))
567 continue; 612 continue;
568 613
569 obj_offset = i915_gem_obj_offset(obj, vm);
570 need_fence = 614 need_fence =
571 has_fenced_gpu_access && 615 has_fenced_gpu_access &&
572 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 616 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
573 obj->tiling_mode != I915_TILING_NONE; 617 obj->tiling_mode != I915_TILING_NONE;
574 need_mappable = need_fence || need_reloc_mappable(obj); 618 need_mappable = need_fence || need_reloc_mappable(vma);
575 619
576 WARN_ON((need_mappable || need_fence) && 620 WARN_ON((need_mappable || need_fence) &&
577 !i915_is_ggtt(vm)); 621 !i915_is_ggtt(vma->vm));
578 622
579 if ((entry->alignment && 623 if ((entry->alignment &&
580 obj_offset & (entry->alignment - 1)) || 624 vma->node.start & (entry->alignment - 1)) ||
581 (need_mappable && !obj->map_and_fenceable)) 625 (need_mappable && !obj->map_and_fenceable))
582 ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm)); 626 ret = i915_vma_unbind(vma);
583 else 627 else
584 ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs); 628 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
585 if (ret) 629 if (ret)
586 goto err; 630 goto err;
587 } 631 }
588 632
589 /* Bind fresh objects */ 633 /* Bind fresh objects */
590 list_for_each_entry(obj, objects, exec_list) { 634 list_for_each_entry(vma, vmas, exec_list) {
591 if (i915_gem_obj_bound(obj, vm)) 635 if (drm_mm_node_allocated(&vma->node))
592 continue; 636 continue;
593 637
594 ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs); 638 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
595 if (ret) 639 if (ret)
596 goto err; 640 goto err;
597 } 641 }
598 642
599err: /* Decrement pin count for bound objects */ 643err: /* Decrement pin count for bound objects */
600 list_for_each_entry(obj, objects, exec_list) 644 list_for_each_entry(vma, vmas, exec_list)
601 i915_gem_execbuffer_unreserve_object(obj); 645 i915_gem_execbuffer_unreserve_vma(vma);
602 646
603 if (ret != -ENOSPC || retry++) 647 if (ret != -ENOSPC || retry++)
604 return ret; 648 return ret;
605 649
606 ret = i915_gem_evict_everything(ring->dev); 650 ret = i915_gem_evict_vm(vm, true);
607 if (ret) 651 if (ret)
608 return ret; 652 return ret;
609 } while (1); 653 } while (1);
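
The loop above encodes a fixed retry discipline: unbind misfits, pin what remains, bind anything fresh, and on -ENOSPC evict the whole VM exactly once before one final attempt. Stripped of the pinning details, the control flow is:

	#include <errno.h>

	int reserve_all(void);	/* hypothetical: pins every buffer or -ENOSPC */
	int evict_vm(void);	/* hypothetical: flushes the address space */

	int reserve_with_retry(void)
	{
		int retry = 0;

		do {
			int ret = reserve_all();

			/* success, a hard error, or a second -ENOSPC all end it */
			if (ret != -ENOSPC || retry++)
				return ret;

			ret = evict_vm();	/* one full eviction, then retry */
			if (ret)
				return ret;
		} while (1);
	}
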
@@ -614,24 +658,27 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
614 struct drm_i915_gem_execbuffer2 *args, 658 struct drm_i915_gem_execbuffer2 *args,
615 struct drm_file *file, 659 struct drm_file *file,
616 struct intel_ring_buffer *ring, 660 struct intel_ring_buffer *ring,
617 struct eb_objects *eb, 661 struct eb_vmas *eb,
618 struct drm_i915_gem_exec_object2 *exec, 662 struct drm_i915_gem_exec_object2 *exec)
619 struct i915_address_space *vm)
620{ 663{
621 struct drm_i915_gem_relocation_entry *reloc; 664 struct drm_i915_gem_relocation_entry *reloc;
622 struct drm_i915_gem_object *obj; 665 struct i915_address_space *vm;
666 struct i915_vma *vma;
623 bool need_relocs; 667 bool need_relocs;
624 int *reloc_offset; 668 int *reloc_offset;
625 int i, total, ret; 669 int i, total, ret;
626 int count = args->buffer_count; 670 unsigned count = args->buffer_count;
671
672 if (WARN_ON(list_empty(&eb->vmas)))
673 return 0;
674
675 vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
627 676
628 /* We may process another execbuffer during the unlock... */ 677 /* We may process another execbuffer during the unlock... */
629 while (!list_empty(&eb->objects)) { 678 while (!list_empty(&eb->vmas)) {
630 obj = list_first_entry(&eb->objects, 679 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
631 struct drm_i915_gem_object, 680 list_del_init(&vma->exec_list);
632 exec_list); 681 drm_gem_object_unreference(&vma->obj->base);
633 list_del_init(&obj->exec_list);
634 drm_gem_object_unreference(&obj->base);
635 } 682 }
636 683
637 mutex_unlock(&dev->struct_mutex); 684 mutex_unlock(&dev->struct_mutex);
@@ -695,20 +742,19 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
695 742
696 /* reacquire the objects */ 743 /* reacquire the objects */
697 eb_reset(eb); 744 eb_reset(eb);
698 ret = eb_lookup_objects(eb, exec, args, file); 745 ret = eb_lookup_vmas(eb, exec, args, vm, file);
699 if (ret) 746 if (ret)
700 goto err; 747 goto err;
701 748
702 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; 749 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
703 ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs); 750 ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
704 if (ret) 751 if (ret)
705 goto err; 752 goto err;
706 753
707 list_for_each_entry(obj, &eb->objects, exec_list) { 754 list_for_each_entry(vma, &eb->vmas, exec_list) {
708 int offset = obj->exec_entry - exec; 755 int offset = vma->exec_entry - exec;
709 ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, 756 ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
710 reloc + reloc_offset[offset], 757 reloc + reloc_offset[offset]);
711 vm);
712 if (ret) 758 if (ret)
713 goto err; 759 goto err;
714 } 760 }
@@ -727,14 +773,15 @@ err:
727 773
728static int 774static int
729i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, 775i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
730 struct list_head *objects) 776 struct list_head *vmas)
731{ 777{
732 struct drm_i915_gem_object *obj; 778 struct i915_vma *vma;
733 uint32_t flush_domains = 0; 779 uint32_t flush_domains = 0;
734 bool flush_chipset = false; 780 bool flush_chipset = false;
735 int ret; 781 int ret;
736 782
737 list_for_each_entry(obj, objects, exec_list) { 783 list_for_each_entry(vma, vmas, exec_list) {
784 struct drm_i915_gem_object *obj = vma->obj;
738 ret = i915_gem_object_sync(obj, ring); 785 ret = i915_gem_object_sync(obj, ring);
739 if (ret) 786 if (ret)
740 return ret; 787 return ret;
@@ -771,8 +818,8 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
771 int count) 818 int count)
772{ 819{
773 int i; 820 int i;
774 int relocs_total = 0; 821 unsigned relocs_total = 0;
775 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry); 822 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
776 823
777 for (i = 0; i < count; i++) { 824 for (i = 0; i < count; i++) {
778 char __user *ptr = to_user_ptr(exec[i].relocs_ptr); 825 char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
@@ -809,13 +856,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
809} 856}
810 857
811static void 858static void
812i915_gem_execbuffer_move_to_active(struct list_head *objects, 859i915_gem_execbuffer_move_to_active(struct list_head *vmas,
813 struct i915_address_space *vm,
814 struct intel_ring_buffer *ring) 860 struct intel_ring_buffer *ring)
815{ 861{
816 struct drm_i915_gem_object *obj; 862 struct i915_vma *vma;
817 863
818 list_for_each_entry(obj, objects, exec_list) { 864 list_for_each_entry(vma, vmas, exec_list) {
865 struct drm_i915_gem_object *obj = vma->obj;
819 u32 old_read = obj->base.read_domains; 866 u32 old_read = obj->base.read_domains;
820 u32 old_write = obj->base.write_domain; 867 u32 old_write = obj->base.write_domain;
821 868
@@ -825,9 +872,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
825 obj->base.read_domains = obj->base.pending_read_domains; 872 obj->base.read_domains = obj->base.pending_read_domains;
826 obj->fenced_gpu_access = obj->pending_fenced_gpu_access; 873 obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
827 874
828 /* FIXME: This lookup gets fixed later <-- danvet */ 875 i915_vma_move_to_active(vma, ring);
829 list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
830 i915_gem_object_move_to_active(obj, ring);
831 if (obj->base.write_domain) { 876 if (obj->base.write_domain) {
832 obj->dirty = 1; 877 obj->dirty = 1;
833 obj->last_write_seqno = intel_ring_get_seqno(ring); 878 obj->last_write_seqno = intel_ring_get_seqno(ring);
@@ -885,10 +930,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
885 struct i915_address_space *vm) 930 struct i915_address_space *vm)
886{ 931{
887 drm_i915_private_t *dev_priv = dev->dev_private; 932 drm_i915_private_t *dev_priv = dev->dev_private;
888 struct eb_objects *eb; 933 struct eb_vmas *eb;
889 struct drm_i915_gem_object *batch_obj; 934 struct drm_i915_gem_object *batch_obj;
890 struct drm_clip_rect *cliprects = NULL; 935 struct drm_clip_rect *cliprects = NULL;
891 struct intel_ring_buffer *ring; 936 struct intel_ring_buffer *ring;
937 struct i915_ctx_hang_stats *hs;
892 u32 ctx_id = i915_execbuffer2_get_context_id(*args); 938 u32 ctx_id = i915_execbuffer2_get_context_id(*args);
893 u32 exec_start, exec_len; 939 u32 exec_start, exec_len;
894 u32 mask, flags; 940 u32 mask, flags;
@@ -1000,7 +1046,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1000 return -EINVAL; 1046 return -EINVAL;
1001 } 1047 }
1002 1048
1003 cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), 1049 cliprects = kcalloc(args->num_cliprects,
1050 sizeof(*cliprects),
1004 GFP_KERNEL); 1051 GFP_KERNEL);
1005 if (cliprects == NULL) { 1052 if (cliprects == NULL) {
1006 ret = -ENOMEM; 1053 ret = -ENOMEM;
@@ -1025,7 +1072,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1025 goto pre_mutex_err; 1072 goto pre_mutex_err;
1026 } 1073 }
1027 1074
1028 eb = eb_create(args); 1075 eb = eb_create(args, vm);
1029 if (eb == NULL) { 1076 if (eb == NULL) {
1030 mutex_unlock(&dev->struct_mutex); 1077 mutex_unlock(&dev->struct_mutex);
1031 ret = -ENOMEM; 1078 ret = -ENOMEM;
@@ -1033,18 +1080,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1033 } 1080 }
1034 1081
1035 /* Look up object handles */ 1082 /* Look up object handles */
1036 ret = eb_lookup_objects(eb, exec, args, file); 1083 ret = eb_lookup_vmas(eb, exec, args, vm, file);
1037 if (ret) 1084 if (ret)
1038 goto err; 1085 goto err;
1039 1086
1040 /* take note of the batch buffer before we might reorder the lists */ 1087 /* take note of the batch buffer before we might reorder the lists */
1041 batch_obj = list_entry(eb->objects.prev, 1088 batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
1042 struct drm_i915_gem_object,
1043 exec_list);
1044 1089
1045 /* Move the objects en-masse into the GTT, evicting if necessary. */ 1090 /* Move the objects en-masse into the GTT, evicting if necessary. */
1046 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; 1091 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1047 ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs); 1092 ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
1048 if (ret) 1093 if (ret)
1049 goto err; 1094 goto err;
1050 1095
@@ -1054,7 +1099,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1054 if (ret) { 1099 if (ret) {
1055 if (ret == -EFAULT) { 1100 if (ret == -EFAULT) {
1056 ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, 1101 ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
1057 eb, exec, vm); 1102 eb, exec);
1058 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 1103 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1059 } 1104 }
1060 if (ret) 1105 if (ret)
@@ -1076,10 +1121,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1076 if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) 1121 if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
1077 i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); 1122 i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
1078 1123
1079 ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects); 1124 ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
1080 if (ret) 1125 if (ret)
1081 goto err; 1126 goto err;
1082 1127
1128 hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
1129 if (IS_ERR(hs)) {
1130 ret = PTR_ERR(hs);
1131 goto err;
1132 }
1133
1134 if (hs->banned) {
1135 ret = -EIO;
1136 goto err;
1137 }
1138
1083 ret = i915_switch_context(ring, file, ctx_id); 1139 ret = i915_switch_context(ring, file, ctx_id);
1084 if (ret) 1140 if (ret)
1085 goto err; 1141 goto err;
@@ -1131,7 +1187,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1131 1187
1132 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); 1188 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
1133 1189
1134 i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring); 1190 i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
1135 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); 1191 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
1136 1192
1137err: 1193err:
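
Note the new gate added before i915_switch_context() above: contexts whose hang statistics mark them banned are refused with -EIO before any ring state is touched, and the lookup itself follows the usual IS_ERR/PTR_ERR convention. Reduced to its skeleton (get_hang_stats() is a stand-in for the driver call):

	#include <linux/err.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	struct hang_stats { bool banned; };

	struct hang_stats *get_hang_stats(u32 ctx_id);	/* hypothetical lookup */

	int check_ctx_allowed(u32 ctx_id)
	{
		struct hang_stats *hs = get_hang_stats(ctx_id);

		if (IS_ERR(hs))
			return PTR_ERR(hs);	/* lookup failure propagates */
		if (hs->banned)
			return -EIO;		/* banned clients get no GPU time */
		return 0;
	}
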
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 212f6d8c35ec..e999496532c6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -336,7 +336,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
336 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; 336 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
337 ppgtt->base.cleanup = gen6_ppgtt_cleanup; 337 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
338 ppgtt->base.scratch = dev_priv->gtt.base.scratch; 338 ppgtt->base.scratch = dev_priv->gtt.base.scratch;
339 ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, 339 ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
340 GFP_KERNEL); 340 GFP_KERNEL);
341 if (!ppgtt->pt_pages) 341 if (!ppgtt->pt_pages)
342 return -ENOMEM; 342 return -ENOMEM;
@@ -347,7 +347,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
347 goto err_pt_alloc; 347 goto err_pt_alloc;
348 } 348 }
349 349
350 ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries, 350 ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
351 GFP_KERNEL); 351 GFP_KERNEL);
352 if (!ppgtt->pt_dma_addr) 352 if (!ppgtt->pt_dma_addr)
353 goto err_pt_alloc; 353 goto err_pt_alloc;
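
The kzalloc(n * size) to kcalloc(n, size) conversions here (and in the tiling and error-capture files below) are defensive: when the count is influenced by userspace or hardware configuration, the open-coded multiplication can wrap and silently under-allocate, while kcalloc() detects the overflow and returns NULL. The safe shape:

	#include <linux/slab.h>
	#include <linux/types.h>

	/* n may be large and attacker-influenced in the worst case. */
	static u32 *alloc_table(size_t n)
	{
		/* kzalloc(n * sizeof(u32), GFP_KERNEL) could wrap;
		 * kcalloc() fails cleanly if n * sizeof(u32) overflows. */
		return kcalloc(n, sizeof(u32), GFP_KERNEL);
	}
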
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index e15a1d90037d..d284d892ed94 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -395,7 +395,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
395 if (gtt_offset == I915_GTT_OFFSET_NONE) 395 if (gtt_offset == I915_GTT_OFFSET_NONE)
396 return obj; 396 return obj;
397 397
398 vma = i915_gem_vma_create(obj, ggtt); 398 vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
399 if (IS_ERR(vma)) { 399 if (IS_ERR(vma)) {
400 ret = PTR_ERR(vma); 400 ret = PTR_ERR(vma);
401 goto err_out; 401 goto err_out;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 032e9ef9c896..ac9ebe98f8b0 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -393,7 +393,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
393 /* Try to preallocate memory required to save swizzling on put-pages */ 393 /* Try to preallocate memory required to save swizzling on put-pages */
394 if (i915_gem_object_needs_bit17_swizzle(obj)) { 394 if (i915_gem_object_needs_bit17_swizzle(obj)) {
395 if (obj->bit_17 == NULL) { 395 if (obj->bit_17 == NULL) {
396 obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) * 396 obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
397 sizeof(long), GFP_KERNEL); 397 sizeof(long), GFP_KERNEL);
398 } 398 }
399 } else { 399 } else {
@@ -504,8 +504,8 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
504 int i; 504 int i;
505 505
506 if (obj->bit_17 == NULL) { 506 if (obj->bit_17 == NULL) {
507 obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * 507 obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
508 sizeof(long), GFP_KERNEL); 508 sizeof(long), GFP_KERNEL);
509 if (obj->bit_17 == NULL) { 509 if (obj->bit_17 == NULL) {
510 DRM_ERROR("Failed to allocate memory for bit 17 " 510 DRM_ERROR("Failed to allocate memory for bit 17 "
511 "record\n"); 511 "record\n");
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index dae364f0028c..915c8ca08969 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -215,6 +215,24 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
215 } 215 }
216} 216}
217 217
218static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
219{
220 switch (a) {
221 case HANGCHECK_IDLE:
222 return "idle";
223 case HANGCHECK_WAIT:
224 return "wait";
225 case HANGCHECK_ACTIVE:
226 return "active";
227 case HANGCHECK_KICK:
228 return "kick";
229 case HANGCHECK_HUNG:
230 return "hung";
231 }
232
233 return "unknown";
234}
235
218static void i915_ring_error_state(struct drm_i915_error_state_buf *m, 236static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
219 struct drm_device *dev, 237 struct drm_device *dev,
220 struct drm_i915_error_state *error, 238 struct drm_i915_error_state *error,
@@ -255,6 +273,9 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
255 err_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); 273 err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
256 err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); 274 err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
257 err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); 275 err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
276 err_printf(m, " hangcheck: %s [%d]\n",
277 hangcheck_action_to_str(error->hangcheck_action[ring]),
278 error->hangcheck_score[ring]);
258} 279}
259 280
260void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) 281void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -283,13 +304,14 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
283 err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 304 err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
284 error->time.tv_usec); 305 error->time.tv_usec);
285 err_printf(m, "Kernel: " UTS_RELEASE "\n"); 306 err_printf(m, "Kernel: " UTS_RELEASE "\n");
286 err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 307 err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
287 err_printf(m, "EIR: 0x%08x\n", error->eir); 308 err_printf(m, "EIR: 0x%08x\n", error->eir);
288 err_printf(m, "IER: 0x%08x\n", error->ier); 309 err_printf(m, "IER: 0x%08x\n", error->ier);
289 err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 310 err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
290 err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); 311 err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
291 err_printf(m, "DERRMR: 0x%08x\n", error->derrmr); 312 err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
292 err_printf(m, "CCID: 0x%08x\n", error->ccid); 313 err_printf(m, "CCID: 0x%08x\n", error->ccid);
314 err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
293 315
294 for (i = 0; i < dev_priv->num_fence_regs; i++) 316 for (i = 0; i < dev_priv->num_fence_regs; i++)
295 err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 317 err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
@@ -720,6 +742,9 @@ static void i915_record_ring_state(struct drm_device *dev,
720 742
721 error->cpu_ring_head[ring->id] = ring->head; 743 error->cpu_ring_head[ring->id] = ring->head;
722 error->cpu_ring_tail[ring->id] = ring->tail; 744 error->cpu_ring_tail[ring->id] = ring->tail;
745
746 error->hangcheck_score[ring->id] = ring->hangcheck.score;
747 error->hangcheck_action[ring->id] = ring->hangcheck.action;
723} 748}
724 749
725 750
@@ -769,7 +794,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
769 794
770 error->ring[i].num_requests = count; 795 error->ring[i].num_requests = count;
771 error->ring[i].requests = 796 error->ring[i].requests =
772 kmalloc(count*sizeof(struct drm_i915_error_request), 797 kcalloc(count, sizeof(*error->ring[i].requests),
773 GFP_ATOMIC); 798 GFP_ATOMIC);
774 if (error->ring[i].requests == NULL) { 799 if (error->ring[i].requests == NULL) {
775 error->ring[i].num_requests = 0; 800 error->ring[i].num_requests = 0;
@@ -811,7 +836,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
811 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx]; 836 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
812 837
813 if (i) { 838 if (i) {
814 active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC); 839 active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
815 if (active_bo) 840 if (active_bo)
816 pinned_bo = active_bo + error->active_bo_count[ndx]; 841 pinned_bo = active_bo + error->active_bo_count[ndx];
817 } 842 }
@@ -988,6 +1013,7 @@ const char *i915_cache_level_str(int type)
988 case I915_CACHE_NONE: return " uncached"; 1013 case I915_CACHE_NONE: return " uncached";
989 case I915_CACHE_LLC: return " snooped or LLC"; 1014 case I915_CACHE_LLC: return " snooped or LLC";
990 case I915_CACHE_L3_LLC: return " L3+LLC"; 1015 case I915_CACHE_L3_LLC: return " L3+LLC";
1016 case I915_CACHE_WT: return " WT";
991 default: return ""; 1017 default: return "";
992 } 1018 }
993} 1019}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4b91228fd9bd..d1739d3bdae9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -665,7 +665,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
665 crtc); 665 crtc);
666} 666}
667 667
668static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector) 668static bool intel_hpd_irq_event(struct drm_device *dev,
669 struct drm_connector *connector)
669{ 670{
670 enum drm_connector_status old_status; 671 enum drm_connector_status old_status;
671 672
@@ -673,11 +674,16 @@ static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *con
673 old_status = connector->status; 674 old_status = connector->status;
674 675
675 connector->status = connector->funcs->detect(connector, false); 676 connector->status = connector->funcs->detect(connector, false);
676 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", 677 if (old_status == connector->status)
678 return false;
679
680 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
677 connector->base.id, 681 connector->base.id,
678 drm_get_connector_name(connector), 682 drm_get_connector_name(connector),
679 old_status, connector->status); 683 drm_get_connector_status_name(old_status),
680 return (old_status != connector->status); 684 drm_get_connector_status_name(connector->status));
685
686 return true;
681} 687}
682 688
683/* 689/*
@@ -801,7 +807,7 @@ static void notify_ring(struct drm_device *dev,
801 if (ring->obj == NULL) 807 if (ring->obj == NULL)
802 return; 808 return;
803 809
804 trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); 810 trace_i915_gem_request_complete(ring);
805 811
806 wake_up_all(&ring->irq_queue); 812 wake_up_all(&ring->irq_queue);
807 i915_queue_hangcheck(dev); 813 i915_queue_hangcheck(dev);
@@ -812,7 +818,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
812 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 818 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
813 rps.work); 819 rps.work);
814 u32 pm_iir; 820 u32 pm_iir;
815 u8 new_delay; 821 int new_delay, adj;
816 822
817 spin_lock_irq(&dev_priv->irq_lock); 823 spin_lock_irq(&dev_priv->irq_lock);
818 pm_iir = dev_priv->rps.pm_iir; 824 pm_iir = dev_priv->rps.pm_iir;
@@ -829,40 +835,49 @@ static void gen6_pm_rps_work(struct work_struct *work)
829 835
830 mutex_lock(&dev_priv->rps.hw_lock); 836 mutex_lock(&dev_priv->rps.hw_lock);
831 837
838 adj = dev_priv->rps.last_adj;
832 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 839 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
833 new_delay = dev_priv->rps.cur_delay + 1; 840 if (adj > 0)
841 adj *= 2;
842 else
843 adj = 1;
844 new_delay = dev_priv->rps.cur_delay + adj;
834 845
835 /* 846 /*
836 * For better performance, jump directly 847 * For better performance, jump directly
837 * to RPe if we're below it. 848 * to RPe if we're below it.
838 */ 849 */
839 if (IS_VALLEYVIEW(dev_priv->dev) && 850 if (new_delay < dev_priv->rps.rpe_delay)
840 dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
841 new_delay = dev_priv->rps.rpe_delay; 851 new_delay = dev_priv->rps.rpe_delay;
842 } else 852 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
843 new_delay = dev_priv->rps.cur_delay - 1; 853 if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
854 new_delay = dev_priv->rps.rpe_delay;
855 else
856 new_delay = dev_priv->rps.min_delay;
857 adj = 0;
858 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
859 if (adj < 0)
860 adj *= 2;
861 else
862 adj = -1;
863 new_delay = dev_priv->rps.cur_delay + adj;
864 } else { /* unknown event */
865 new_delay = dev_priv->rps.cur_delay;
866 }
844 867
845 /* sysfs frequency interfaces may have snuck in while servicing the 868 /* sysfs frequency interfaces may have snuck in while servicing the
846 * interrupt 869 * interrupt
847 */ 870 */
848 if (new_delay >= dev_priv->rps.min_delay && 871 if (new_delay < (int)dev_priv->rps.min_delay)
849 new_delay <= dev_priv->rps.max_delay) { 872 new_delay = dev_priv->rps.min_delay;
850 if (IS_VALLEYVIEW(dev_priv->dev)) 873 if (new_delay > (int)dev_priv->rps.max_delay)
851 valleyview_set_rps(dev_priv->dev, new_delay); 874 new_delay = dev_priv->rps.max_delay;
852 else 875 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
853 gen6_set_rps(dev_priv->dev, new_delay); 876
854 } 877 if (IS_VALLEYVIEW(dev_priv->dev))
855 878 valleyview_set_rps(dev_priv->dev, new_delay);
856 if (IS_VALLEYVIEW(dev_priv->dev)) { 879 else
857 /* 880 gen6_set_rps(dev_priv->dev, new_delay);
858 * On VLV, when we enter RC6 we may not be at the minimum
859 * voltage level, so arm a timer to check. It should only
860 * fire when there's activity or once after we've entered
861 * RC6, and then won't be re-armed until the next RPS interrupt.
862 */
863 mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
864 msecs_to_jiffies(100));
865 }
866 881
867 mutex_unlock(&dev_priv->rps.hw_lock); 882 mutex_unlock(&dev_priv->rps.hw_lock);
868} 883}
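
The rework above replaces the fixed +/-1 frequency step with an adaptive one: consecutive moves in the same direction double the step (tracked in rps.last_adj), a down-timeout parks at the efficient point RPe or the floor, and the result is clamped to [min_delay, max_delay] before being handed to valleyview_set_rps()/gen6_set_rps(). A minimal user-space sketch of the same stepping policy; the EV_* flags and the values in main() are hypothetical stand-ins for the PM_IIR bits and the hardware limits:

#include <stdio.h>

/* Hypothetical stand-ins for the GEN6_PM_RP_* interrupt bits. */
#define EV_UP_THRESHOLD   (1 << 0)
#define EV_DOWN_THRESHOLD (1 << 1)
#define EV_DOWN_TIMEOUT   (1 << 2)

struct rps {
	int cur, min, max, rpe;	/* current / floor / ceiling / efficient */
	int last_adj;		/* signed step applied on the previous event */
};

/* Same shape as the kernel logic above: consecutive moves in one
 * direction double the step, a down-timeout parks at RPe (or the
 * floor), unknown events hold, and the result is clamped. */
static int rps_next(struct rps *r, unsigned ev)
{
	int adj = r->last_adj, new_delay;

	if (ev & EV_UP_THRESHOLD) {
		adj = adj > 0 ? adj * 2 : 1;
		new_delay = r->cur + adj;
		if (new_delay < r->rpe)		/* jump straight to RPe */
			new_delay = r->rpe;
	} else if (ev & EV_DOWN_TIMEOUT) {
		new_delay = r->cur > r->rpe ? r->rpe : r->min;
		adj = 0;
	} else if (ev & EV_DOWN_THRESHOLD) {
		adj = adj < 0 ? adj * 2 : -1;
		new_delay = r->cur + adj;
	} else {				/* unknown event */
		new_delay = r->cur;
	}

	if (new_delay < r->min)
		new_delay = r->min;
	if (new_delay > r->max)
		new_delay = r->max;
	r->last_adj = new_delay - r->cur;
	r->cur = new_delay;
	return new_delay;
}

int main(void)
{
	struct rps r = { .cur = 8, .min = 4, .max = 16, .rpe = 10, .last_adj = 0 };

	/* Three up events: 8 -> 10 (snap to RPe) -> 14 -> 16 (clamped). */
	for (int i = 0; i < 3; i++)
		printf("%d\n", rps_next(&r, EV_UP_THRESHOLD));
	return 0;
}
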
@@ -882,9 +897,10 @@ static void ivybridge_parity_work(struct work_struct *work)
882 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 897 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
883 l3_parity.error_work); 898 l3_parity.error_work);
884 u32 error_status, row, bank, subbank; 899 u32 error_status, row, bank, subbank;
885 char *parity_event[5]; 900 char *parity_event[6];
886 uint32_t misccpctl; 901 uint32_t misccpctl;
887 unsigned long flags; 902 unsigned long flags;
903 uint8_t slice = 0;
888 904
889 /* We must turn off DOP level clock gating to access the L3 registers. 905 /* We must turn off DOP level clock gating to access the L3 registers.
890 * In order to prevent a get/put style interface, acquire struct mutex 906 * In order to prevent a get/put style interface, acquire struct mutex
@@ -892,55 +908,81 @@ static void ivybridge_parity_work(struct work_struct *work)
892 */ 908 */
893 mutex_lock(&dev_priv->dev->struct_mutex); 909 mutex_lock(&dev_priv->dev->struct_mutex);
894 910
911 /* If we've screwed up tracking, just let the interrupt fire again */
912 if (WARN_ON(!dev_priv->l3_parity.which_slice))
913 goto out;
914
895 misccpctl = I915_READ(GEN7_MISCCPCTL); 915 misccpctl = I915_READ(GEN7_MISCCPCTL);
896 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 916 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
897 POSTING_READ(GEN7_MISCCPCTL); 917 POSTING_READ(GEN7_MISCCPCTL);
898 918
899 error_status = I915_READ(GEN7_L3CDERRST1); 919 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
900 row = GEN7_PARITY_ERROR_ROW(error_status); 920 u32 reg;
901 bank = GEN7_PARITY_ERROR_BANK(error_status);
902 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
903 921
904 I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | 922 slice--;
905 GEN7_L3CDERRST1_ENABLE); 923 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
906 POSTING_READ(GEN7_L3CDERRST1); 924 break;
907 925
908 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 926 dev_priv->l3_parity.which_slice &= ~(1<<slice);
909 927
910 spin_lock_irqsave(&dev_priv->irq_lock, flags); 928 reg = GEN7_L3CDERRST1 + (slice * 0x200);
911 ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
912 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
913 929
914 mutex_unlock(&dev_priv->dev->struct_mutex); 930 error_status = I915_READ(reg);
931 row = GEN7_PARITY_ERROR_ROW(error_status);
932 bank = GEN7_PARITY_ERROR_BANK(error_status);
933 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
934
935 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
936 POSTING_READ(reg);
915 937
916 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 938 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
917 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 939 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
918 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 940 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
919 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 941 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
920 parity_event[4] = NULL; 942 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
943 parity_event[5] = NULL;
921 944
922 kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, 945 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
923 KOBJ_CHANGE, parity_event); 946 KOBJ_CHANGE, parity_event);
924 947
925 DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", 948 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
926 row, bank, subbank); 949 slice, row, bank, subbank);
927 950
928 kfree(parity_event[3]); 951 kfree(parity_event[4]);
929 kfree(parity_event[2]); 952 kfree(parity_event[3]);
930 kfree(parity_event[1]); 953 kfree(parity_event[2]);
954 kfree(parity_event[1]);
955 }
956
957 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
958
959out:
960 WARN_ON(dev_priv->l3_parity.which_slice);
961 spin_lock_irqsave(&dev_priv->irq_lock, flags);
962 ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
963 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
964
965 mutex_unlock(&dev_priv->dev->struct_mutex);
931} 966}
932 967
933static void ivybridge_parity_error_irq_handler(struct drm_device *dev) 968static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
934{ 969{
935 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 970 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
936 971
937 if (!HAS_L3_GPU_CACHE(dev)) 972 if (!HAS_L3_DPF(dev))
938 return; 973 return;
939 974
940 spin_lock(&dev_priv->irq_lock); 975 spin_lock(&dev_priv->irq_lock);
941 ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT); 976 ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
942 spin_unlock(&dev_priv->irq_lock); 977 spin_unlock(&dev_priv->irq_lock);
943 978
979 iir &= GT_PARITY_ERROR(dev);
980 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
981 dev_priv->l3_parity.which_slice |= 1 << 1;
982
983 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
984 dev_priv->l3_parity.which_slice |= 1 << 0;
985
944 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 986 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
945} 987}
946 988
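
With multiple L3 slices, the IRQ handler above only records which slice(s) faulted in l3_parity.which_slice and queues the work item; the work handler then drains that bitmask with ffs(), reading the per-slice error-status register at GEN7_L3CDERRST1 + slice * 0x200 and emitting a uevent that now also carries SLICE=. A self-contained sketch of that set-bits-then-drain pattern (the slice count and the printf stand in for the register access and the uevent):

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define NUM_SLICES 2	/* illustrative; HSW exposes two L3 slices */

static unsigned int which_slice;	/* pending-work bitmask */

/* IRQ side: record which slice(s) raised a parity error. */
static void parity_irq(unsigned int slice_bits)
{
	which_slice |= slice_bits;
}

/* Work side: service every pending slice, lowest bit first,
 * mirroring the while ((slice = ffs(...)) != 0) loop above. */
static void parity_work(void)
{
	int slice;

	while ((slice = ffs(which_slice)) != 0) {
		slice--;	/* ffs() is 1-based */
		if (slice >= NUM_SLICES)
			break;
		which_slice &= ~(1u << slice);
		printf("servicing L3 parity error on slice %d\n", slice);
	}
}

int main(void)
{
	parity_irq((1 << 0) | (1 << 1));
	parity_work();
	return 0;
}
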
@@ -975,8 +1017,8 @@ static void snb_gt_irq_handler(struct drm_device *dev,
975 i915_handle_error(dev, false); 1017 i915_handle_error(dev, false);
976 } 1018 }
977 1019
978 if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1020 if (gt_iir & GT_PARITY_ERROR(dev))
979 ivybridge_parity_error_irq_handler(dev); 1021 ivybridge_parity_error_irq_handler(dev, gt_iir);
980} 1022}
981 1023
982#define HPD_STORM_DETECT_PERIOD 1000 1024#define HPD_STORM_DETECT_PERIOD 1000
@@ -1388,7 +1430,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1388 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1430 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1389 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 1431 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1390 irqreturn_t ret = IRQ_NONE; 1432 irqreturn_t ret = IRQ_NONE;
1391 bool err_int_reenable = false;
1392 1433
1393 atomic_inc(&dev_priv->irq_received); 1434 atomic_inc(&dev_priv->irq_received);
1394 1435
@@ -1412,17 +1453,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1412 POSTING_READ(SDEIER); 1453 POSTING_READ(SDEIER);
1413 } 1454 }
1414 1455
1415 /* On Haswell, also mask ERR_INT because we don't want to risk
1416 * generating "unclaimed register" interrupts from inside the interrupt
1417 * handler. */
1418 if (IS_HASWELL(dev)) {
1419 spin_lock(&dev_priv->irq_lock);
1420 err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
1421 if (err_int_reenable)
1422 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
1423 spin_unlock(&dev_priv->irq_lock);
1424 }
1425
1426 gt_iir = I915_READ(GTIIR); 1456 gt_iir = I915_READ(GTIIR);
1427 if (gt_iir) { 1457 if (gt_iir) {
1428 if (INTEL_INFO(dev)->gen >= 6) 1458 if (INTEL_INFO(dev)->gen >= 6)
@@ -1452,13 +1482,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1452 } 1482 }
1453 } 1483 }
1454 1484
1455 if (err_int_reenable) {
1456 spin_lock(&dev_priv->irq_lock);
1457 if (ivb_can_enable_err_int(dev))
1458 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
1459 spin_unlock(&dev_priv->irq_lock);
1460 }
1461
1462 I915_WRITE(DEIER, de_ier); 1485 I915_WRITE(DEIER, de_ier);
1463 POSTING_READ(DEIER); 1486 POSTING_READ(DEIER);
1464 if (!HAS_PCH_NOP(dev)) { 1487 if (!HAS_PCH_NOP(dev)) {
@@ -1516,7 +1539,7 @@ static void i915_error_work_func(struct work_struct *work)
1516 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 1539 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1517 int ret; 1540 int ret;
1518 1541
1519 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 1542 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
1520 1543
1521 /* 1544 /*
1522 * Note that there's only one work item which does gpu resets, so we 1545 * Note that there's only one work item which does gpu resets, so we
@@ -1530,7 +1553,7 @@ static void i915_error_work_func(struct work_struct *work)
1530 */ 1553 */
1531 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 1554 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
1532 DRM_DEBUG_DRIVER("resetting chip\n"); 1555 DRM_DEBUG_DRIVER("resetting chip\n");
1533 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, 1556 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
1534 reset_event); 1557 reset_event);
1535 1558
1536 /* 1559 /*
@@ -1557,7 +1580,7 @@ static void i915_error_work_func(struct work_struct *work)
1557 smp_mb__before_atomic_inc(); 1580 smp_mb__before_atomic_inc();
1558 atomic_inc(&dev_priv->gpu_error.reset_counter); 1581 atomic_inc(&dev_priv->gpu_error.reset_counter);
1559 1582
1560 kobject_uevent_env(&dev->primary->kdev.kobj, 1583 kobject_uevent_env(&dev->primary->kdev->kobj,
1561 KOBJ_CHANGE, reset_done_event); 1584 KOBJ_CHANGE, reset_done_event);
1562 } else { 1585 } else {
1563 atomic_set(&error->reset_counter, I915_WEDGED); 1586 atomic_set(&error->reset_counter, I915_WEDGED);
@@ -2021,12 +2044,17 @@ static void i915_hangcheck_elapsed(unsigned long data)
2021 2044
2022 if (ring->hangcheck.seqno == seqno) { 2045 if (ring->hangcheck.seqno == seqno) {
2023 if (ring_idle(ring, seqno)) { 2046 if (ring_idle(ring, seqno)) {
2047 ring->hangcheck.action = HANGCHECK_IDLE;
2048
2024 if (waitqueue_active(&ring->irq_queue)) { 2049 if (waitqueue_active(&ring->irq_queue)) {
2025 /* Issue a wake-up to catch stuck h/w. */ 2050 /* Issue a wake-up to catch stuck h/w. */
2026 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2051 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2027 ring->name); 2052 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2028 wake_up_all(&ring->irq_queue); 2053 ring->name);
2029 ring->hangcheck.score += HUNG; 2054 wake_up_all(&ring->irq_queue);
2055 }
2056 /* Safeguard against driver failure */
2057 ring->hangcheck.score += BUSY;
2030 } else 2058 } else
2031 busy = false; 2059 busy = false;
2032 } else { 2060 } else {
@@ -2049,6 +2077,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
2049 acthd); 2077 acthd);
2050 2078
2051 switch (ring->hangcheck.action) { 2079 switch (ring->hangcheck.action) {
2080 case HANGCHECK_IDLE:
2052 case HANGCHECK_WAIT: 2081 case HANGCHECK_WAIT:
2053 break; 2082 break;
2054 case HANGCHECK_ACTIVE: 2083 case HANGCHECK_ACTIVE:
@@ -2064,6 +2093,8 @@ static void i915_hangcheck_elapsed(unsigned long data)
2064 } 2093 }
2065 } 2094 }
2066 } else { 2095 } else {
2096 ring->hangcheck.action = HANGCHECK_ACTIVE;
2097
2067 /* Gradually reduce the count so that we catch DoS 2098 /* Gradually reduce the count so that we catch DoS
2068 * attempts across multiple batches. 2099 * attempts across multiple batches.
2069 */ 2100 */
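
The hangcheck path above now warns about a stuck-idle ring only on its first occurrence: test_and_set_bit() on gpu_error.missed_irq_rings gates the DRM_ERROR, while the wake-up and the score bump still happen every time. The 0->1 gating, modelled in user space (the kernel helper is atomic; this version is a plain non-atomic illustration):

#include <stdio.h>

static unsigned long missed_irq_rings;	/* one flag bit per ring */

/* Non-atomic stand-in for the kernel's test_and_set_bit(). */
static int test_and_set_bit_ul(int nr, unsigned long *addr)
{
	unsigned long mask = 1UL << nr;
	int old = !!(*addr & mask);

	*addr |= mask;
	return old;
}

static void hangcheck_idle(int ring_id, const char *name)
{
	/* Warn only on the first 0->1 transition for this ring. */
	if (!test_and_set_bit_ul(ring_id, &missed_irq_rings))
		fprintf(stderr, "Hangcheck timer elapsed... %s idle\n", name);
	/* the wake-up and score bump would run unconditionally here */
}

int main(void)
{
	hangcheck_idle(0, "render");	/* warns */
	hangcheck_idle(0, "render");	/* silent: already flagged */
	return 0;
}
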
@@ -2254,10 +2285,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
2254 pm_irqs = gt_irqs = 0; 2285 pm_irqs = gt_irqs = 0;
2255 2286
2256 dev_priv->gt_irq_mask = ~0; 2287 dev_priv->gt_irq_mask = ~0;
2257 if (HAS_L3_GPU_CACHE(dev)) { 2288 if (HAS_L3_DPF(dev)) {
2258 /* L3 parity interrupt is always unmasked. */ 2289 /* L3 parity interrupt is always unmasked. */
2259 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2290 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
2260 gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2291 gt_irqs |= GT_PARITY_ERROR(dev);
2261 } 2292 }
2262 2293
2263 gt_irqs |= GT_RENDER_USER_INTERRUPT; 2294 gt_irqs |= GT_RENDER_USER_INTERRUPT;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c159e1a6810f..95385023e0ba 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -264,6 +264,11 @@
264#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */ 264#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
265#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */ 265#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
266#define MI_SEMAPHORE_SYNC_INVALID (3<<16) 266#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
267
268#define MI_PREDICATE_RESULT_2 (0x2214)
269#define LOWER_SLICE_ENABLED (1<<0)
270#define LOWER_SLICE_DISABLED (0<<0)
271
267/* 272/*
268 * 3D instructions used by the kernel 273 * 3D instructions used by the kernel
269 */ 274 */
@@ -346,12 +351,25 @@
346#define IOSF_PORT_PUNIT 0x4 351#define IOSF_PORT_PUNIT 0x4
347#define IOSF_PORT_NC 0x11 352#define IOSF_PORT_NC 0x11
348#define IOSF_PORT_DPIO 0x12 353#define IOSF_PORT_DPIO 0x12
354#define IOSF_PORT_GPIO_NC 0x13
355#define IOSF_PORT_CCK 0x14
356#define IOSF_PORT_CCU 0xA9
357#define IOSF_PORT_GPS_CORE 0x48
349#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104) 358#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104)
350#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108) 359#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108)
351 360
352#define PUNIT_OPCODE_REG_READ 6 361#define PUNIT_OPCODE_REG_READ 6
353#define PUNIT_OPCODE_REG_WRITE 7 362#define PUNIT_OPCODE_REG_WRITE 7
354 363
364#define PUNIT_REG_PWRGT_CTRL 0x60
365#define PUNIT_REG_PWRGT_STATUS 0x61
366#define PUNIT_CLK_GATE 1
367#define PUNIT_PWR_RESET 2
368#define PUNIT_PWR_GATE 3
369#define RENDER_PWRGT (PUNIT_PWR_GATE << 0)
370#define MEDIA_PWRGT (PUNIT_PWR_GATE << 2)
371#define DISP2D_PWRGT (PUNIT_PWR_GATE << 6)
372
355#define PUNIT_REG_GPU_LFM 0xd3 373#define PUNIT_REG_GPU_LFM 0xd3
356#define PUNIT_REG_GPU_FREQ_REQ 0xd4 374#define PUNIT_REG_GPU_FREQ_REQ 0xd4
357#define PUNIT_REG_GPU_FREQ_STS 0xd8 375#define PUNIT_REG_GPU_FREQ_STS 0xd8
@@ -372,6 +390,40 @@
372#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27 390#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
373#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000 391#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
374 392
 393/* vlv2 north clock (CCK) registers: HPLL fuse and DSI PLL */
394#define CCK_FUSE_REG 0x8
395#define CCK_FUSE_HPLL_FREQ_MASK 0x3
396#define CCK_REG_DSI_PLL_FUSE 0x44
397#define CCK_REG_DSI_PLL_CONTROL 0x48
398#define DSI_PLL_VCO_EN (1 << 31)
399#define DSI_PLL_LDO_GATE (1 << 30)
400#define DSI_PLL_P1_POST_DIV_SHIFT 17
401#define DSI_PLL_P1_POST_DIV_MASK (0x1ff << 17)
402#define DSI_PLL_P2_MUX_DSI0_DIV2 (1 << 13)
403#define DSI_PLL_P3_MUX_DSI1_DIV2 (1 << 12)
404#define DSI_PLL_MUX_MASK (3 << 9)
405#define DSI_PLL_MUX_DSI0_DSIPLL (0 << 10)
406#define DSI_PLL_MUX_DSI0_CCK (1 << 10)
407#define DSI_PLL_MUX_DSI1_DSIPLL (0 << 9)
408#define DSI_PLL_MUX_DSI1_CCK (1 << 9)
409#define DSI_PLL_CLK_GATE_MASK (0xf << 5)
410#define DSI_PLL_CLK_GATE_DSI0_DSIPLL (1 << 8)
411#define DSI_PLL_CLK_GATE_DSI1_DSIPLL (1 << 7)
412#define DSI_PLL_CLK_GATE_DSI0_CCK (1 << 6)
413#define DSI_PLL_CLK_GATE_DSI1_CCK (1 << 5)
414#define DSI_PLL_LOCK (1 << 0)
415#define CCK_REG_DSI_PLL_DIVIDER 0x4c
416#define DSI_PLL_LFSR (1 << 31)
417#define DSI_PLL_FRACTION_EN (1 << 30)
418#define DSI_PLL_FRAC_COUNTER_SHIFT 27
419#define DSI_PLL_FRAC_COUNTER_MASK (7 << 27)
420#define DSI_PLL_USYNC_CNT_SHIFT 18
421#define DSI_PLL_USYNC_CNT_MASK (0x1ff << 18)
422#define DSI_PLL_N1_DIV_SHIFT 16
423#define DSI_PLL_N1_DIV_MASK (3 << 16)
424#define DSI_PLL_M1_DIV_SHIFT 0
425#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
426
375/* 427/*
376 * DPIO - a special bus for various display related registers to hide behind 428 * DPIO - a special bus for various display related registers to hide behind
377 * 429 *
@@ -387,7 +439,7 @@
387#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */ 439#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
388#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */ 440#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
389#define DPIO_SFR_BYPASS (1<<1) 441#define DPIO_SFR_BYPASS (1<<1)
390#define DPIO_RESET (1<<0) 442#define DPIO_CMNRST (1<<0)
391 443
392#define _DPIO_TX3_SWING_CTL4_A 0x690 444#define _DPIO_TX3_SWING_CTL4_A 0x690
393#define _DPIO_TX3_SWING_CTL4_B 0x2a90 445#define _DPIO_TX3_SWING_CTL4_B 0x2a90
@@ -886,6 +938,7 @@
886#define GT_BLT_USER_INTERRUPT (1 << 22) 938#define GT_BLT_USER_INTERRUPT (1 << 22)
887#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15) 939#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
888#define GT_BSD_USER_INTERRUPT (1 << 12) 940#define GT_BSD_USER_INTERRUPT (1 << 12)
941#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
889#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */ 942#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
890#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4) 943#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
891#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3) 944#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
@@ -896,6 +949,10 @@
896#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */ 949#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */
897#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */ 950#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */
898 951
952#define GT_PARITY_ERROR(dev) \
953 (GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
954 (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
955
899/* These are all the "old" interrupts */ 956/* These are all the "old" interrupts */
900#define ILK_BSD_USER_INTERRUPT (1<<5) 957#define ILK_BSD_USER_INTERRUPT (1<<5)
901#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) 958#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
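
GT_PARITY_ERROR(dev) lets the IRQ code mask, unmask and test both parity bits through one symbol: the slice-1 bit is folded in only on Haswell. Evaluated as plain C, with IS_HASWELL() reduced to a boolean parameter for the sketch:

#include <stdio.h>

#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11)	/* hsw+ */
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT    (1 << 5)

/* IS_HASWELL() reduced to a plain flag for this illustration. */
#define GT_PARITY_ERROR(is_hsw) \
	(GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
	 ((is_hsw) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))

int main(void)
{
	printf("ivb/vlv mask: 0x%x\n", GT_PARITY_ERROR(0));	/* 0x20  */
	printf("hsw mask:     0x%x\n", GT_PARITY_ERROR(1));	/* 0x820 */
	return 0;
}
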
@@ -1383,6 +1440,12 @@
1383 1440
1384#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504) 1441#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504)
1385 1442
1443#define CZCLK_CDCLK_FREQ_RATIO (VLV_DISPLAY_BASE + 0x6508)
1444#define CDCLK_FREQ_SHIFT 4
1445#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
1446#define CZCLK_FREQ_MASK 0xf
1447#define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510)
1448
1386/* 1449/*
1387 * Palette regs 1450 * Palette regs
1388 */ 1451 */
@@ -1400,6 +1463,8 @@
1400 * device 0 function 0's pci config register 0x44 or 0x48 and matches it in 1463 * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
1401 * every way. It is not accessible from the CP register read instructions. 1464 * every way. It is not accessible from the CP register read instructions.
1402 * 1465 *
1466 * Starting from Haswell, you can't write registers using the MCHBAR mirror,
1467 * just read.
1403 */ 1468 */
1404#define MCHBAR_MIRROR_BASE 0x10000 1469#define MCHBAR_MIRROR_BASE 0x10000
1405 1470
@@ -1749,6 +1814,9 @@
1749 */ 1814 */
1750#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE) 1815#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
1751 1816
1817#define VLV_CLK_CTL2 0x101104
1818#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
1819
1752/* 1820/*
1753 * Overlay regs 1821 * Overlay regs
1754 */ 1822 */
@@ -1800,7 +1868,8 @@
1800#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) 1868#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
1801 1869
1802/* HSW eDP PSR registers */ 1870/* HSW eDP PSR registers */
1803#define EDP_PSR_CTL 0x64800 1871#define EDP_PSR_BASE(dev) 0x64800
1872#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
1804#define EDP_PSR_ENABLE (1<<31) 1873#define EDP_PSR_ENABLE (1<<31)
1805#define EDP_PSR_LINK_DISABLE (0<<27) 1874#define EDP_PSR_LINK_DISABLE (0<<27)
1806#define EDP_PSR_LINK_STANDBY (1<<27) 1875#define EDP_PSR_LINK_STANDBY (1<<27)
@@ -1823,16 +1892,16 @@
1823#define EDP_PSR_TP1_TIME_0us (3<<4) 1892#define EDP_PSR_TP1_TIME_0us (3<<4)
1824#define EDP_PSR_IDLE_FRAME_SHIFT 0 1893#define EDP_PSR_IDLE_FRAME_SHIFT 0
1825 1894
1826#define EDP_PSR_AUX_CTL 0x64810 1895#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10)
1827#define EDP_PSR_AUX_DATA1 0x64814 1896#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14)
1828#define EDP_PSR_DPCD_COMMAND 0x80060000 1897#define EDP_PSR_DPCD_COMMAND 0x80060000
1829#define EDP_PSR_AUX_DATA2 0x64818 1898#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18)
1830#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24) 1899#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
1831#define EDP_PSR_AUX_DATA3 0x6481c 1900#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c)
1832#define EDP_PSR_AUX_DATA4 0x64820 1901#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20)
1833#define EDP_PSR_AUX_DATA5 0x64824 1902#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24)
1834 1903
1835#define EDP_PSR_STATUS_CTL 0x64840 1904#define EDP_PSR_STATUS_CTL(dev) (EDP_PSR_BASE(dev) + 0x40)
1836#define EDP_PSR_STATUS_STATE_MASK (7<<29) 1905#define EDP_PSR_STATUS_STATE_MASK (7<<29)
1837#define EDP_PSR_STATUS_STATE_IDLE (0<<29) 1906#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
1838#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29) 1907#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
@@ -1856,10 +1925,10 @@
1856#define EDP_PSR_STATUS_SENDING_TP1 (1<<4) 1925#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
1857#define EDP_PSR_STATUS_IDLE_MASK 0xf 1926#define EDP_PSR_STATUS_IDLE_MASK 0xf
1858 1927
1859#define EDP_PSR_PERF_CNT 0x64844 1928#define EDP_PSR_PERF_CNT(dev) (EDP_PSR_BASE(dev) + 0x44)
1860#define EDP_PSR_PERF_CNT_MASK 0xffffff 1929#define EDP_PSR_PERF_CNT_MASK 0xffffff
1861 1930
1862#define EDP_PSR_DEBUG_CTL 0x64860 1931#define EDP_PSR_DEBUG_CTL(dev) (EDP_PSR_BASE(dev) + 0x60)
1863#define EDP_PSR_DEBUG_MASK_LPSP (1<<27) 1932#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
1864#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26) 1933#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
1865#define EDP_PSR_DEBUG_MASK_HPD (1<<25) 1934#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
@@ -2030,6 +2099,7 @@
2030 2099
2031/* Gen 4 SDVO/HDMI bits: */ 2100/* Gen 4 SDVO/HDMI bits: */
2032#define SDVO_COLOR_FORMAT_8bpc (0 << 26) 2101#define SDVO_COLOR_FORMAT_8bpc (0 << 26)
2102#define SDVO_COLOR_FORMAT_MASK (7 << 26)
2033#define SDVO_ENCODING_SDVO (0 << 10) 2103#define SDVO_ENCODING_SDVO (0 << 10)
2034#define SDVO_ENCODING_HDMI (2 << 10) 2104#define SDVO_ENCODING_HDMI (2 << 10)
2035#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */ 2105#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
@@ -2982,6 +3052,7 @@
2982#define PIPECONF_DISABLE 0 3052#define PIPECONF_DISABLE 0
2983#define PIPECONF_DOUBLE_WIDE (1<<30) 3053#define PIPECONF_DOUBLE_WIDE (1<<30)
2984#define I965_PIPECONF_ACTIVE (1<<30) 3054#define I965_PIPECONF_ACTIVE (1<<30)
3055#define PIPECONF_DSI_PLL_LOCKED (1<<29) /* vlv & pipe A only */
2985#define PIPECONF_FRAME_START_DELAY_MASK (3<<27) 3056#define PIPECONF_FRAME_START_DELAY_MASK (3<<27)
2986#define PIPECONF_SINGLE_WIDE 0 3057#define PIPECONF_SINGLE_WIDE 0
2987#define PIPECONF_PIPE_UNLOCKED 0 3058#define PIPECONF_PIPE_UNLOCKED 0
@@ -4407,6 +4478,8 @@
4407#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200) 4478#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
4408#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204) 4479#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
4409#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208) 4480#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
4481#define PANEL_PORT_SELECT_DPB_VLV (1 << 30)
4482#define PANEL_PORT_SELECT_DPC_VLV (2 << 30)
4410#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c) 4483#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
4411#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210) 4484#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
4412 4485
@@ -4438,7 +4511,6 @@
4438#define PANEL_PORT_SELECT_MASK (3 << 30) 4511#define PANEL_PORT_SELECT_MASK (3 << 30)
4439#define PANEL_PORT_SELECT_LVDS (0 << 30) 4512#define PANEL_PORT_SELECT_LVDS (0 << 30)
4440#define PANEL_PORT_SELECT_DPA (1 << 30) 4513#define PANEL_PORT_SELECT_DPA (1 << 30)
4441#define EDP_PANEL (1 << 30)
4442#define PANEL_PORT_SELECT_DPC (2 << 30) 4514#define PANEL_PORT_SELECT_DPC (2 << 30)
4443#define PANEL_PORT_SELECT_DPD (3 << 30) 4515#define PANEL_PORT_SELECT_DPD (3 << 30)
4444#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000) 4516#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)
@@ -4447,11 +4519,6 @@
4447#define PANEL_LIGHT_ON_DELAY_SHIFT 0 4519#define PANEL_LIGHT_ON_DELAY_SHIFT 0
4448 4520
4449#define PCH_PP_OFF_DELAYS 0xc720c 4521#define PCH_PP_OFF_DELAYS 0xc720c
4450#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30)
4451#define PANEL_POWER_PORT_LVDS (0 << 30)
4452#define PANEL_POWER_PORT_DP_A (1 << 30)
4453#define PANEL_POWER_PORT_DP_C (2 << 30)
4454#define PANEL_POWER_PORT_DP_D (3 << 30)
4455#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) 4522#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
4456#define PANEL_POWER_DOWN_DELAY_SHIFT 16 4523#define PANEL_POWER_DOWN_DELAY_SHIFT 16
4457#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) 4524#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
@@ -4629,7 +4696,7 @@
4629#define GEN6_RP_UP_IDLE_MIN (0x1<<3) 4696#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
4630#define GEN6_RP_UP_BUSY_AVG (0x2<<3) 4697#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
4631#define GEN6_RP_UP_BUSY_CONT (0x4<<3) 4698#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
4632#define GEN7_RP_DOWN_IDLE_AVG (0x2<<0) 4699#define GEN6_RP_DOWN_IDLE_AVG (0x2<<0)
4633#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0) 4700#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
4634#define GEN6_RP_UP_THRESHOLD 0xA02C 4701#define GEN6_RP_UP_THRESHOLD 0xA02C
4635#define GEN6_RP_DOWN_THRESHOLD 0xA030 4702#define GEN6_RP_DOWN_THRESHOLD 0xA030
@@ -4674,6 +4741,10 @@
4674 GEN6_PM_RP_DOWN_TIMEOUT) 4741 GEN6_PM_RP_DOWN_TIMEOUT)
4675 4742
4676#define GEN6_GT_GFX_RC6_LOCKED 0x138104 4743#define GEN6_GT_GFX_RC6_LOCKED 0x138104
4744#define VLV_COUNTER_CONTROL 0x138104
4745#define VLV_COUNT_RANGE_HIGH (1<<15)
4746#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
4747#define VLV_RENDER_RC6_COUNT_EN (1<<0)
4677#define GEN6_GT_GFX_RC6 0x138108 4748#define GEN6_GT_GFX_RC6 0x138108
4678#define GEN6_GT_GFX_RC6p 0x13810C 4749#define GEN6_GT_GFX_RC6p 0x13810C
4679#define GEN6_GT_GFX_RC6pp 0x138110 4750#define GEN6_GT_GFX_RC6pp 0x138110
@@ -4685,6 +4756,8 @@
4685#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 4756#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
4686#define GEN6_PCODE_WRITE_RC6VIDS 0x4 4757#define GEN6_PCODE_WRITE_RC6VIDS 0x4
4687#define GEN6_PCODE_READ_RC6VIDS 0x5 4758#define GEN6_PCODE_READ_RC6VIDS 0x5
4759#define GEN6_PCODE_READ_D_COMP 0x10
4760#define GEN6_PCODE_WRITE_D_COMP 0x11
4688#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) 4761#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
4689#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) 4762#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
4690#define GEN6_PCODE_DATA 0x138128 4763#define GEN6_PCODE_DATA 0x138128
@@ -4704,6 +4777,7 @@
4704 4777
4705/* IVYBRIDGE DPF */ 4778/* IVYBRIDGE DPF */
4706#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */ 4779#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */
4780#define HSW_L3CDERRST11 0xB208 /* L3CD Error Status register 1 slice 1 */
4707#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14) 4781#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
4708#define GEN7_PARITY_ERROR_VALID (1<<13) 4782#define GEN7_PARITY_ERROR_VALID (1<<13)
4709#define GEN7_L3CDERRST1_BANK_MASK (3<<11) 4783#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
@@ -4717,6 +4791,7 @@
4717#define GEN7_L3CDERRST1_ENABLE (1<<7) 4791#define GEN7_L3CDERRST1_ENABLE (1<<7)
4718 4792
4719#define GEN7_L3LOG_BASE 0xB070 4793#define GEN7_L3LOG_BASE 0xB070
4794#define HSW_L3LOG_BASE_SLICE1 0xB270
4720#define GEN7_L3LOG_SIZE 0x80 4795#define GEN7_L3LOG_SIZE 0x80
4721 4796
4722#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */ 4797#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
@@ -5116,4 +5191,414 @@
5116#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME) 5191#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
5117#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO) 5192#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
5118 5193
5194/* VLV MIPI registers */
5195
5196#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
5197#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
5198#define MIPI_PORT_CTRL(pipe) _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
5199#define DPI_ENABLE (1 << 31) /* A + B */
5200#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
5201#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
5202#define DUAL_LINK_MODE_MASK (1 << 26)
5203#define DUAL_LINK_MODE_FRONT_BACK (0 << 26)
5204#define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26)
5205#define DITHERING_ENABLE (1 << 25) /* A + B */
5206#define FLOPPED_HSTX (1 << 23)
5207#define DE_INVERT (1 << 19) /* XXX */
5208#define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18
5209#define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18)
5210#define AFE_LATCHOUT (1 << 17)
5211#define LP_OUTPUT_HOLD (1 << 16)
5212#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
5213#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
5214#define MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT 11
5215#define MIPIB_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
5216#define CSB_SHIFT 9
5217#define CSB_MASK (3 << 9)
5218#define CSB_20MHZ (0 << 9)
5219#define CSB_10MHZ (1 << 9)
5220#define CSB_40MHZ (2 << 9)
5221#define BANDGAP_MASK (1 << 8)
5222#define BANDGAP_PNW_CIRCUIT (0 << 8)
5223#define BANDGAP_LNC_CIRCUIT (1 << 8)
5224#define MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
5225#define MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
5226#define TEARING_EFFECT_DELAY (1 << 4) /* A + B */
5227#define TEARING_EFFECT_SHIFT 2 /* A + B */
5228#define TEARING_EFFECT_MASK (3 << 2)
5229#define TEARING_EFFECT_OFF (0 << 2)
5230#define TEARING_EFFECT_DSI (1 << 2)
5231#define TEARING_EFFECT_GPIO (2 << 2)
5232#define LANE_CONFIGURATION_SHIFT 0
5233#define LANE_CONFIGURATION_MASK (3 << 0)
5234#define LANE_CONFIGURATION_4LANE (0 << 0)
5235#define LANE_CONFIGURATION_DUAL_LINK_A (1 << 0)
5236#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0)
5237
5238#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
5239#define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
5240#define MIPI_TEARING_CTRL(pipe) _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
5241#define TEARING_EFFECT_DELAY_SHIFT 0
5242#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
5243
5244/* XXX: all bits reserved */
5245#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
5246
5247/* MIPI DSI Controller and D-PHY registers */
5248
5249#define _MIPIA_DEVICE_READY (VLV_DISPLAY_BASE + 0xb000)
5250#define _MIPIB_DEVICE_READY (VLV_DISPLAY_BASE + 0xb800)
5251#define MIPI_DEVICE_READY(pipe) _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
5252#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
5253#define ULPS_STATE_MASK (3 << 1)
5254#define ULPS_STATE_ENTER (2 << 1)
5255#define ULPS_STATE_EXIT (1 << 1)
5256#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
5257#define DEVICE_READY (1 << 0)
5258
5259#define _MIPIA_INTR_STAT (VLV_DISPLAY_BASE + 0xb004)
5260#define _MIPIB_INTR_STAT (VLV_DISPLAY_BASE + 0xb804)
5261#define MIPI_INTR_STAT(pipe) _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
5262#define _MIPIA_INTR_EN (VLV_DISPLAY_BASE + 0xb008)
5263#define _MIPIB_INTR_EN (VLV_DISPLAY_BASE + 0xb808)
5264#define MIPI_INTR_EN(pipe) _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
5265#define TEARING_EFFECT (1 << 31)
5266#define SPL_PKT_SENT_INTERRUPT (1 << 30)
5267#define GEN_READ_DATA_AVAIL (1 << 29)
5268#define LP_GENERIC_WR_FIFO_FULL (1 << 28)
5269#define HS_GENERIC_WR_FIFO_FULL (1 << 27)
5270#define RX_PROT_VIOLATION (1 << 26)
5271#define RX_INVALID_TX_LENGTH (1 << 25)
5272#define ACK_WITH_NO_ERROR (1 << 24)
5273#define TURN_AROUND_ACK_TIMEOUT (1 << 23)
5274#define LP_RX_TIMEOUT (1 << 22)
5275#define HS_TX_TIMEOUT (1 << 21)
5276#define DPI_FIFO_UNDERRUN (1 << 20)
5277#define LOW_CONTENTION (1 << 19)
5278#define HIGH_CONTENTION (1 << 18)
5279#define TXDSI_VC_ID_INVALID (1 << 17)
5280#define TXDSI_DATA_TYPE_NOT_RECOGNISED (1 << 16)
5281#define TXCHECKSUM_ERROR (1 << 15)
5282#define TXECC_MULTIBIT_ERROR (1 << 14)
5283#define TXECC_SINGLE_BIT_ERROR (1 << 13)
5284#define TXFALSE_CONTROL_ERROR (1 << 12)
5285#define RXDSI_VC_ID_INVALID (1 << 11)
5286#define RXDSI_DATA_TYPE_NOT_REGOGNISED (1 << 10)
5287#define RXCHECKSUM_ERROR (1 << 9)
5288#define RXECC_MULTIBIT_ERROR (1 << 8)
5289#define RXECC_SINGLE_BIT_ERROR (1 << 7)
5290#define RXFALSE_CONTROL_ERROR (1 << 6)
5291#define RXHS_RECEIVE_TIMEOUT_ERROR (1 << 5)
5292#define RX_LP_TX_SYNC_ERROR (1 << 4)
5293#define RXEXCAPE_MODE_ENTRY_ERROR (1 << 3)
5294#define RXEOT_SYNC_ERROR (1 << 2)
5295#define RXSOT_SYNC_ERROR (1 << 1)
5296#define RXSOT_ERROR (1 << 0)
5297
5298#define _MIPIA_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb00c)
5299#define _MIPIB_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb80c)
5300#define MIPI_DSI_FUNC_PRG(pipe) _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
5301#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
5302#define CMD_MODE_NOT_SUPPORTED (0 << 13)
5303#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
5304#define CMD_MODE_DATA_WIDTH_9_BIT (2 << 13)
5305#define CMD_MODE_DATA_WIDTH_8_BIT (3 << 13)
5306#define CMD_MODE_DATA_WIDTH_OPTION1 (4 << 13)
5307#define CMD_MODE_DATA_WIDTH_OPTION2 (5 << 13)
5308#define VID_MODE_FORMAT_MASK (0xf << 7)
5309#define VID_MODE_NOT_SUPPORTED (0 << 7)
5310#define VID_MODE_FORMAT_RGB565 (1 << 7)
5311#define VID_MODE_FORMAT_RGB666 (2 << 7)
5312#define VID_MODE_FORMAT_RGB666_LOOSE (3 << 7)
5313#define VID_MODE_FORMAT_RGB888 (4 << 7)
5314#define CMD_MODE_CHANNEL_NUMBER_SHIFT 5
5315#define CMD_MODE_CHANNEL_NUMBER_MASK (3 << 5)
5316#define VID_MODE_CHANNEL_NUMBER_SHIFT 3
5317#define VID_MODE_CHANNEL_NUMBER_MASK (3 << 3)
5318#define DATA_LANES_PRG_REG_SHIFT 0
5319#define DATA_LANES_PRG_REG_MASK (7 << 0)
5320
5321#define _MIPIA_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb010)
5322#define _MIPIB_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb810)
5323#define MIPI_HS_TX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
5324#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
5325
5326#define _MIPIA_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb014)
5327#define _MIPIB_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb814)
5328#define MIPI_LP_RX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
5329#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
5330
5331#define _MIPIA_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb018)
5332#define _MIPIB_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb818)
5333#define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
5334#define TURN_AROUND_TIMEOUT_MASK 0x3f
5335
5336#define _MIPIA_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb01c)
5337#define _MIPIB_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb81c)
5338#define MIPI_DEVICE_RESET_TIMER(pipe) _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
5339#define DEVICE_RESET_TIMER_MASK 0xffff
5340
5341#define _MIPIA_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb020)
5342#define _MIPIB_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb820)
5343#define MIPI_DPI_RESOLUTION(pipe) _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
5344#define VERTICAL_ADDRESS_SHIFT 16
5345#define VERTICAL_ADDRESS_MASK (0xffff << 16)
5346#define HORIZONTAL_ADDRESS_SHIFT 0
5347#define HORIZONTAL_ADDRESS_MASK 0xffff
5348
5349#define _MIPIA_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb024)
5350#define _MIPIB_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb824)
5351#define MIPI_DBI_FIFO_THROTTLE(pipe) _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
5352#define DBI_FIFO_EMPTY_HALF (0 << 0)
5353#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
5354#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
5355
5356/* regs below are bits 15:0 */
5357#define _MIPIA_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb028)
5358#define _MIPIB_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb828)
5359#define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
5360
5361#define _MIPIA_HBP_COUNT (VLV_DISPLAY_BASE + 0xb02c)
5362#define _MIPIB_HBP_COUNT (VLV_DISPLAY_BASE + 0xb82c)
5363#define MIPI_HBP_COUNT(pipe) _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)
5364
5365#define _MIPIA_HFP_COUNT (VLV_DISPLAY_BASE + 0xb030)
5366#define _MIPIB_HFP_COUNT (VLV_DISPLAY_BASE + 0xb830)
5367#define MIPI_HFP_COUNT(pipe) _PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)
5368
5369#define _MIPIA_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb034)
5370#define _MIPIB_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb834)
5371#define MIPI_HACTIVE_AREA_COUNT(pipe) _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
5372
5373#define _MIPIA_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb038)
5374#define _MIPIB_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb838)
5375#define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
5376
5377#define _MIPIA_VBP_COUNT (VLV_DISPLAY_BASE + 0xb03c)
5378#define _MIPIB_VBP_COUNT (VLV_DISPLAY_BASE + 0xb83c)
5379#define MIPI_VBP_COUNT(pipe) _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)
5380
5381#define _MIPIA_VFP_COUNT (VLV_DISPLAY_BASE + 0xb040)
5382#define _MIPIB_VFP_COUNT (VLV_DISPLAY_BASE + 0xb840)
5383#define MIPI_VFP_COUNT(pipe) _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)
5384
5385#define _MIPIA_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb044)
5386#define _MIPIB_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb844)
5387#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe) _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
5388/* regs above are bits 15:0 */
5389
5390#define _MIPIA_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb048)
5391#define _MIPIB_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb848)
5392#define MIPI_DPI_CONTROL(pipe) _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
5393#define DPI_LP_MODE (1 << 6)
5394#define BACKLIGHT_OFF (1 << 5)
5395#define BACKLIGHT_ON (1 << 4)
5396#define COLOR_MODE_OFF (1 << 3)
5397#define COLOR_MODE_ON (1 << 2)
5398#define TURN_ON (1 << 1)
5399#define SHUTDOWN (1 << 0)
5400
5401#define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c)
5402#define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c)
5403#define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
5404#define COMMAND_BYTE_SHIFT 0
5405#define COMMAND_BYTE_MASK (0x3f << 0)
5406
5407#define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050)
5408#define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850)
5409#define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
5410#define MASTER_INIT_TIMER_SHIFT 0
5411#define MASTER_INIT_TIMER_MASK (0xffff << 0)
5412
5413#define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054)
5414#define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854)
5415#define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
5416#define MAX_RETURN_PKT_SIZE_SHIFT 0
5417#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
5418
5419#define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058)
5420#define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858)
5421#define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
5422#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
5423#define DISABLE_VIDEO_BTA (1 << 3)
5424#define IP_TG_CONFIG (1 << 2)
5425#define VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE (1 << 0)
5426#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
5427#define VIDEO_MODE_BURST (3 << 0)
5428
5429#define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c)
5430#define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c)
5431#define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
5432#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
5433#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
5434#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
5435#define HIGH_CONTENTION_RECOVERY_DISABLE (1 << 4)
5436#define TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3)
5437#define TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE (1 << 2)
5438#define CLOCKSTOP (1 << 1)
5439#define EOT_DISABLE (1 << 0)
5440
5441#define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060)
5442#define _MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860)
5443#define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
5444#define LP_BYTECLK_SHIFT 0
5445#define LP_BYTECLK_MASK (0xffff << 0)
5446
5447/* bits 31:0 */
5448#define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064)
5449#define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864)
5450#define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)
5451
5452/* bits 31:0 */
5453#define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068)
5454#define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868)
5455#define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)
5456
5457#define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c)
5458#define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c)
5459#define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
5460#define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070)
5461#define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870)
5462#define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
5463#define LONG_PACKET_WORD_COUNT_SHIFT 8
5464#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
5465#define SHORT_PACKET_PARAM_SHIFT 8
5466#define SHORT_PACKET_PARAM_MASK (0xffff << 8)
5467#define VIRTUAL_CHANNEL_SHIFT 6
5468#define VIRTUAL_CHANNEL_MASK (3 << 6)
5469#define DATA_TYPE_SHIFT 0
 5470#define DATA_TYPE_MASK (0x3f << 0)
5471/* data type values, see include/video/mipi_display.h */
5472
5473#define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074)
5474#define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874)
5475#define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
5476#define DPI_FIFO_EMPTY (1 << 28)
5477#define DBI_FIFO_EMPTY (1 << 27)
5478#define LP_CTRL_FIFO_EMPTY (1 << 26)
5479#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25)
5480#define LP_CTRL_FIFO_FULL (1 << 24)
5481#define HS_CTRL_FIFO_EMPTY (1 << 18)
5482#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17)
5483#define HS_CTRL_FIFO_FULL (1 << 16)
5484#define LP_DATA_FIFO_EMPTY (1 << 10)
5485#define LP_DATA_FIFO_HALF_EMPTY (1 << 9)
5486#define LP_DATA_FIFO_FULL (1 << 8)
5487#define HS_DATA_FIFO_EMPTY (1 << 2)
5488#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
5489#define HS_DATA_FIFO_FULL (1 << 0)
5490
5491#define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078)
5492#define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878)
5493#define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
5494#define DBI_HS_LP_MODE_MASK (1 << 0)
5495#define DBI_LP_MODE (1 << 0)
5496#define DBI_HS_MODE (0 << 0)
5497
5498#define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080)
5499#define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880)
5500#define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
5501#define EXIT_ZERO_COUNT_SHIFT 24
5502#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
5503#define TRAIL_COUNT_SHIFT 16
5504#define TRAIL_COUNT_MASK (0x1f << 16)
5505#define CLK_ZERO_COUNT_SHIFT 8
5506#define CLK_ZERO_COUNT_MASK (0xff << 8)
5507#define PREPARE_COUNT_SHIFT 0
5508#define PREPARE_COUNT_MASK (0x3f << 0)
5509
5510/* bits 31:0 */
5511#define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084)
5512#define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884)
5513#define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)
5514
5515#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088)
5516#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888)
5517#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
5518#define LP_HS_SSW_CNT_SHIFT 16
5519#define LP_HS_SSW_CNT_MASK (0xffff << 16)
5520#define HS_LP_PWR_SW_CNT_SHIFT 0
5521#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
5522
5523#define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c)
5524#define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c)
5525#define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
5526#define STOP_STATE_STALL_COUNTER_SHIFT 0
5527#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
5528
5529#define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090)
5530#define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890)
5531#define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
5532#define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb094)
5533#define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894)
5534#define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
5535#define RX_CONTENTION_DETECTED (1 << 0)
5536
5537/* XXX: only pipe A ?!? */
5538#define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100)
5539#define DBI_TYPEC_ENABLE (1 << 31)
5540#define DBI_TYPEC_WIP (1 << 30)
5541#define DBI_TYPEC_OPTION_SHIFT 28
5542#define DBI_TYPEC_OPTION_MASK (3 << 28)
5543#define DBI_TYPEC_FREQ_SHIFT 24
5544#define DBI_TYPEC_FREQ_MASK (0xf << 24)
5545#define DBI_TYPEC_OVERRIDE (1 << 8)
5546#define DBI_TYPEC_OVERRIDE_COUNTER_SHIFT 0
5547#define DBI_TYPEC_OVERRIDE_COUNTER_MASK (0xff << 0)
5548
5549
5550/* MIPI adapter registers */
5551
5552#define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104)
5553#define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904)
5554#define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
5555#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
5556#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
5557#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
5558#define ESCAPE_CLOCK_DIVIDER_2 (1 << 5)
5559#define ESCAPE_CLOCK_DIVIDER_4 (2 << 5)
5560#define READ_REQUEST_PRIORITY_SHIFT 3
5561#define READ_REQUEST_PRIORITY_MASK (3 << 3)
5562#define READ_REQUEST_PRIORITY_LOW (0 << 3)
5563#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
5564#define RGB_FLIP_TO_BGR (1 << 2)
5565
5566#define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108)
5567#define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908)
5568#define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
5569#define DATA_MEM_ADDRESS_SHIFT 5
5570#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
5571#define DATA_VALID (1 << 0)
5572
5573#define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c)
5574#define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c)
5575#define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
5576#define DATA_LENGTH_SHIFT 0
5577#define DATA_LENGTH_MASK (0xfffff << 0)
5578
5579#define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110)
5580#define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910)
5581#define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
5582#define COMMAND_MEM_ADDRESS_SHIFT 5
5583#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
5584#define AUTO_PWG_ENABLE (1 << 2)
5585#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
5586#define COMMAND_VALID (1 << 0)
5587
5588#define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114)
5589#define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914)
5590#define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
5591#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
5592#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
5593
5594#define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118)
5595#define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918)
5596#define MIPI_READ_DATA_RETURN(pipe, n) \
5597 (_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
5598
5599#define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138)
5600#define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938)
5601#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
5602#define READ_DATA_VALID(n) (1 << (n))
5603
5119#endif /* _I915_REG_H_ */ 5604#endif /* _I915_REG_H_ */
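
All of the new MIPI registers follow the file's usual two-instance convention: a _MIPIA_*/_MIPIB_* address pair selected through _PIPE(pipe, a, b). A compilable sketch of how the selector resolves; the addresses are the ones from the block above, and VLV_DISPLAY_BASE and _PIPE() match their definitions elsewhere in i915_reg.h:

#include <stdio.h>
#include <stdint.h>

#define VLV_DISPLAY_BASE 0x180000

/* Linear interpolation between the two instance addresses; for two
 * pipes this simply picks a (pipe 0) or b (pipe 1), and it extends
 * to any evenly spaced register instances. */
#define _PIPE(pipe, a, b) ((a) + (pipe) * ((b) - (a)))

#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
#define MIPI_PORT_CTRL(pipe) _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)

int main(void)
{
	printf("MIPI_PORT_CTRL(0) = 0x%x\n", (uint32_t)MIPI_PORT_CTRL(0));
	printf("MIPI_PORT_CTRL(1) = 0x%x\n", (uint32_t)MIPI_PORT_CTRL(1));
	return 0;
}
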
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 70db618989c4..3538370e3a47 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -340,7 +340,9 @@ int i915_save_state(struct drm_device *dev)
340 struct drm_i915_private *dev_priv = dev->dev_private; 340 struct drm_i915_private *dev_priv = dev->dev_private;
341 int i; 341 int i;
342 342
343 pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB); 343 if (INTEL_INFO(dev)->gen <= 4)
344 pci_read_config_byte(dev->pdev, LBB,
345 &dev_priv->regfile.saveLBB);
344 346
345 mutex_lock(&dev->struct_mutex); 347 mutex_lock(&dev->struct_mutex);
346 348
@@ -390,7 +392,9 @@ int i915_restore_state(struct drm_device *dev)
390 struct drm_i915_private *dev_priv = dev->dev_private; 392 struct drm_i915_private *dev_priv = dev->dev_private;
391 int i; 393 int i;
392 394
393 pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB); 395 if (INTEL_INFO(dev)->gen <= 4)
396 pci_write_config_byte(dev->pdev, LBB,
397 dev_priv->regfile.saveLBB);
394 398
395 mutex_lock(&dev->struct_mutex); 399 mutex_lock(&dev->struct_mutex);
396 400
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index c8c4112de110..1beec51b8e26 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,30 +32,50 @@
32#include "intel_drv.h" 32#include "intel_drv.h"
33#include "i915_drv.h" 33#include "i915_drv.h"
34 34
35#define dev_to_drm_minor(d) dev_get_drvdata((d))
36
35#ifdef CONFIG_PM 37#ifdef CONFIG_PM
36static u32 calc_residency(struct drm_device *dev, const u32 reg) 38static u32 calc_residency(struct drm_device *dev, const u32 reg)
37{ 39{
38 struct drm_i915_private *dev_priv = dev->dev_private; 40 struct drm_i915_private *dev_priv = dev->dev_private;
39 u64 raw_time; /* 32b value may overflow during fixed point math */ 41 u64 raw_time; /* 32b value may overflow during fixed point math */
42 u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
40 43
41 if (!intel_enable_rc6(dev)) 44 if (!intel_enable_rc6(dev))
42 return 0; 45 return 0;
43 46
44 raw_time = I915_READ(reg) * 128ULL; 47 /* On VLV, residency time is in CZ units rather than 1.28us */
45 return DIV_ROUND_UP_ULL(raw_time, 100000); 48 if (IS_VALLEYVIEW(dev)) {
49 u32 clkctl2;
50
51 clkctl2 = I915_READ(VLV_CLK_CTL2) >>
52 CLK_CTL2_CZCOUNT_30NS_SHIFT;
53 if (!clkctl2) {
54 WARN(!clkctl2, "bogus CZ count value");
55 return 0;
56 }
57 units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
58 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
59 units <<= 8;
60
61 div = 1000000ULL * bias;
62 }
63
64 raw_time = I915_READ(reg) * units;
65 return DIV_ROUND_UP_ULL(raw_time, div);
46} 66}
47 67
48static ssize_t 68static ssize_t
49show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) 69show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
50{ 70{
51 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); 71 struct drm_minor *dminor = dev_to_drm_minor(kdev);
52 return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev)); 72 return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
53} 73}
54 74
55static ssize_t 75static ssize_t
56show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf) 76show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
57{ 77{
58 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); 78 struct drm_minor *dminor = dev_get_drvdata(kdev);
59 u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6); 79 u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
60 return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency); 80 return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
61} 81}
@@ -63,16 +83,20 @@ show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
63static ssize_t 83static ssize_t
64show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf) 84show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
65{ 85{
66 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); 86 struct drm_minor *dminor = dev_to_drm_minor(kdev);
67 u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p); 87 u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
88 if (IS_VALLEYVIEW(dminor->dev))
89 rc6p_residency = 0;
68 return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency); 90 return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
69} 91}
70 92
71static ssize_t 93static ssize_t
72show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf) 94show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
73{ 95{
74 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); 96 struct drm_minor *dminor = dev_to_drm_minor(kdev);
75 u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp); 97 u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
98 if (IS_VALLEYVIEW(dminor->dev))
99 rc6pp_residency = 0;
76 return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency); 100 return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
77} 101}
78 102
@@ -97,7 +121,7 @@ static struct attribute_group rc6_attr_group = {
97 121
98static int l3_access_valid(struct drm_device *dev, loff_t offset) 122static int l3_access_valid(struct drm_device *dev, loff_t offset)
99{ 123{
100 if (!HAS_L3_GPU_CACHE(dev)) 124 if (!HAS_L3_DPF(dev))
101 return -EPERM; 125 return -EPERM;
102 126
103 if (offset % 4 != 0) 127 if (offset % 4 != 0)
@@ -115,31 +139,34 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
115 loff_t offset, size_t count) 139 loff_t offset, size_t count)
116{ 140{
117 struct device *dev = container_of(kobj, struct device, kobj); 141 struct device *dev = container_of(kobj, struct device, kobj);
118 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); 142 struct drm_minor *dminor = dev_to_drm_minor(dev);
119 struct drm_device *drm_dev = dminor->dev; 143 struct drm_device *drm_dev = dminor->dev;
120 struct drm_i915_private *dev_priv = drm_dev->dev_private; 144 struct drm_i915_private *dev_priv = drm_dev->dev_private;
121 uint32_t misccpctl; 145 int slice = (int)(uintptr_t)attr->private;
122 int i, ret; 146 int ret;
147
148 count = round_down(count, 4);
123 149
124 ret = l3_access_valid(drm_dev, offset); 150 ret = l3_access_valid(drm_dev, offset);
125 if (ret) 151 if (ret)
126 return ret; 152 return ret;
127 153
154 count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
155
128 ret = i915_mutex_lock_interruptible(drm_dev); 156 ret = i915_mutex_lock_interruptible(drm_dev);
129 if (ret) 157 if (ret)
130 return ret; 158 return ret;
131 159
132 misccpctl = I915_READ(GEN7_MISCCPCTL); 160 if (dev_priv->l3_parity.remap_info[slice])
133 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 161 memcpy(buf,
134 162 dev_priv->l3_parity.remap_info[slice] + (offset/4),
135 for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4) 163 count);
136 *((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i); 164 else
137 165 memset(buf, 0, count);
138 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
139 166
140 mutex_unlock(&drm_dev->struct_mutex); 167 mutex_unlock(&drm_dev->struct_mutex);
141 168
142 return i - offset; 169 return count;
143} 170}
144 171
145static ssize_t 172static ssize_t
@@ -148,21 +175,26 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
148 loff_t offset, size_t count) 175 loff_t offset, size_t count)
149{ 176{
150 struct device *dev = container_of(kobj, struct device, kobj); 177 struct device *dev = container_of(kobj, struct device, kobj);
151 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); 178 struct drm_minor *dminor = dev_to_drm_minor(dev);
152 struct drm_device *drm_dev = dminor->dev; 179 struct drm_device *drm_dev = dminor->dev;
153 struct drm_i915_private *dev_priv = drm_dev->dev_private; 180 struct drm_i915_private *dev_priv = drm_dev->dev_private;
181 struct i915_hw_context *ctx;
154 u32 *temp = NULL; /* Just here to make handling failures easy */ 182 u32 *temp = NULL; /* Just here to make handling failures easy */
183 int slice = (int)(uintptr_t)attr->private;
155 int ret; 184 int ret;
156 185
157 ret = l3_access_valid(drm_dev, offset); 186 ret = l3_access_valid(drm_dev, offset);
158 if (ret) 187 if (ret)
159 return ret; 188 return ret;
160 189
190 if (dev_priv->hw_contexts_disabled)
191 return -ENXIO;
192
161 ret = i915_mutex_lock_interruptible(drm_dev); 193 ret = i915_mutex_lock_interruptible(drm_dev);
162 if (ret) 194 if (ret)
163 return ret; 195 return ret;
164 196
165 if (!dev_priv->l3_parity.remap_info) { 197 if (!dev_priv->l3_parity.remap_info[slice]) {
166 temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL); 198 temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
167 if (!temp) { 199 if (!temp) {
168 mutex_unlock(&drm_dev->struct_mutex); 200 mutex_unlock(&drm_dev->struct_mutex);
@@ -182,13 +214,13 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
182 * at this point it is left as a TODO. 214 * at this point it is left as a TODO.
183 */ 215 */
184 if (temp) 216 if (temp)
185 dev_priv->l3_parity.remap_info = temp; 217 dev_priv->l3_parity.remap_info[slice] = temp;
186 218
187 memcpy(dev_priv->l3_parity.remap_info + (offset/4), 219 memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);
188 buf + (offset/4),
189 count);
190 220
191 i915_gem_l3_remap(drm_dev); 221 /* NB: We defer the remapping until we switch to the context */
222 list_for_each_entry(ctx, &dev_priv->context_list, link)
223 ctx->remap_slice |= (1<<slice);
192 224
193 mutex_unlock(&drm_dev->struct_mutex); 225 mutex_unlock(&drm_dev->struct_mutex);
194 226
@@ -200,13 +232,23 @@ static struct bin_attribute dpf_attrs = {
200 .size = GEN7_L3LOG_SIZE, 232 .size = GEN7_L3LOG_SIZE,
201 .read = i915_l3_read, 233 .read = i915_l3_read,
202 .write = i915_l3_write, 234 .write = i915_l3_write,
203 .mmap = NULL 235 .mmap = NULL,
236 .private = (void *)0
237};
238
239static struct bin_attribute dpf_attrs_1 = {
240 .attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
241 .size = GEN7_L3LOG_SIZE,
242 .read = i915_l3_read,
243 .write = i915_l3_write,
244 .mmap = NULL,
245 .private = (void *)1
204}; 246};
205 247
206static ssize_t gt_cur_freq_mhz_show(struct device *kdev, 248static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
207 struct device_attribute *attr, char *buf) 249 struct device_attribute *attr, char *buf)
208{ 250{
209 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 251 struct drm_minor *minor = dev_to_drm_minor(kdev);
210 struct drm_device *dev = minor->dev; 252 struct drm_device *dev = minor->dev;
211 struct drm_i915_private *dev_priv = dev->dev_private; 253 struct drm_i915_private *dev_priv = dev->dev_private;
212 int ret; 254 int ret;
@@ -227,7 +269,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
227static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, 269static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
228 struct device_attribute *attr, char *buf) 270 struct device_attribute *attr, char *buf)
229{ 271{
230 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 272 struct drm_minor *minor = dev_to_drm_minor(kdev);
231 struct drm_device *dev = minor->dev; 273 struct drm_device *dev = minor->dev;
232 struct drm_i915_private *dev_priv = dev->dev_private; 274 struct drm_i915_private *dev_priv = dev->dev_private;
233 275
@@ -238,7 +280,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
238 280
239static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) 281static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
240{ 282{
241 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 283 struct drm_minor *minor = dev_to_drm_minor(kdev);
242 struct drm_device *dev = minor->dev; 284 struct drm_device *dev = minor->dev;
243 struct drm_i915_private *dev_priv = dev->dev_private; 285 struct drm_i915_private *dev_priv = dev->dev_private;
244 int ret; 286 int ret;
@@ -257,7 +299,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
257 struct device_attribute *attr, 299 struct device_attribute *attr,
258 const char *buf, size_t count) 300 const char *buf, size_t count)
259{ 301{
260 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 302 struct drm_minor *minor = dev_to_drm_minor(kdev);
261 struct drm_device *dev = minor->dev; 303 struct drm_device *dev = minor->dev;
262 struct drm_i915_private *dev_priv = dev->dev_private; 304 struct drm_i915_private *dev_priv = dev->dev_private;
263 u32 val, rp_state_cap, hw_max, hw_min, non_oc_max; 305 u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
@@ -310,7 +352,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
310 352
311static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) 353static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
312{ 354{
313 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 355 struct drm_minor *minor = dev_to_drm_minor(kdev);
314 struct drm_device *dev = minor->dev; 356 struct drm_device *dev = minor->dev;
315 struct drm_i915_private *dev_priv = dev->dev_private; 357 struct drm_i915_private *dev_priv = dev->dev_private;
316 int ret; 358 int ret;
@@ -329,7 +371,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
329 struct device_attribute *attr, 371 struct device_attribute *attr,
330 const char *buf, size_t count) 372 const char *buf, size_t count)
331{ 373{
332 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 374 struct drm_minor *minor = dev_to_drm_minor(kdev);
333 struct drm_device *dev = minor->dev; 375 struct drm_device *dev = minor->dev;
334 struct drm_i915_private *dev_priv = dev->dev_private; 376 struct drm_i915_private *dev_priv = dev->dev_private;
335 u32 val, rp_state_cap, hw_max, hw_min; 377 u32 val, rp_state_cap, hw_max, hw_min;
@@ -388,7 +430,7 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
388/* For now we have a static number of RP states */ 430/* For now we have a static number of RP states */
389static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) 431static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
390{ 432{
391 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 433 struct drm_minor *minor = dev_to_drm_minor(kdev);
392 struct drm_device *dev = minor->dev; 434 struct drm_device *dev = minor->dev;
393 struct drm_i915_private *dev_priv = dev->dev_private; 435 struct drm_i915_private *dev_priv = dev->dev_private;
394 u32 val, rp_state_cap; 436 u32 val, rp_state_cap;
@@ -436,7 +478,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
436{ 478{
437 479
438 struct device *kdev = container_of(kobj, struct device, kobj); 480 struct device *kdev = container_of(kobj, struct device, kobj);
439 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 481 struct drm_minor *minor = dev_to_drm_minor(kdev);
440 struct drm_device *dev = minor->dev; 482 struct drm_device *dev = minor->dev;
441 struct i915_error_state_file_priv error_priv; 483 struct i915_error_state_file_priv error_priv;
442 struct drm_i915_error_state_buf error_str; 484 struct drm_i915_error_state_buf error_str;
@@ -471,7 +513,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
471 loff_t off, size_t count) 513 loff_t off, size_t count)
472{ 514{
473 struct device *kdev = container_of(kobj, struct device, kobj); 515 struct device *kdev = container_of(kobj, struct device, kobj);
474 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 516 struct drm_minor *minor = dev_to_drm_minor(kdev);
475 struct drm_device *dev = minor->dev; 517 struct drm_device *dev = minor->dev;
476 int ret; 518 int ret;
477 519
@@ -501,27 +543,34 @@ void i915_setup_sysfs(struct drm_device *dev)
501 543
502#ifdef CONFIG_PM 544#ifdef CONFIG_PM
503 if (INTEL_INFO(dev)->gen >= 6) { 545 if (INTEL_INFO(dev)->gen >= 6) {
504 ret = sysfs_merge_group(&dev->primary->kdev.kobj, 546 ret = sysfs_merge_group(&dev->primary->kdev->kobj,
505 &rc6_attr_group); 547 &rc6_attr_group);
506 if (ret) 548 if (ret)
507 DRM_ERROR("RC6 residency sysfs setup failed\n"); 549 DRM_ERROR("RC6 residency sysfs setup failed\n");
508 } 550 }
509#endif 551#endif
510 if (HAS_L3_GPU_CACHE(dev)) { 552 if (HAS_L3_DPF(dev)) {
511 ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs); 553 ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
512 if (ret) 554 if (ret)
513 DRM_ERROR("l3 parity sysfs setup failed\n"); 555 DRM_ERROR("l3 parity sysfs setup failed\n");
556
557 if (NUM_L3_SLICES(dev) > 1) {
558 ret = device_create_bin_file(dev->primary->kdev,
559 &dpf_attrs_1);
560 if (ret)
561 DRM_ERROR("l3 parity slice 1 setup failed\n");
562 }
514 } 563 }
515 564
516 ret = 0; 565 ret = 0;
517 if (IS_VALLEYVIEW(dev)) 566 if (IS_VALLEYVIEW(dev))
518 ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs); 567 ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
519 else if (INTEL_INFO(dev)->gen >= 6) 568 else if (INTEL_INFO(dev)->gen >= 6)
520 ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs); 569 ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
521 if (ret) 570 if (ret)
522 DRM_ERROR("RPS sysfs setup failed\n"); 571 DRM_ERROR("RPS sysfs setup failed\n");
523 572
524 ret = sysfs_create_bin_file(&dev->primary->kdev.kobj, 573 ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
525 &error_state_attr); 574 &error_state_attr);
526 if (ret) 575 if (ret)
527 DRM_ERROR("error_state sysfs setup failed\n"); 576 DRM_ERROR("error_state sysfs setup failed\n");
@@ -529,13 +578,14 @@ void i915_setup_sysfs(struct drm_device *dev)
529 578
530void i915_teardown_sysfs(struct drm_device *dev) 579void i915_teardown_sysfs(struct drm_device *dev)
531{ 580{
532 sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr); 581 sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
533 if (IS_VALLEYVIEW(dev)) 582 if (IS_VALLEYVIEW(dev))
534 sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs); 583 sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
535 else 584 else
536 sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs); 585 sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
537 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); 586 device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
587 device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
538#ifdef CONFIG_PM 588#ifdef CONFIG_PM
539 sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group); 589 sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
540#endif 590#endif
541} 591}
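
The sysfs hunks above split the single l3_parity file into one binary attribute per L3 slice (the slice index rides in bin_attribute.private) and make the read path serve the cached remap table instead of poking the GEN7_L3LOG registers. A minimal userspace sketch of the contract the read side now enforces (4-byte-aligned offsets, counts rounded down to a multiple of 4); the sysfs path, the card index and the privileges implied by S_IRUSR | S_IWUSR are assumptions, not part of this patch:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; which cardN is i915 depends on the system. */
	int fd = open("/sys/class/drm/card0/l3_parity", O_RDONLY);
	uint32_t row;
	off_t off;

	if (fd < 0)
		return 1;
	/* Offsets must be 4-byte aligned; see l3_access_valid() above. */
	for (off = 0; pread(fd, &row, sizeof(row), off) == sizeof(row); off += 4)
		printf("%#lx: %#x\n", (unsigned long)off, row);
	close(fd);
	return 0;
}
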
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index e2c5ee6f6194..6e580c98dede 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -233,6 +233,47 @@ TRACE_EVENT(i915_gem_evict_everything,
233 TP_printk("dev=%d", __entry->dev) 233 TP_printk("dev=%d", __entry->dev)
234); 234);
235 235
236TRACE_EVENT(i915_gem_evict_vm,
237 TP_PROTO(struct i915_address_space *vm),
238 TP_ARGS(vm),
239
240 TP_STRUCT__entry(
241 __field(struct i915_address_space *, vm)
242 ),
243
244 TP_fast_assign(
245 __entry->vm = vm;
246 ),
247
248 TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm)
249);
250
251TRACE_EVENT(i915_gem_ring_sync_to,
252 TP_PROTO(struct intel_ring_buffer *from,
253 struct intel_ring_buffer *to,
254 u32 seqno),
255 TP_ARGS(from, to, seqno),
256
257 TP_STRUCT__entry(
258 __field(u32, dev)
259 __field(u32, sync_from)
260 __field(u32, sync_to)
261 __field(u32, seqno)
262 ),
263
264 TP_fast_assign(
265 __entry->dev = from->dev->primary->index;
266 __entry->sync_from = from->id;
267 __entry->sync_to = to->id;
268 __entry->seqno = seqno;
269 ),
270
271 TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
272 __entry->dev,
273 __entry->sync_from, __entry->sync_to,
274 __entry->seqno)
275);
276
236TRACE_EVENT(i915_gem_ring_dispatch, 277TRACE_EVENT(i915_gem_ring_dispatch,
237 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags), 278 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
238 TP_ARGS(ring, seqno, flags), 279 TP_ARGS(ring, seqno, flags),
@@ -304,9 +345,24 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
304 TP_ARGS(ring, seqno) 345 TP_ARGS(ring, seqno)
305); 346);
306 347
307DEFINE_EVENT(i915_gem_request, i915_gem_request_complete, 348TRACE_EVENT(i915_gem_request_complete,
308 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), 349 TP_PROTO(struct intel_ring_buffer *ring),
309 TP_ARGS(ring, seqno) 350 TP_ARGS(ring),
351
352 TP_STRUCT__entry(
353 __field(u32, dev)
354 __field(u32, ring)
355 __field(u32, seqno)
356 ),
357
358 TP_fast_assign(
359 __entry->dev = ring->dev->primary->index;
360 __entry->ring = ring->id;
361 __entry->seqno = ring->get_seqno(ring, false);
362 ),
363
364 TP_printk("dev=%u, ring=%u, seqno=%u",
365 __entry->dev, __entry->ring, __entry->seqno)
310); 366);
311 367
312DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, 368DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
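
The TRACE_EVENT definitions above only generate trace_* stubs; what matters for users is the call-site signature. A sketch of the implied call sites (the surrounding code is an assumption, only the names and arguments follow from the definitions):

	trace_i915_gem_evict_vm(vm);                  /* vm is a struct i915_address_space * */
	trace_i915_gem_ring_sync_to(from, to, seqno); /* emitted when 'to' is made to wait on 'from' */
	trace_i915_gem_request_complete(ring);        /* seqno sampled via ring->get_seqno(ring, false) */

Note that i915_gem_request_complete is no longer a DEFINE_EVENT of the i915_gem_request class: it now takes only the ring and samples the seqno itself in TP_fast_assign.
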
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 53f2bed8bc5f..e29bcae1ef81 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -389,7 +389,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
389{ 389{
390 struct sdvo_device_mapping *p_mapping; 390 struct sdvo_device_mapping *p_mapping;
391 struct bdb_general_definitions *p_defs; 391 struct bdb_general_definitions *p_defs;
392 struct child_device_config *p_child; 392 union child_device_config *p_child;
393 int i, child_device_num, count; 393 int i, child_device_num, count;
394 u16 block_size; 394 u16 block_size;
395 395
@@ -416,36 +416,36 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
416 count = 0; 416 count = 0;
417 for (i = 0; i < child_device_num; i++) { 417 for (i = 0; i < child_device_num; i++) {
418 p_child = &(p_defs->devices[i]); 418 p_child = &(p_defs->devices[i]);
419 if (!p_child->device_type) { 419 if (!p_child->old.device_type) {
420 /* skip the device block if device type is invalid */ 420 /* skip the device block if device type is invalid */
421 continue; 421 continue;
422 } 422 }
423 if (p_child->slave_addr != SLAVE_ADDR1 && 423 if (p_child->old.slave_addr != SLAVE_ADDR1 &&
424 p_child->slave_addr != SLAVE_ADDR2) { 424 p_child->old.slave_addr != SLAVE_ADDR2) {
425 /* 425 /*
426 * If the slave address is neither 0x70 nor 0x72, 426 * If the slave address is neither 0x70 nor 0x72,
427 * it is not a SDVO device. Skip it. 427 * it is not a SDVO device. Skip it.
428 */ 428 */
429 continue; 429 continue;
430 } 430 }
431 if (p_child->dvo_port != DEVICE_PORT_DVOB && 431 if (p_child->old.dvo_port != DEVICE_PORT_DVOB &&
432 p_child->dvo_port != DEVICE_PORT_DVOC) { 432 p_child->old.dvo_port != DEVICE_PORT_DVOC) {
433 /* skip the incorrect SDVO port */ 433 /* skip the incorrect SDVO port */
434 DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n"); 434 DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
435 continue; 435 continue;
436 } 436 }
437 DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on" 437 DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
438 " %s port\n", 438 " %s port\n",
439 p_child->slave_addr, 439 p_child->old.slave_addr,
440 (p_child->dvo_port == DEVICE_PORT_DVOB) ? 440 (p_child->old.dvo_port == DEVICE_PORT_DVOB) ?
441 "SDVOB" : "SDVOC"); 441 "SDVOB" : "SDVOC");
442 p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]); 442 p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]);
443 if (!p_mapping->initialized) { 443 if (!p_mapping->initialized) {
444 p_mapping->dvo_port = p_child->dvo_port; 444 p_mapping->dvo_port = p_child->old.dvo_port;
445 p_mapping->slave_addr = p_child->slave_addr; 445 p_mapping->slave_addr = p_child->old.slave_addr;
446 p_mapping->dvo_wiring = p_child->dvo_wiring; 446 p_mapping->dvo_wiring = p_child->old.dvo_wiring;
447 p_mapping->ddc_pin = p_child->ddc_pin; 447 p_mapping->ddc_pin = p_child->old.ddc_pin;
448 p_mapping->i2c_pin = p_child->i2c_pin; 448 p_mapping->i2c_pin = p_child->old.i2c_pin;
449 p_mapping->initialized = 1; 449 p_mapping->initialized = 1;
450 DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", 450 DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
451 p_mapping->dvo_port, 451 p_mapping->dvo_port,
@@ -457,7 +457,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
457 DRM_DEBUG_KMS("Maybe one SDVO port is shared by " 457 DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
458 "two SDVO device.\n"); 458 "two SDVO device.\n");
459 } 459 }
460 if (p_child->slave2_addr) { 460 if (p_child->old.slave2_addr) {
461 /* Maybe this is a SDVO device with multiple inputs */ 461 /* Maybe this is a SDVO device with multiple inputs */
462 /* And the mapping info is not added */ 462 /* And the mapping info is not added */
463 DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this" 463 DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
@@ -477,15 +477,13 @@ static void
477parse_driver_features(struct drm_i915_private *dev_priv, 477parse_driver_features(struct drm_i915_private *dev_priv,
478 struct bdb_header *bdb) 478 struct bdb_header *bdb)
479{ 479{
480 struct drm_device *dev = dev_priv->dev;
481 struct bdb_driver_features *driver; 480 struct bdb_driver_features *driver;
482 481
483 driver = find_section(bdb, BDB_DRIVER_FEATURES); 482 driver = find_section(bdb, BDB_DRIVER_FEATURES);
484 if (!driver) 483 if (!driver)
485 return; 484 return;
486 485
487 if (SUPPORTS_EDP(dev) && 486 if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
488 driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
489 dev_priv->vbt.edp_support = 1; 487 dev_priv->vbt.edp_support = 1;
490 488
491 if (driver->dual_frequency) 489 if (driver->dual_frequency)
@@ -501,7 +499,7 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
501 499
502 edp = find_section(bdb, BDB_EDP); 500 edp = find_section(bdb, BDB_EDP);
503 if (!edp) { 501 if (!edp) {
504 if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->vbt.edp_support) 502 if (dev_priv->vbt.edp_support)
505 DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n"); 503 DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
506 return; 504 return;
507 } 505 }
@@ -569,11 +567,149 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
569} 567}
570 568
571static void 569static void
570parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
571{
572 struct bdb_mipi *mipi;
573
574 mipi = find_section(bdb, BDB_MIPI);
575 if (!mipi) {
576 DRM_DEBUG_KMS("No MIPI BDB found");
577 return;
578 }
579
580 /* XXX: add more info */
581 dev_priv->vbt.dsi.panel_id = mipi->panel_id;
582}
583
584static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
585 struct bdb_header *bdb)
586{
587 union child_device_config *it, *child = NULL;
588 struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
589 uint8_t hdmi_level_shift;
590 int i, j;
591 bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
592 uint8_t aux_channel;
593 /* Each DDI port can have more than one value on the "DVO Port" field,
594 * so look for all the possible values for each port and abort if more
595 * than one is found. */
596 int dvo_ports[][2] = {
597 {DVO_PORT_HDMIA, DVO_PORT_DPA},
598 {DVO_PORT_HDMIB, DVO_PORT_DPB},
599 {DVO_PORT_HDMIC, DVO_PORT_DPC},
600 {DVO_PORT_HDMID, DVO_PORT_DPD},
601 {DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ },
602 };
603
604 /* Find the child device to use, abort if more than one found. */
605 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
606 it = dev_priv->vbt.child_dev + i;
607
608 for (j = 0; j < 2; j++) {
609 if (dvo_ports[port][j] == -1)
610 break;
611
612 if (it->common.dvo_port == dvo_ports[port][j]) {
613 if (child) {
614 DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
615 port_name(port));
616 return;
617 }
618 child = it;
619 }
620 }
621 }
622 if (!child)
623 return;
624
625 aux_channel = child->raw[25];
626
627 is_dvi = child->common.device_type & (1 << 4);
628 is_dp = child->common.device_type & (1 << 2);
629 is_crt = child->common.device_type & (1 << 0);
630 is_hdmi = is_dvi && (child->common.device_type & (1 << 11)) == 0;
631 is_edp = is_dp && (child->common.device_type & (1 << 12));
632
633 info->supports_dvi = is_dvi;
634 info->supports_hdmi = is_hdmi;
635 info->supports_dp = is_dp;
636
637 DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
638 port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
639
640 if (is_edp && is_dvi)
641 DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
642 port_name(port));
643 if (is_crt && port != PORT_E)
644 DRM_DEBUG_KMS("Port %c is analog\n", port_name(port));
645 if (is_crt && (is_dvi || is_dp))
646 DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
647 port_name(port));
648 if (is_dvi && (port == PORT_A || port == PORT_E))
649 DRM_DEBUG_KMS("Port %c is TMDS compabile\n", port_name(port));
650 if (!is_dvi && !is_dp && !is_crt)
651 DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
652 port_name(port));
653 if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E))
654 DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
655
656 if (is_dvi) {
657 if (child->common.ddc_pin == 0x05 && port != PORT_B)
658 DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
659 if (child->common.ddc_pin == 0x04 && port != PORT_C)
660 DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
661 if (child->common.ddc_pin == 0x06 && port != PORT_D)
662 DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
663 }
664
665 if (is_dp) {
666 if (aux_channel == 0x40 && port != PORT_A)
667 DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
668 if (aux_channel == 0x10 && port != PORT_B)
669 DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
670 if (aux_channel == 0x20 && port != PORT_C)
671 DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
672 if (aux_channel == 0x30 && port != PORT_D)
673 DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
674 }
675
676 if (bdb->version >= 158) {
677 /* The VBT HDMI level shift values match the table we have. */
678 hdmi_level_shift = child->raw[7] & 0xF;
679 if (hdmi_level_shift < 0xC) {
680 DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
681 port_name(port),
682 hdmi_level_shift);
683 info->hdmi_level_shift = hdmi_level_shift;
684 }
685 }
686}
687
688static void parse_ddi_ports(struct drm_i915_private *dev_priv,
689 struct bdb_header *bdb)
690{
691 struct drm_device *dev = dev_priv->dev;
692 enum port port;
693
694 if (!HAS_DDI(dev))
695 return;
696
697 if (!dev_priv->vbt.child_dev_num)
698 return;
699
700 if (bdb->version < 155)
701 return;
702
703 for (port = PORT_A; port < I915_MAX_PORTS; port++)
704 parse_ddi_port(dev_priv, port, bdb);
705}
706
707static void
572parse_device_mapping(struct drm_i915_private *dev_priv, 708parse_device_mapping(struct drm_i915_private *dev_priv,
573 struct bdb_header *bdb) 709 struct bdb_header *bdb)
574{ 710{
575 struct bdb_general_definitions *p_defs; 711 struct bdb_general_definitions *p_defs;
576 struct child_device_config *p_child, *child_dev_ptr; 712 union child_device_config *p_child, *child_dev_ptr;
577 int i, child_device_num, count; 713 int i, child_device_num, count;
578 u16 block_size; 714 u16 block_size;
579 715
@@ -601,7 +737,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
601 /* get the number of child device that is present */ 737 /* get the number of child device that is present */
602 for (i = 0; i < child_device_num; i++) { 738 for (i = 0; i < child_device_num; i++) {
603 p_child = &(p_defs->devices[i]); 739 p_child = &(p_defs->devices[i]);
604 if (!p_child->device_type) { 740 if (!p_child->common.device_type) {
605 /* skip the device block if device type is invalid */ 741 /* skip the device block if device type is invalid */
606 continue; 742 continue;
607 } 743 }
@@ -621,7 +757,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
621 count = 0; 757 count = 0;
622 for (i = 0; i < child_device_num; i++) { 758 for (i = 0; i < child_device_num; i++) {
623 p_child = &(p_defs->devices[i]); 759 p_child = &(p_defs->devices[i]);
624 if (!p_child->device_type) { 760 if (!p_child->common.device_type) {
625 /* skip the device block if device type is invalid */ 761 /* skip the device block if device type is invalid */
626 continue; 762 continue;
627 } 763 }
@@ -637,6 +773,7 @@ static void
637init_vbt_defaults(struct drm_i915_private *dev_priv) 773init_vbt_defaults(struct drm_i915_private *dev_priv)
638{ 774{
639 struct drm_device *dev = dev_priv->dev; 775 struct drm_device *dev = dev_priv->dev;
776 enum port port;
640 777
641 dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC; 778 dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
642 779
@@ -655,6 +792,18 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
655 dev_priv->vbt.lvds_use_ssc = 1; 792 dev_priv->vbt.lvds_use_ssc = 1;
656 dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); 793 dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
657 DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq); 794 DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
795
796 for (port = PORT_A; port < I915_MAX_PORTS; port++) {
797 struct ddi_vbt_port_info *info =
798 &dev_priv->vbt.ddi_port_info[port];
799
800 /* Recommended BSpec default: 800mV 0dB. */
801 info->hdmi_level_shift = 6;
802
803 info->supports_dvi = (port != PORT_A && port != PORT_E);
804 info->supports_hdmi = info->supports_dvi;
805 info->supports_dp = (port != PORT_E);
806 }
658} 807}
659 808
660static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) 809static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
@@ -745,6 +894,8 @@ intel_parse_bios(struct drm_device *dev)
745 parse_device_mapping(dev_priv, bdb); 894 parse_device_mapping(dev_priv, bdb);
746 parse_driver_features(dev_priv, bdb); 895 parse_driver_features(dev_priv, bdb);
747 parse_edp(dev_priv, bdb); 896 parse_edp(dev_priv, bdb);
897 parse_mipi(dev_priv, bdb);
898 parse_ddi_ports(dev_priv, bdb);
748 899
749 if (bios) 900 if (bios)
750 pci_unmap_rom(pdev, bios); 901 pci_unmap_rom(pdev, bios);
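
parse_ddi_port() above classifies each child device from hard-coded bit positions in device_type. A standalone sketch of that decoding, handy for eyeballing a VBT dump (the helper name and printf are illustrative assumptions; the bit positions are the ones used in the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void decode_device_type(uint16_t device_type)
{
	bool is_dvi = device_type & (1 << 4);  /* TMDS/DVI capable */
	bool is_dp  = device_type & (1 << 2);  /* DisplayPort capable */
	bool is_crt = device_type & (1 << 0);  /* analog */
	/* HDMI is DVI without bit 11; eDP is DP with bit 12 (internal). */
	bool is_hdmi = is_dvi && !(device_type & (1 << 11));
	bool is_edp  = is_dp && (device_type & (1 << 12));

	printf("DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
	       is_dp, is_hdmi, is_dvi, is_edp, is_crt);
}
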
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index e088d6f0956a..287cc5a21c2e 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -104,6 +104,7 @@ struct vbios_data {
104#define BDB_LVDS_LFP_DATA 42 104#define BDB_LVDS_LFP_DATA 42
105#define BDB_LVDS_BACKLIGHT 43 105#define BDB_LVDS_BACKLIGHT 43
106#define BDB_LVDS_POWER 44 106#define BDB_LVDS_POWER 44
107#define BDB_MIPI 50
107#define BDB_SKIP 254 /* VBIOS private block, ignore */ 108#define BDB_SKIP 254 /* VBIOS private block, ignore */
108 109
109struct bdb_general_features { 110struct bdb_general_features {
@@ -201,7 +202,10 @@ struct bdb_general_features {
201#define DEVICE_PORT_DVOB 0x01 202#define DEVICE_PORT_DVOB 0x01
202#define DEVICE_PORT_DVOC 0x02 203#define DEVICE_PORT_DVOC 0x02
203 204
204struct child_device_config { 205/* We used to keep this struct but without any version control. We should avoid
206 * using it in the future, but it should be safe to keep using it in the old
207 * code. */
208struct old_child_dev_config {
205 u16 handle; 209 u16 handle;
206 u16 device_type; 210 u16 device_type;
207 u8 device_id[10]; /* ascii string */ 211 u8 device_id[10]; /* ascii string */
@@ -223,6 +227,32 @@ struct child_device_config {
223 u8 dvo_function; 227 u8 dvo_function;
224} __attribute__((packed)); 228} __attribute__((packed));
225 229
230/* This one contains field offsets that are known to be common for all BDB
231 * versions. Notice that the meaning of the contents may still change,
232 * but at least the offsets are consistent. */
233struct common_child_dev_config {
234 u16 handle;
235 u16 device_type;
236 u8 not_common1[12];
237 u8 dvo_port;
238 u8 not_common2[2];
239 u8 ddc_pin;
240 u16 edid_ptr;
241} __attribute__((packed));
242
243/* This field changes depending on the BDB version, so the most reliable way to
244 * read it is by checking the BDB version and reading the raw pointer. */
245union child_device_config {
246 /* This one is safe to be used anywhere, but the code should still check
247 * the BDB version. */
248 u8 raw[33];
249 /* This one should only be kept for legacy code. */
250 struct old_child_dev_config old;
251 /* This one should also be safe to use anywhere, even without version
252 * checks. */
253 struct common_child_dev_config common;
254};
255
226struct bdb_general_definitions { 256struct bdb_general_definitions {
227 /* DDC GPIO */ 257 /* DDC GPIO */
228 u8 crt_ddc_gmbus_pin; 258 u8 crt_ddc_gmbus_pin;
@@ -248,7 +278,7 @@ struct bdb_general_definitions {
248 * number = (block_size - sizeof(bdb_general_definitions))/ 278 * number = (block_size - sizeof(bdb_general_definitions))/
249 * sizeof(child_device_config); 279 * sizeof(child_device_config);
250 */ 280 */
251 struct child_device_config devices[0]; 281 union child_device_config devices[0];
252} __attribute__((packed)); 282} __attribute__((packed));
253 283
254struct bdb_lvds_options { 284struct bdb_lvds_options {
@@ -618,4 +648,57 @@ int intel_parse_bios(struct drm_device *dev);
618#define PORT_IDPC 8 648#define PORT_IDPC 8
619#define PORT_IDPD 9 649#define PORT_IDPD 9
620 650
651/* Possible values for the "DVO Port" field for versions >= 155: */
652#define DVO_PORT_HDMIA 0
653#define DVO_PORT_HDMIB 1
654#define DVO_PORT_HDMIC 2
655#define DVO_PORT_HDMID 3
656#define DVO_PORT_LVDS 4
657#define DVO_PORT_TV 5
658#define DVO_PORT_CRT 6
659#define DVO_PORT_DPB 7
660#define DVO_PORT_DPC 8
661#define DVO_PORT_DPD 9
662#define DVO_PORT_DPA 10
663
664/* MIPI DSI panel info */
665struct bdb_mipi {
666 u16 panel_id;
667 u16 bridge_revision;
668
669 /* General params */
670 u32 dithering:1;
671 u32 bpp_pixel_format:1;
672 u32 rsvd1:1;
673 u32 dphy_valid:1;
674 u32 resvd2:28;
675
676 u16 port_info;
677 u16 rsvd3:2;
678 u16 num_lanes:2;
679 u16 rsvd4:12;
680
681 /* DSI config */
682 u16 virt_ch_num:2;
683 u16 vtm:2;
684 u16 rsvd5:12;
685
686 u32 dsi_clock;
687 u32 bridge_ref_clk;
688 u16 rsvd_pwr;
689
690 /* Dphy Params */
691 u32 prepare_cnt:5;
692 u32 rsvd6:3;
693 u32 clk_zero_cnt:8;
694 u32 trail_cnt:5;
695 u32 rsvd7:3;
696 u32 exit_zero_cnt:6;
697 u32 rsvd8:2;
698
699 u32 hl_switch_cnt;
700 u32 lp_byte_clk;
701 u32 clk_lane_switch_cnt;
702} __attribute__((packed));
703
621#endif /* _I830_BIOS_H_ */ 704#endif /* _I830_BIOS_H_ */
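
The union above only works if raw[33] covers both packed layouts and the "common" fields really sit at the same offsets in every BDB version. A hedged sketch of compile-time checks that would pin that down (offsets computed from the packed struct above; the 33-byte total assumes the legacy child device block is 33 bytes, as the union's raw[] member implies; BUILD_BUG_ON must live inside a function):

	BUILD_BUG_ON(sizeof(union child_device_config) != 33);
	BUILD_BUG_ON(offsetof(struct common_child_dev_config, dvo_port) != 16);
	BUILD_BUG_ON(offsetof(struct common_child_dev_config, ddc_pin) != 19);
	BUILD_BUG_ON(offsetof(struct common_child_dev_config, edid_ptr) != 20);
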
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ea9022ef15d5..942b9acb0d8e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -83,8 +83,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
83 return true; 83 return true;
84} 84}
85 85
86static void intel_crt_get_config(struct intel_encoder *encoder, 86static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
87 struct intel_crtc_config *pipe_config)
88{ 87{
89 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 88 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
90 struct intel_crt *crt = intel_encoder_to_crt(encoder); 89 struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -102,7 +101,35 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
102 else 101 else
103 flags |= DRM_MODE_FLAG_NVSYNC; 102 flags |= DRM_MODE_FLAG_NVSYNC;
104 103
105 pipe_config->adjusted_mode.flags |= flags; 104 return flags;
105}
106
107static void intel_crt_get_config(struct intel_encoder *encoder,
108 struct intel_crtc_config *pipe_config)
109{
110 struct drm_device *dev = encoder->base.dev;
111 int dotclock;
112
113 pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
114
115 dotclock = pipe_config->port_clock;
116
117 if (HAS_PCH_SPLIT(dev))
118 ironlake_check_encoder_dotclock(pipe_config, dotclock);
119
120 pipe_config->adjusted_mode.crtc_clock = dotclock;
121}
122
123static void hsw_crt_get_config(struct intel_encoder *encoder,
124 struct intel_crtc_config *pipe_config)
125{
126 intel_ddi_get_config(encoder, pipe_config);
127
128 pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
129 DRM_MODE_FLAG_NHSYNC |
130 DRM_MODE_FLAG_PVSYNC |
131 DRM_MODE_FLAG_NVSYNC);
132 pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
106} 133}
107 134
108/* Note: The caller is required to filter out dpms modes not supported by the 135/* Note: The caller is required to filter out dpms modes not supported by the
@@ -349,9 +376,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
349 376
350 DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret); 377 DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
351 378
352 /* FIXME: debug force function and remove */
353 ret = true;
354
355 return ret; 379 return ret;
356} 380}
357 381
@@ -653,7 +677,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
653 677
654static void intel_crt_destroy(struct drm_connector *connector) 678static void intel_crt_destroy(struct drm_connector *connector)
655{ 679{
656 drm_sysfs_connector_remove(connector);
657 drm_connector_cleanup(connector); 680 drm_connector_cleanup(connector);
658 kfree(connector); 681 kfree(connector);
659} 682}
@@ -759,7 +782,7 @@ void intel_crt_init(struct drm_device *dev)
759 if (!crt) 782 if (!crt)
760 return; 783 return;
761 784
762 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 785 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
763 if (!intel_connector) { 786 if (!intel_connector) {
764 kfree(crt); 787 kfree(crt);
765 return; 788 return;
@@ -799,7 +822,10 @@ void intel_crt_init(struct drm_device *dev)
799 crt->base.mode_set = intel_crt_mode_set; 822 crt->base.mode_set = intel_crt_mode_set;
800 crt->base.disable = intel_disable_crt; 823 crt->base.disable = intel_disable_crt;
801 crt->base.enable = intel_enable_crt; 824 crt->base.enable = intel_enable_crt;
802 crt->base.get_config = intel_crt_get_config; 825 if (IS_HASWELL(dev))
826 crt->base.get_config = hsw_crt_get_config;
827 else
828 crt->base.get_config = intel_crt_get_config;
803 if (I915_HAS_HOTPLUG(dev)) 829 if (I915_HAS_HOTPLUG(dev))
804 crt->base.hpd_pin = HPD_CRT; 830 crt->base.hpd_pin = HPD_CRT;
805 if (HAS_DDI(dev)) 831 if (HAS_DDI(dev))
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 63de2701b974..6d335f8ca343 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -42,7 +42,6 @@ static const u32 hsw_ddi_translations_dp[] = {
42 0x80C30FFF, 0x000B0000, 42 0x80C30FFF, 0x000B0000,
43 0x00FFFFFF, 0x00040006, 43 0x00FFFFFF, 0x00040006,
44 0x80D75FFF, 0x000B0000, 44 0x80D75FFF, 0x000B0000,
45 0x00FFFFFF, 0x00040006 /* HDMI parameters */
46}; 45};
47 46
48static const u32 hsw_ddi_translations_fdi[] = { 47static const u32 hsw_ddi_translations_fdi[] = {
@@ -55,10 +54,25 @@ static const u32 hsw_ddi_translations_fdi[] = {
55 0x00C30FFF, 0x001E0000, 54 0x00C30FFF, 0x001E0000,
56 0x00FFFFFF, 0x00060006, 55 0x00FFFFFF, 0x00060006,
57 0x00D75FFF, 0x001E0000, 56 0x00D75FFF, 0x001E0000,
58 0x00FFFFFF, 0x00040006 /* HDMI parameters */
59}; 57};
60 58
61static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) 59static const u32 hsw_ddi_translations_hdmi[] = {
60 /* Idx NT mV diff T mV diff db */
61 0x00FFFFFF, 0x0006000E, /* 0: 400 400 0 */
62 0x00E79FFF, 0x000E000C, /* 1: 400 500 2 */
63 0x00D75FFF, 0x0005000A, /* 2: 400 600 3.5 */
64 0x00FFFFFF, 0x0005000A, /* 3: 600 600 0 */
65 0x00E79FFF, 0x001D0007, /* 4: 600 750 2 */
66 0x00D75FFF, 0x000C0004, /* 5: 600 900 3.5 */
67 0x00FFFFFF, 0x00040006, /* 6: 800 800 0 */
68 0x80E79FFF, 0x00030002, /* 7: 800 1000 2 */
69 0x00FFFFFF, 0x00140005, /* 8: 850 850 0 */
70 0x00FFFFFF, 0x000C0004, /* 9: 900 900 0 */
71 0x00FFFFFF, 0x001C0003, /* 10: 950 950 0 */
72 0x80FFFFFF, 0x00030002, /* 11: 1000 1000 0 */
73};
74
75enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
62{ 76{
63 struct drm_encoder *encoder = &intel_encoder->base; 77 struct drm_encoder *encoder = &intel_encoder->base;
64 int type = intel_encoder->type; 78 int type = intel_encoder->type;
@@ -92,12 +106,18 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
92 const u32 *ddi_translations = (port == PORT_E) ? 106 const u32 *ddi_translations = (port == PORT_E) ?
93 hsw_ddi_translations_fdi : 107 hsw_ddi_translations_fdi :
94 hsw_ddi_translations_dp; 108 hsw_ddi_translations_dp;
109 int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
95 110
96 for (i = 0, reg = DDI_BUF_TRANS(port); 111 for (i = 0, reg = DDI_BUF_TRANS(port);
97 i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) { 112 i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
98 I915_WRITE(reg, ddi_translations[i]); 113 I915_WRITE(reg, ddi_translations[i]);
99 reg += 4; 114 reg += 4;
100 } 115 }
116 /* Entry 9 is for HDMI: */
117 for (i = 0; i < 2; i++) {
118 I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]);
119 reg += 4;
120 }
101} 121}
102 122
103/* Program DDI buffers translations for DP. By default, program ports A-D in DP 123/* Program DDI buffers translations for DP. By default, program ports A-D in DP
@@ -296,9 +316,6 @@ static void intel_ddi_mode_set(struct intel_encoder *encoder)
296 DRM_DEBUG_DRIVER("DP audio: write eld information\n"); 316 DRM_DEBUG_DRIVER("DP audio: write eld information\n");
297 intel_write_eld(&encoder->base, adjusted_mode); 317 intel_write_eld(&encoder->base, adjusted_mode);
298 } 318 }
299
300 intel_dp_init_link_config(intel_dp);
301
302 } else if (type == INTEL_OUTPUT_HDMI) { 319 } else if (type == INTEL_OUTPUT_HDMI) {
303 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 320 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
304 321
@@ -767,9 +784,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
767 BUG(); 784 BUG();
768 } 785 }
769 786
770 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) 787 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
771 temp |= TRANS_DDI_PVSYNC; 788 temp |= TRANS_DDI_PVSYNC;
772 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) 789 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
773 temp |= TRANS_DDI_PHSYNC; 790 temp |= TRANS_DDI_PHSYNC;
774 791
775 if (cpu_transcoder == TRANSCODER_EDP) { 792 if (cpu_transcoder == TRANSCODER_EDP) {
@@ -1202,7 +1219,7 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
1202 1219
1203 val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST | 1220 val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
1204 DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE; 1221 DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
1205 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 1222 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1206 val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; 1223 val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
1207 I915_WRITE(DP_TP_CTL(port), val); 1224 I915_WRITE(DP_TP_CTL(port), val);
1208 POSTING_READ(DP_TP_CTL(port)); 1225 POSTING_READ(DP_TP_CTL(port));
@@ -1249,8 +1266,8 @@ static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
1249 intel_dp_check_link_status(intel_dp); 1266 intel_dp_check_link_status(intel_dp);
1250} 1267}
1251 1268
1252static void intel_ddi_get_config(struct intel_encoder *encoder, 1269void intel_ddi_get_config(struct intel_encoder *encoder,
1253 struct intel_crtc_config *pipe_config) 1270 struct intel_crtc_config *pipe_config)
1254{ 1271{
1255 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 1272 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1256 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 1273 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -1268,6 +1285,37 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
1268 flags |= DRM_MODE_FLAG_NVSYNC; 1285 flags |= DRM_MODE_FLAG_NVSYNC;
1269 1286
1270 pipe_config->adjusted_mode.flags |= flags; 1287 pipe_config->adjusted_mode.flags |= flags;
1288
1289 switch (temp & TRANS_DDI_BPC_MASK) {
1290 case TRANS_DDI_BPC_6:
1291 pipe_config->pipe_bpp = 18;
1292 break;
1293 case TRANS_DDI_BPC_8:
1294 pipe_config->pipe_bpp = 24;
1295 break;
1296 case TRANS_DDI_BPC_10:
1297 pipe_config->pipe_bpp = 30;
1298 break;
1299 case TRANS_DDI_BPC_12:
1300 pipe_config->pipe_bpp = 36;
1301 break;
1302 default:
1303 break;
1304 }
1305
1306 switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
1307 case TRANS_DDI_MODE_SELECT_HDMI:
1308 case TRANS_DDI_MODE_SELECT_DVI:
1309 case TRANS_DDI_MODE_SELECT_FDI:
1310 break;
1311 case TRANS_DDI_MODE_SELECT_DP_SST:
1312 case TRANS_DDI_MODE_SELECT_DP_MST:
1313 pipe_config->has_dp_encoder = true;
1314 intel_dp_get_m_n(intel_crtc, pipe_config);
1315 break;
1316 default:
1317 break;
1318 }
1271} 1319}
1272 1320
1273static void intel_ddi_destroy(struct drm_encoder *encoder) 1321static void intel_ddi_destroy(struct drm_encoder *encoder)
@@ -1305,12 +1353,23 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
1305 struct drm_encoder *encoder; 1353 struct drm_encoder *encoder;
1306 struct intel_connector *hdmi_connector = NULL; 1354 struct intel_connector *hdmi_connector = NULL;
1307 struct intel_connector *dp_connector = NULL; 1355 struct intel_connector *dp_connector = NULL;
1356 bool init_hdmi, init_dp;
1357
1358 init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
1359 dev_priv->vbt.ddi_port_info[port].supports_hdmi);
1360 init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
1361 if (!init_dp && !init_hdmi) {
1362 DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n",
1363 port_name(port));
1364 init_hdmi = true;
1365 init_dp = true;
1366 }
1308 1367
1309 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); 1368 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
1310 if (!intel_dig_port) 1369 if (!intel_dig_port)
1311 return; 1370 return;
1312 1371
1313 dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 1372 dp_connector = kzalloc(sizeof(*dp_connector), GFP_KERNEL);
1314 if (!dp_connector) { 1373 if (!dp_connector) {
1315 kfree(intel_dig_port); 1374 kfree(intel_dig_port);
1316 return; 1375 return;
@@ -1342,19 +1401,20 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
1342 intel_encoder->cloneable = false; 1401 intel_encoder->cloneable = false;
1343 intel_encoder->hot_plug = intel_ddi_hot_plug; 1402 intel_encoder->hot_plug = intel_ddi_hot_plug;
1344 1403
1345 if (!intel_dp_init_connector(intel_dig_port, dp_connector)) { 1404 if (init_dp && !intel_dp_init_connector(intel_dig_port, dp_connector)) {
1346 drm_encoder_cleanup(encoder); 1405 drm_encoder_cleanup(encoder);
1347 kfree(intel_dig_port); 1406 kfree(intel_dig_port);
1348 kfree(dp_connector); 1407 kfree(dp_connector);
1349 return; 1408 return;
1350 } 1409 }
1351 1410
1352 if (intel_encoder->type != INTEL_OUTPUT_EDP) { 1411 /* In theory we don't need the encoder->type check, but leave it just in
1353 hdmi_connector = kzalloc(sizeof(struct intel_connector), 1412 * case we have some really bad VBTs... */
1413 if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
1414 hdmi_connector = kzalloc(sizeof(*hdmi_connector),
1354 GFP_KERNEL); 1415 GFP_KERNEL);
1355 if (!hdmi_connector) { 1416 if (!hdmi_connector)
1356 return; 1417 return;
1357 }
1358 1418
1359 intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port); 1419 intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
1360 intel_hdmi_init_connector(intel_dig_port, hdmi_connector); 1420 intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
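
Each DDI buffer translation entry is a pair of u32 writes, so the HDMI loop above picks words hdmi_level * 2 and hdmi_level * 2 + 1 from the new table and lands them in entry 9, right after the nine DP/FDI entries. A worked example with the BSpec default level 6 (800 mV, 0 dB) set in init_vbt_defaults():

	int hdmi_level = 6; /* dev_priv->vbt.ddi_port_info[port].hdmi_level_shift */
	u32 lo = hsw_ddi_translations_hdmi[hdmi_level * 2];     /* 0x00FFFFFF */
	u32 hi = hsw_ddi_translations_hdmi[hdmi_level * 2 + 1]; /* 0x00040006 */
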
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e5822e79f912..617b963dfb67 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -41,14 +41,13 @@
41#include <drm/drm_crtc_helper.h> 41#include <drm/drm_crtc_helper.h>
42#include <linux/dma_remapping.h> 42#include <linux/dma_remapping.h>
43 43
44bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
45static void intel_increase_pllclock(struct drm_crtc *crtc); 44static void intel_increase_pllclock(struct drm_crtc *crtc);
46static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 45static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
47 46
48static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 47static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
49 struct intel_crtc_config *pipe_config); 48 struct intel_crtc_config *pipe_config);
50static void ironlake_crtc_clock_get(struct intel_crtc *crtc, 49static void ironlake_pch_clock_get(struct intel_crtc *crtc,
51 struct intel_crtc_config *pipe_config); 50 struct intel_crtc_config *pipe_config);
52 51
53static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, 52static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
54 int x, int y, struct drm_framebuffer *old_fb); 53 int x, int y, struct drm_framebuffer *old_fb);
@@ -69,9 +68,6 @@ struct intel_limit {
69 intel_p2_t p2; 68 intel_p2_t p2;
70}; 69};
71 70
72/* FDI */
73#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
74
75int 71int
76intel_pch_rawclk(struct drm_device *dev) 72intel_pch_rawclk(struct drm_device *dev)
77{ 73{
@@ -339,18 +335,20 @@ static const intel_limit_t intel_limits_vlv_hdmi = {
339 .p2_slow = 2, .p2_fast = 20 }, 335 .p2_slow = 2, .p2_fast = 20 },
340}; 336};
341 337
342static const intel_limit_t intel_limits_vlv_dp = { 338/**
343 .dot = { .min = 25000, .max = 270000 }, 339 * Returns whether any output on the specified pipe is of the specified type
344 .vco = { .min = 4000000, .max = 6000000 }, 340 */
345 .n = { .min = 1, .max = 7 }, 341static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
346 .m = { .min = 22, .max = 450 }, 342{
347 .m1 = { .min = 2, .max = 3 }, 343 struct drm_device *dev = crtc->dev;
348 .m2 = { .min = 11, .max = 156 }, 344 struct intel_encoder *encoder;
349 .p = { .min = 10, .max = 30 }, 345
350 .p1 = { .min = 1, .max = 3 }, 346 for_each_encoder_on_crtc(dev, crtc, encoder)
351 .p2 = { .dot_limit = 270000, 347 if (encoder->type == type)
352 .p2_slow = 2, .p2_fast = 20 }, 348 return true;
353}; 349
350 return false;
351}
354 352
355static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, 353static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
356 int refclk) 354 int refclk)
@@ -414,10 +412,8 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
414 } else if (IS_VALLEYVIEW(dev)) { 412 } else if (IS_VALLEYVIEW(dev)) {
415 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) 413 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
416 limit = &intel_limits_vlv_dac; 414 limit = &intel_limits_vlv_dac;
417 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
418 limit = &intel_limits_vlv_hdmi;
419 else 415 else
420 limit = &intel_limits_vlv_dp; 416 limit = &intel_limits_vlv_hdmi;
421 } else if (!IS_GEN2(dev)) { 417 } else if (!IS_GEN2(dev)) {
422 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 418 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
423 limit = &intel_limits_i9xx_lvds; 419 limit = &intel_limits_i9xx_lvds;
@@ -456,21 +452,6 @@ static void i9xx_clock(int refclk, intel_clock_t *clock)
456 clock->dot = clock->vco / clock->p; 452 clock->dot = clock->vco / clock->p;
457} 453}
458 454
459/**
460 * Returns whether any output on the specified pipe is of the specified type
461 */
462bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
463{
464 struct drm_device *dev = crtc->dev;
465 struct intel_encoder *encoder;
466
467 for_each_encoder_on_crtc(dev, crtc, encoder)
468 if (encoder->type == type)
469 return true;
470
471 return false;
472}
473
474#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 455#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
475/** 456/**
476 * Returns whether the given set of divisors are valid for a given refclk with 457 * Returns whether the given set of divisors are valid for a given refclk with
@@ -714,29 +695,30 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
714 p = p1 * p2; 695 p = p1 * p2;
715 /* based on hardware requirement, prefer bigger m1,m2 values */ 696 /* based on hardware requirement, prefer bigger m1,m2 values */
716 for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) { 697 for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
717 m2 = (((2*(fastclk * p * n / m1 )) + 698 m2 = DIV_ROUND_CLOSEST(fastclk * p * n, refclk * m1);
718 refclk) / (2*refclk));
719 m = m1 * m2; 699 m = m1 * m2;
720 vco = updrate * m; 700 vco = updrate * m;
721 if (vco >= limit->vco.min && vco < limit->vco.max) { 701
722 ppm = 1000000 * ((vco / p) - fastclk) / fastclk; 702 if (vco < limit->vco.min || vco >= limit->vco.max)
723 absppm = (ppm > 0) ? ppm : (-ppm); 703 continue;
724 if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) { 704
725 bestppm = 0; 705 ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
726 flag = 1; 706 absppm = (ppm > 0) ? ppm : (-ppm);
727 } 707 if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
728 if (absppm < bestppm - 10) { 708 bestppm = 0;
729 bestppm = absppm; 709 flag = 1;
730 flag = 1; 710 }
731 } 711 if (absppm < bestppm - 10) {
732 if (flag) { 712 bestppm = absppm;
733 bestn = n; 713 flag = 1;
734 bestm1 = m1; 714 }
735 bestm2 = m2; 715 if (flag) {
736 bestp1 = p1; 716 bestn = n;
737 bestp2 = p2; 717 bestm1 = m1;
738 flag = 0; 718 bestm2 = m2;
739 } 719 bestp1 = p1;
720 bestp2 = p2;
721 flag = 0;
740 } 722 }
741 } 723 }
742 } 724 }
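
The m2 rewrite above is the usual round-to-nearest identity: for non-negative integers, (2*a + b) / (2*b) equals (a + b/2) / b, which is exactly what DIV_ROUND_CLOSEST(a, b) expands to. The new form is also slightly more precise, since it divides by refclk * m1 in one rounded step instead of truncating fastclk * p * n / m1 first. A small numeric check (the values are illustrative):

	unsigned int a = 7, b = 2;
	unsigned int old_form = (2 * a + b) / (2 * b); /* 16 / 4 = 4 */
	unsigned int closest  = (a + b / 2) / b;       /* 8 / 2 = 4, i.e. DIV_ROUND_CLOSEST(7, 2) */
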
@@ -751,6 +733,23 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
751 return true; 733 return true;
752} 734}
753 735
736bool intel_crtc_active(struct drm_crtc *crtc)
737{
738 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
739
740 /* Be paranoid as we can arrive here with only partial
741 * state retrieved from the hardware during setup.
742 *
743 * We can ditch the adjusted_mode.crtc_clock check as soon
744 * as Haswell has gained clock readout/fastboot support.
745 *
746 * We can ditch the crtc->fb check as soon as we can
747 * properly reconstruct framebuffers.
748 */
749 return intel_crtc->active && crtc->fb &&
750 intel_crtc->config.adjusted_mode.crtc_clock;
751}
752
754enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 753enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
755 enum pipe pipe) 754 enum pipe pipe)
756{ 755{
@@ -929,6 +928,24 @@ void assert_pll(struct drm_i915_private *dev_priv,
929 state_string(state), state_string(cur_state)); 928 state_string(state), state_string(cur_state));
930} 929}
931 930
931/* XXX: the dsi pll is shared between MIPI DSI ports */
932static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
933{
934 u32 val;
935 bool cur_state;
936
937 mutex_lock(&dev_priv->dpio_lock);
938 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
939 mutex_unlock(&dev_priv->dpio_lock);
940
941 cur_state = val & DSI_PLL_VCO_EN;
942 WARN(cur_state != state,
943 "DSI PLL state assertion failure (expected %s, current %s)\n",
944 state_string(state), state_string(cur_state));
945}
946#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
947#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
948
932struct intel_shared_dpll * 949struct intel_shared_dpll *
933intel_crtc_to_shared_dpll(struct intel_crtc *crtc) 950intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
934{ 951{
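
assert_dsi_pll() follows the driver's usual state-assertion pattern: sample the hardware (here under dpio_lock, since the CCK register sits behind the sideband interface), derive cur_state, and WARN on mismatch, with _enabled/_disabled wrappers for the call sites. A sketch of the intended use, mirroring the intel_enable_pipe() change later in this patch:

	if (dsi)
		assert_dsi_pll_enabled(dev_priv);
	else
		assert_pll_enabled(dev_priv, pipe);
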
@@ -1069,6 +1086,26 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1069 pipe_name(pipe)); 1086 pipe_name(pipe));
1070} 1087}
1071 1088
1089static void assert_cursor(struct drm_i915_private *dev_priv,
1090 enum pipe pipe, bool state)
1091{
1092 struct drm_device *dev = dev_priv->dev;
1093 bool cur_state;
1094
1095 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1096 cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
1097 else if (IS_845G(dev) || IS_I865G(dev))
1098 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
1099 else
1100 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1101
1102 WARN(cur_state != state,
1103 "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1104 pipe_name(pipe), state_string(state), state_string(cur_state));
1105}
1106#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1107#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1108
1072void assert_pipe(struct drm_i915_private *dev_priv, 1109void assert_pipe(struct drm_i915_private *dev_priv,
1073 enum pipe pipe, bool state) 1110 enum pipe pipe, bool state)
1074{ 1111{
@@ -1323,6 +1360,26 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1323 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); 1360 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1324} 1361}
1325 1362
1363static void intel_init_dpio(struct drm_device *dev)
1364{
1365 struct drm_i915_private *dev_priv = dev->dev_private;
1366
1367 if (!IS_VALLEYVIEW(dev))
1368 return;
1369
1370 /*
1371 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1372 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
1373 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
1374 * b. The other bits such as sfr settings / modesel may all be set
1375 * to 0.
1376 *
1377 * This should only be done on init and resume from S3 with both
1378 * PLLs disabled, or we risk losing DPIO and PLL synchronization.
1379 */
1380 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
1381}
1382
1326static void vlv_enable_pll(struct intel_crtc *crtc) 1383static void vlv_enable_pll(struct intel_crtc *crtc)
1327{ 1384{
1328 struct drm_device *dev = crtc->base.dev; 1385 struct drm_device *dev = crtc->base.dev;
@@ -1429,6 +1486,20 @@ static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1429 POSTING_READ(DPLL(pipe)); 1486 POSTING_READ(DPLL(pipe));
1430} 1487}
1431 1488
1489static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1490{
1491 u32 val = 0;
1492
1493 /* Make sure the pipe isn't still relying on us */
1494 assert_pipe_disabled(dev_priv, pipe);
1495
1496 /* Leave integrated clock source enabled */
1497 if (pipe == PIPE_B)
1498 val = DPLL_INTEGRATED_CRI_CLK_VLV;
1499 I915_WRITE(DPLL(pipe), val);
1500 POSTING_READ(DPLL(pipe));
1501}
1502
1432void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port) 1503void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
1433{ 1504{
1434 u32 port_mask; 1505 u32 port_mask;
@@ -1661,7 +1732,7 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1661 * returning. 1732 * returning.
1662 */ 1733 */
1663static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, 1734static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1664 bool pch_port) 1735 bool pch_port, bool dsi)
1665{ 1736{
1666 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1737 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1667 pipe); 1738 pipe);
@@ -1670,6 +1741,7 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1670 u32 val; 1741 u32 val;
1671 1742
1672 assert_planes_disabled(dev_priv, pipe); 1743 assert_planes_disabled(dev_priv, pipe);
1744 assert_cursor_disabled(dev_priv, pipe);
1673 assert_sprites_disabled(dev_priv, pipe); 1745 assert_sprites_disabled(dev_priv, pipe);
1674 1746
1675 if (HAS_PCH_LPT(dev_priv->dev)) 1747 if (HAS_PCH_LPT(dev_priv->dev))
@@ -1683,7 +1755,10 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1683 * need the check. 1755 * need the check.
1684 */ 1756 */
1685 if (!HAS_PCH_SPLIT(dev_priv->dev)) 1757 if (!HAS_PCH_SPLIT(dev_priv->dev))
1686 assert_pll_enabled(dev_priv, pipe); 1758 if (dsi)
1759 assert_dsi_pll_enabled(dev_priv);
1760 else
1761 assert_pll_enabled(dev_priv, pipe);
1687 else { 1762 else {
1688 if (pch_port) { 1763 if (pch_port) {
1689 /* if driving the PCH, we need FDI enabled */ 1764 /* if driving the PCH, we need FDI enabled */
@@ -1728,6 +1803,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1728 * or we might hang the display. 1803 * or we might hang the display.
1729 */ 1804 */
1730 assert_planes_disabled(dev_priv, pipe); 1805 assert_planes_disabled(dev_priv, pipe);
1806 assert_cursor_disabled(dev_priv, pipe);
1731 assert_sprites_disabled(dev_priv, pipe); 1807 assert_sprites_disabled(dev_priv, pipe);
1732 1808
1733 /* Don't disable pipe A or pipe A PLLs if needed */ 1809 /* Don't disable pipe A or pipe A PLLs if needed */
@@ -2244,11 +2320,26 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2244 return ret; 2320 return ret;
2245 } 2321 }
2246 2322
2247 /* Update pipe size and adjust fitter if needed */ 2323 /*
2324 * Update pipe size and adjust fitter if needed: the reason for this is
2325 * that in compute_mode_changes we check the native mode (not the pfit
2326 * mode) to see if we can flip rather than do a full mode set. In the
2327 * fastboot case, we'll flip, but if we don't update the pipesrc and
2328 * pfit state, we'll end up with a big fb scanned out into the wrong
2329 * sized surface.
2330 *
2331 * To fix this properly, we need to hoist the checks up into
2332 * compute_mode_changes (or above), check the actual pfit state and
2333 * whether the platform allows pfit disable with pipe active, and only
2334 * then update the pipesrc and pfit state, even on the flip path.
2335 */
2248 if (i915_fastboot) { 2336 if (i915_fastboot) {
2337 const struct drm_display_mode *adjusted_mode =
2338 &intel_crtc->config.adjusted_mode;
2339
2249 I915_WRITE(PIPESRC(intel_crtc->pipe), 2340 I915_WRITE(PIPESRC(intel_crtc->pipe),
2250 ((crtc->mode.hdisplay - 1) << 16) | 2341 ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2251 (crtc->mode.vdisplay - 1)); 2342 (adjusted_mode->crtc_vdisplay - 1));
2252 if (!intel_crtc->config.pch_pfit.enabled && 2343 if (!intel_crtc->config.pch_pfit.enabled &&
2253 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || 2344 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2254 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { 2345 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
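The fastboot path above now programs PIPESRC from the adjusted mode rather than crtc->mode. A minimal standalone sketch of the register encoding used here (width-1 in the high 16 bits, height-1 in the low 16) together with the matching decode performed by the readback hunks later in this patch; only the packing visible in these writes is assumed:

#include <stdint.h>
#include <stdio.h>

/* Pack a pipe source size the way PIPESRC is written: (w-1)<<16 | (h-1). */
static uint32_t pipesrc_pack(unsigned int w, unsigned int h)
{
	return ((uint32_t)(w - 1) << 16) | (uint32_t)(h - 1);
}

/* Decode it back, as the get_pipe_config readback does. */
static void pipesrc_unpack(uint32_t tmp, unsigned int *w, unsigned int *h)
{
	*h = (tmp & 0xffff) + 1;
	*w = ((tmp >> 16) & 0xffff) + 1;
}

int main(void)
{
	unsigned int w, h;
	uint32_t v = pipesrc_pack(1920, 1080);

	pipesrc_unpack(v, &w, &h);
	printf("PIPESRC=0x%08x -> %ux%u\n", (unsigned)v, w, h); /* 0x077f0437 -> 1920x1080 */
	return 0;
}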
@@ -2872,6 +2963,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
2872{ 2963{
2873 struct drm_device *dev = crtc->dev; 2964 struct drm_device *dev = crtc->dev;
2874 struct drm_i915_private *dev_priv = dev->dev_private; 2965 struct drm_i915_private *dev_priv = dev->dev_private;
2966 int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
2875 u32 divsel, phaseinc, auxdiv, phasedir = 0; 2967 u32 divsel, phaseinc, auxdiv, phasedir = 0;
2876 u32 temp; 2968 u32 temp;
2877 2969
@@ -2889,14 +2981,14 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
2889 SBI_ICLK); 2981 SBI_ICLK);
2890 2982
2891 /* 20MHz is a corner case which is out of range for the 7-bit divisor */ 2983 /* 20MHz is a corner case which is out of range for the 7-bit divisor */
2892 if (crtc->mode.clock == 20000) { 2984 if (clock == 20000) {
2893 auxdiv = 1; 2985 auxdiv = 1;
2894 divsel = 0x41; 2986 divsel = 0x41;
2895 phaseinc = 0x20; 2987 phaseinc = 0x20;
2896 } else { 2988 } else {
2897 /* The iCLK virtual clock root frequency is in MHz, 2989 /* The iCLK virtual clock root frequency is in MHz,
 2898 * but the crtc->mode.clock is in KHz. To get the divisors, 2990 * but the adjusted_mode->crtc_clock is in KHz. To get the
2899 * it is necessary to divide one by another, so we 2991 * divisors, it is necessary to divide one by another, so we
2900 * convert the virtual clock precision to KHz here for higher 2992 * convert the virtual clock precision to KHz here for higher
2901 * precision. 2993 * precision.
2902 */ 2994 */
@@ -2904,7 +2996,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
2904 u32 iclk_pi_range = 64; 2996 u32 iclk_pi_range = 64;
2905 u32 desired_divisor, msb_divisor_value, pi_value; 2997 u32 desired_divisor, msb_divisor_value, pi_value;
2906 2998
2907 desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock); 2999 desired_divisor = (iclk_virtual_root_freq / clock);
2908 msb_divisor_value = desired_divisor / iclk_pi_range; 3000 msb_divisor_value = desired_divisor / iclk_pi_range;
2909 pi_value = desired_divisor % iclk_pi_range; 3001 pi_value = desired_divisor % iclk_pi_range;
2910 3002
@@ -2920,7 +3012,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
2920 ~SBI_SSCDIVINTPHASE_INCVAL_MASK); 3012 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2921 3013
2922 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", 3014 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2923 crtc->mode.clock, 3015 clock,
2924 auxdiv, 3016 auxdiv,
2925 divsel, 3017 divsel,
2926 phasedir, 3018 phasedir,
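The divisor arithmetic in this hunk is easy to sanity-check by hand. A standalone sketch using only the constants visible above (a 172,800,000 kHz virtual root clock and a 64-step phase-interpolator range), including the hardcoded 20 MHz corner case; the mapping of msb/pi onto the final divsel/phaseinc register fields is not shown in this hunk, so it is deliberately left out:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t iclk_virtual_root_freq = 172800 * 1000;	/* kHz */
	const uint32_t iclk_pi_range = 64;
	uint32_t clock = 108000;		/* sample dot clock, kHz */

	if (clock == 20000) {
		/* Out-of-range corner case, hardcoded by the driver. */
		printf("auxdiv=1 divsel=0x41 phaseinc=0x20\n");
	} else {
		uint32_t desired_divisor = iclk_virtual_root_freq / clock;
		uint32_t msb = desired_divisor / iclk_pi_range;
		uint32_t pi = desired_divisor % iclk_pi_range;

		/* 108000 kHz -> divisor 1600, msb 25, pi 0 */
		printf("divisor=%u msb=%u pi=%u\n", desired_divisor, msb, pi);
	}
	return 0;
}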
@@ -3240,6 +3332,84 @@ static void intel_disable_planes(struct drm_crtc *crtc)
3240 intel_plane_disable(&intel_plane->base); 3332 intel_plane_disable(&intel_plane->base);
3241} 3333}
3242 3334
3335static void hsw_enable_ips(struct intel_crtc *crtc)
3336{
3337 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3338
3339 if (!crtc->config.ips_enabled)
3340 return;
3341
3342 /* We can only enable IPS after we enable a plane and wait for a vblank.
3343 * We guarantee that the plane is enabled by calling intel_enable_ips
3344 * only after intel_enable_plane. And intel_enable_plane already waits
3345 * for a vblank, so all we need to do here is to enable the IPS bit. */
3346 assert_plane_enabled(dev_priv, crtc->plane);
3347 I915_WRITE(IPS_CTL, IPS_ENABLE);
3348}
3349
3350static void hsw_disable_ips(struct intel_crtc *crtc)
3351{
3352 struct drm_device *dev = crtc->base.dev;
3353 struct drm_i915_private *dev_priv = dev->dev_private;
3354
3355 if (!crtc->config.ips_enabled)
3356 return;
3357
3358 assert_plane_enabled(dev_priv, crtc->plane);
3359 I915_WRITE(IPS_CTL, 0);
3360 POSTING_READ(IPS_CTL);
3361
3362 /* We need to wait for a vblank before we can disable the plane. */
3363 intel_wait_for_vblank(dev, crtc->pipe);
3364}
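The two helpers above encode a strict ordering: IPS may only be switched on once a plane is enabled and has been on screen for a vblank, and must be off for a full vblank before the plane can go away. A hedged pseudo-driver sketch of that contract; the function names are hypothetical stand-ins for the driver calls in this file:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real driver calls. */
static void enable_plane(void)    { puts("plane on"); }
static void wait_for_vblank(void) { puts("vblank"); }
static void write_ips(bool on)    { printf("IPS %s\n", on ? "on" : "off"); }
static void disable_plane(void)   { puts("plane off"); }

int main(void)
{
	/* Enable: plane first, then a vblank, only then IPS. */
	enable_plane();
	wait_for_vblank();	/* intel_enable_plane() already waits */
	write_ips(true);

	/* Disable: IPS off, then a full vblank, only then the plane. */
	write_ips(false);
	wait_for_vblank();
	disable_plane();
	return 0;
}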
3365
3366/** Loads the palette/gamma unit for the CRTC with the prepared values */
3367static void intel_crtc_load_lut(struct drm_crtc *crtc)
3368{
3369 struct drm_device *dev = crtc->dev;
3370 struct drm_i915_private *dev_priv = dev->dev_private;
3371 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3372 enum pipe pipe = intel_crtc->pipe;
3373 int palreg = PALETTE(pipe);
3374 int i;
3375 bool reenable_ips = false;
3376
3377 /* The clocks have to be on to load the palette. */
3378 if (!crtc->enabled || !intel_crtc->active)
3379 return;
3380
3381 if (!HAS_PCH_SPLIT(dev_priv->dev)) {
3382 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
3383 assert_dsi_pll_enabled(dev_priv);
3384 else
3385 assert_pll_enabled(dev_priv, pipe);
3386 }
3387
3388 /* use legacy palette for Ironlake */
3389 if (HAS_PCH_SPLIT(dev))
3390 palreg = LGC_PALETTE(pipe);
3391
3392 /* Workaround : Do not read or write the pipe palette/gamma data while
3393 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3394 */
3395 if (intel_crtc->config.ips_enabled &&
3396 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
3397 GAMMA_MODE_MODE_SPLIT)) {
3398 hsw_disable_ips(intel_crtc);
3399 reenable_ips = true;
3400 }
3401
3402 for (i = 0; i < 256; i++) {
3403 I915_WRITE(palreg + 4 * i,
3404 (intel_crtc->lut_r[i] << 16) |
3405 (intel_crtc->lut_g[i] << 8) |
3406 intel_crtc->lut_b[i]);
3407 }
3408
3409 if (reenable_ips)
3410 hsw_enable_ips(intel_crtc);
3411}
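Each palette entry written by the loop above packs 8-bit R/G/B into one 32-bit word: red in bits 23:16, green in 15:8, blue in 7:0. A minimal sketch of that packing with an identity gamma ramp as sample input; the word layout is taken from the loop above, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>

static uint32_t lut_word(uint8_t r, uint8_t g, uint8_t b)
{
	return ((uint32_t)r << 16) | ((uint32_t)g << 8) | b;
}

int main(void)
{
	/* An identity gamma ramp: entry i maps to (i, i, i). */
	for (int i = 0; i < 256; i += 64)
		printf("entry %3d -> 0x%08x\n", i, (unsigned)lut_word(i, i, i));
	return 0;
}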
3412
3243static void ironlake_crtc_enable(struct drm_crtc *crtc) 3413static void ironlake_crtc_enable(struct drm_crtc *crtc)
3244{ 3414{
3245 struct drm_device *dev = crtc->dev; 3415 struct drm_device *dev = crtc->dev;
@@ -3259,8 +3429,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3259 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 3429 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
3260 intel_set_pch_fifo_underrun_reporting(dev, pipe, true); 3430 intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
3261 3431
3262 intel_update_watermarks(dev);
3263
3264 for_each_encoder_on_crtc(dev, crtc, encoder) 3432 for_each_encoder_on_crtc(dev, crtc, encoder)
3265 if (encoder->pre_enable) 3433 if (encoder->pre_enable)
3266 encoder->pre_enable(encoder); 3434 encoder->pre_enable(encoder);
@@ -3283,8 +3451,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3283 */ 3451 */
3284 intel_crtc_load_lut(crtc); 3452 intel_crtc_load_lut(crtc);
3285 3453
3454 intel_update_watermarks(crtc);
3286 intel_enable_pipe(dev_priv, pipe, 3455 intel_enable_pipe(dev_priv, pipe,
3287 intel_crtc->config.has_pch_encoder); 3456 intel_crtc->config.has_pch_encoder, false);
3288 intel_enable_plane(dev_priv, plane, pipe); 3457 intel_enable_plane(dev_priv, plane, pipe);
3289 intel_enable_planes(crtc); 3458 intel_enable_planes(crtc);
3290 intel_crtc_update_cursor(crtc, true); 3459 intel_crtc_update_cursor(crtc, true);
@@ -3319,34 +3488,74 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3319 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A; 3488 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
3320} 3489}
3321 3490
3322static void hsw_enable_ips(struct intel_crtc *crtc) 3491static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
3323{ 3492{
3324 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 3493 struct drm_device *dev = crtc->dev;
3494 struct drm_i915_private *dev_priv = dev->dev_private;
3495 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3496 int pipe = intel_crtc->pipe;
3497 int plane = intel_crtc->plane;
3325 3498
3326 if (!crtc->config.ips_enabled) 3499 intel_enable_plane(dev_priv, plane, pipe);
3327 return; 3500 intel_enable_planes(crtc);
3501 intel_crtc_update_cursor(crtc, true);
3328 3502
3329 /* We can only enable IPS after we enable a plane and wait for a vblank. 3503 hsw_enable_ips(intel_crtc);
3330 * We guarantee that the plane is enabled by calling intel_enable_ips 3504
3331 * only after intel_enable_plane. And intel_enable_plane already waits 3505 mutex_lock(&dev->struct_mutex);
3332 * for a vblank, so all we need to do here is to enable the IPS bit. */ 3506 intel_update_fbc(dev);
3333 assert_plane_enabled(dev_priv, crtc->plane); 3507 mutex_unlock(&dev->struct_mutex);
3334 I915_WRITE(IPS_CTL, IPS_ENABLE);
3335} 3508}
3336 3509
3337static void hsw_disable_ips(struct intel_crtc *crtc) 3510static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
3338{ 3511{
3339 struct drm_device *dev = crtc->base.dev; 3512 struct drm_device *dev = crtc->dev;
3340 struct drm_i915_private *dev_priv = dev->dev_private; 3513 struct drm_i915_private *dev_priv = dev->dev_private;
3514 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3515 int pipe = intel_crtc->pipe;
3516 int plane = intel_crtc->plane;
3341 3517
3342 if (!crtc->config.ips_enabled) 3518 intel_crtc_wait_for_pending_flips(crtc);
3343 return; 3519 drm_vblank_off(dev, pipe);
3344 3520
3345 assert_plane_enabled(dev_priv, crtc->plane); 3521 /* FBC must be disabled before disabling the plane on HSW. */
3346 I915_WRITE(IPS_CTL, 0); 3522 if (dev_priv->fbc.plane == plane)
3523 intel_disable_fbc(dev);
3347 3524
3348 /* We need to wait for a vblank before we can disable the plane. */ 3525 hsw_disable_ips(intel_crtc);
3349 intel_wait_for_vblank(dev, crtc->pipe); 3526
3527 intel_crtc_update_cursor(crtc, false);
3528 intel_disable_planes(crtc);
3529 intel_disable_plane(dev_priv, plane, pipe);
3530}
3531
3532/*
3533 * This implements the workaround described in the "notes" section of the mode
3534 * set sequence documentation. When going from no pipes or single pipe to
3535 * multiple pipes, and planes are enabled after the pipe, we need to wait at
3536 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
3537 */
3538static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
3539{
3540 struct drm_device *dev = crtc->base.dev;
3541 struct intel_crtc *crtc_it, *other_active_crtc = NULL;
3542
3543 /* We want to get the other_active_crtc only if there's only 1 other
3544 * active crtc. */
3545 list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) {
3546 if (!crtc_it->active || crtc_it == crtc)
3547 continue;
3548
3549 if (other_active_crtc)
3550 return;
3551
3552 other_active_crtc = crtc_it;
3553 }
3554 if (!other_active_crtc)
3555 return;
3556
3557 intel_wait_for_vblank(dev, other_active_crtc->pipe);
3558 intel_wait_for_vblank(dev, other_active_crtc->pipe);
3350} 3559}
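The scan above has a deliberate shape: it bails out as soon as a second other active CRTC is found, so the double vblank wait only runs in the single-pipe-to-two-pipes transition the workaround targets. A standalone sketch of that "exactly one other active" scan over a plain array; the types here are hypothetical simplifications:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct crtc { bool active; };

/* Return the single other active crtc, or NULL if there are zero or several. */
static struct crtc *other_active_crtc(struct crtc *all, size_t n,
				      struct crtc *self)
{
	struct crtc *other = NULL;

	for (size_t i = 0; i < n; i++) {
		if (!all[i].active || &all[i] == self)
			continue;
		if (other)
			return NULL;	/* more than one: no wait needed */
		other = &all[i];
	}
	return other;
}

int main(void)
{
	struct crtc crtcs[3] = { { true }, { true }, { false } };

	if (other_active_crtc(crtcs, 3, &crtcs[0]))
		puts("wait two vblanks on the other pipe");
	return 0;
}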
3351 3560
3352static void haswell_crtc_enable(struct drm_crtc *crtc) 3561static void haswell_crtc_enable(struct drm_crtc *crtc)
@@ -3356,7 +3565,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3356 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3565 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3357 struct intel_encoder *encoder; 3566 struct intel_encoder *encoder;
3358 int pipe = intel_crtc->pipe; 3567 int pipe = intel_crtc->pipe;
3359 int plane = intel_crtc->plane;
3360 3568
3361 WARN_ON(!crtc->enabled); 3569 WARN_ON(!crtc->enabled);
3362 3570
@@ -3369,8 +3577,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3369 if (intel_crtc->config.has_pch_encoder) 3577 if (intel_crtc->config.has_pch_encoder)
3370 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); 3578 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
3371 3579
3372 intel_update_watermarks(dev);
3373
3374 if (intel_crtc->config.has_pch_encoder) 3580 if (intel_crtc->config.has_pch_encoder)
3375 dev_priv->display.fdi_link_train(crtc); 3581 dev_priv->display.fdi_link_train(crtc);
3376 3582
@@ -3391,23 +3597,22 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3391 intel_ddi_set_pipe_settings(crtc); 3597 intel_ddi_set_pipe_settings(crtc);
3392 intel_ddi_enable_transcoder_func(crtc); 3598 intel_ddi_enable_transcoder_func(crtc);
3393 3599
3600 intel_update_watermarks(crtc);
3394 intel_enable_pipe(dev_priv, pipe, 3601 intel_enable_pipe(dev_priv, pipe,
3395 intel_crtc->config.has_pch_encoder); 3602 intel_crtc->config.has_pch_encoder, false);
3396 intel_enable_plane(dev_priv, plane, pipe);
3397 intel_enable_planes(crtc);
3398 intel_crtc_update_cursor(crtc, true);
3399
3400 hsw_enable_ips(intel_crtc);
3401 3603
3402 if (intel_crtc->config.has_pch_encoder) 3604 if (intel_crtc->config.has_pch_encoder)
3403 lpt_pch_enable(crtc); 3605 lpt_pch_enable(crtc);
3404 3606
3405 mutex_lock(&dev->struct_mutex); 3607 for_each_encoder_on_crtc(dev, crtc, encoder) {
3406 intel_update_fbc(dev);
3407 mutex_unlock(&dev->struct_mutex);
3408
3409 for_each_encoder_on_crtc(dev, crtc, encoder)
3410 encoder->enable(encoder); 3608 encoder->enable(encoder);
3609 intel_opregion_notify_encoder(encoder, true);
3610 }
3611
3612 /* If we change the relative order between pipe/planes enabling, we need
3613 * to change the workaround. */
3614 haswell_mode_set_planes_workaround(intel_crtc);
3615 haswell_crtc_enable_planes(crtc);
3411 3616
3412 /* 3617 /*
3413 * There seems to be a race in PCH platform hw (at least on some 3618 * There seems to be a race in PCH platform hw (at least on some
@@ -3501,7 +3706,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3501 } 3706 }
3502 3707
3503 intel_crtc->active = false; 3708 intel_crtc->active = false;
3504 intel_update_watermarks(dev); 3709 intel_update_watermarks(crtc);
3505 3710
3506 mutex_lock(&dev->struct_mutex); 3711 mutex_lock(&dev->struct_mutex);
3507 intel_update_fbc(dev); 3712 intel_update_fbc(dev);
@@ -3515,27 +3720,17 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
3515 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3720 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3516 struct intel_encoder *encoder; 3721 struct intel_encoder *encoder;
3517 int pipe = intel_crtc->pipe; 3722 int pipe = intel_crtc->pipe;
3518 int plane = intel_crtc->plane;
3519 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 3723 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3520 3724
3521 if (!intel_crtc->active) 3725 if (!intel_crtc->active)
3522 return; 3726 return;
3523 3727
3524 for_each_encoder_on_crtc(dev, crtc, encoder) 3728 haswell_crtc_disable_planes(crtc);
3525 encoder->disable(encoder);
3526
3527 intel_crtc_wait_for_pending_flips(crtc);
3528 drm_vblank_off(dev, pipe);
3529
3530 /* FBC must be disabled before disabling the plane on HSW. */
3531 if (dev_priv->fbc.plane == plane)
3532 intel_disable_fbc(dev);
3533
3534 hsw_disable_ips(intel_crtc);
3535 3729
3536 intel_crtc_update_cursor(crtc, false); 3730 for_each_encoder_on_crtc(dev, crtc, encoder) {
3537 intel_disable_planes(crtc); 3731 intel_opregion_notify_encoder(encoder, false);
3538 intel_disable_plane(dev_priv, plane, pipe); 3732 encoder->disable(encoder);
3733 }
3539 3734
3540 if (intel_crtc->config.has_pch_encoder) 3735 if (intel_crtc->config.has_pch_encoder)
3541 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false); 3736 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
@@ -3558,7 +3753,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
3558 } 3753 }
3559 3754
3560 intel_crtc->active = false; 3755 intel_crtc->active = false;
3561 intel_update_watermarks(dev); 3756 intel_update_watermarks(crtc);
3562 3757
3563 mutex_lock(&dev->struct_mutex); 3758 mutex_lock(&dev->struct_mutex);
3564 intel_update_fbc(dev); 3759 intel_update_fbc(dev);
@@ -3650,6 +3845,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
3650 struct intel_encoder *encoder; 3845 struct intel_encoder *encoder;
3651 int pipe = intel_crtc->pipe; 3846 int pipe = intel_crtc->pipe;
3652 int plane = intel_crtc->plane; 3847 int plane = intel_crtc->plane;
3848 bool is_dsi;
3653 3849
3654 WARN_ON(!crtc->enabled); 3850 WARN_ON(!crtc->enabled);
3655 3851
@@ -3657,13 +3853,15 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
3657 return; 3853 return;
3658 3854
3659 intel_crtc->active = true; 3855 intel_crtc->active = true;
3660 intel_update_watermarks(dev);
3661 3856
3662 for_each_encoder_on_crtc(dev, crtc, encoder) 3857 for_each_encoder_on_crtc(dev, crtc, encoder)
3663 if (encoder->pre_pll_enable) 3858 if (encoder->pre_pll_enable)
3664 encoder->pre_pll_enable(encoder); 3859 encoder->pre_pll_enable(encoder);
3665 3860
3666 vlv_enable_pll(intel_crtc); 3861 is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
3862
3863 if (!is_dsi)
3864 vlv_enable_pll(intel_crtc);
3667 3865
3668 for_each_encoder_on_crtc(dev, crtc, encoder) 3866 for_each_encoder_on_crtc(dev, crtc, encoder)
3669 if (encoder->pre_enable) 3867 if (encoder->pre_enable)
@@ -3673,7 +3871,8 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
3673 3871
3674 intel_crtc_load_lut(crtc); 3872 intel_crtc_load_lut(crtc);
3675 3873
3676 intel_enable_pipe(dev_priv, pipe, false); 3874 intel_update_watermarks(crtc);
3875 intel_enable_pipe(dev_priv, pipe, false, is_dsi);
3677 intel_enable_plane(dev_priv, plane, pipe); 3876 intel_enable_plane(dev_priv, plane, pipe);
3678 intel_enable_planes(crtc); 3877 intel_enable_planes(crtc);
3679 intel_crtc_update_cursor(crtc, true); 3878 intel_crtc_update_cursor(crtc, true);
@@ -3699,7 +3898,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
3699 return; 3898 return;
3700 3899
3701 intel_crtc->active = true; 3900 intel_crtc->active = true;
3702 intel_update_watermarks(dev);
3703 3901
3704 for_each_encoder_on_crtc(dev, crtc, encoder) 3902 for_each_encoder_on_crtc(dev, crtc, encoder)
3705 if (encoder->pre_enable) 3903 if (encoder->pre_enable)
@@ -3711,7 +3909,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
3711 3909
3712 intel_crtc_load_lut(crtc); 3910 intel_crtc_load_lut(crtc);
3713 3911
3714 intel_enable_pipe(dev_priv, pipe, false); 3912 intel_update_watermarks(crtc);
3913 intel_enable_pipe(dev_priv, pipe, false, false);
3715 intel_enable_plane(dev_priv, plane, pipe); 3914 intel_enable_plane(dev_priv, plane, pipe);
3716 intel_enable_planes(crtc); 3915 intel_enable_planes(crtc);
3717 /* The fixup needs to happen before cursor is enabled */ 3916 /* The fixup needs to happen before cursor is enabled */
@@ -3778,11 +3977,15 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3778 if (encoder->post_disable) 3977 if (encoder->post_disable)
3779 encoder->post_disable(encoder); 3978 encoder->post_disable(encoder);
3780 3979
3781 i9xx_disable_pll(dev_priv, pipe); 3980 if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
3981 vlv_disable_pll(dev_priv, pipe);
3982 else if (!IS_VALLEYVIEW(dev))
3983 i9xx_disable_pll(dev_priv, pipe);
3782 3984
3783 intel_crtc->active = false; 3985 intel_crtc->active = false;
3986 intel_update_watermarks(crtc);
3987
3784 intel_update_fbc(dev); 3988 intel_update_fbc(dev);
3785 intel_update_watermarks(dev);
3786} 3989}
3787 3990
3788static void i9xx_crtc_off(struct drm_crtc *crtc) 3991static void i9xx_crtc_off(struct drm_crtc *crtc)
@@ -3856,6 +4059,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
3856 dev_priv->display.off(crtc); 4059 dev_priv->display.off(crtc);
3857 4060
3858 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); 4061 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
4062 assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
3859 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); 4063 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3860 4064
3861 if (crtc->fb) { 4065 if (crtc->fb) {
@@ -4049,8 +4253,7 @@ retry:
4049 */ 4253 */
4050 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; 4254 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
4051 4255
4052 fdi_dotclock = adjusted_mode->clock; 4256 fdi_dotclock = adjusted_mode->crtc_clock;
4053 fdi_dotclock /= pipe_config->pixel_multiplier;
4054 4257
4055 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw, 4258 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
4056 pipe_config->pipe_bpp); 4259 pipe_config->pipe_bpp);
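The lane computation above reduces to bandwidth arithmetic: the FDI dot clock times bits per pixel must fit within lanes * link_bw * 8 bits of link capacity. A hedged worked example of the ceiling division such a get-lanes-required helper must perform; any extra headroom factor the real helper applies is not visible in this hunk and is omitted here:

#include <stdio.h>

/* lanes needed so that dotclock(kHz) * bpp <= lanes * link_bw(kHz) * 8 */
static int lanes_required(int fdi_dotclock, int link_bw, int bpp)
{
	long bps = (long)fdi_dotclock * bpp;

	return (int)((bps + (long)link_bw * 8 - 1) / ((long)link_bw * 8));
}

int main(void)
{
	/* The fixed 2.7 GHz FDI link, in the driver's kHz-style units. */
	int link_bw = 270000;

	printf("154 MHz dotclock at 24 bpp: %d lanes\n",
	       lanes_required(154000, link_bw, 24));	/* -> 2 */
	return 0;
}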
@@ -4092,13 +4295,39 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
4092 struct drm_device *dev = crtc->base.dev; 4295 struct drm_device *dev = crtc->base.dev;
4093 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 4296 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
4094 4297
4095 if (HAS_PCH_SPLIT(dev)) { 4298 /* FIXME should check pixel clock limits on all platforms */
4096 /* FDI link clock is fixed at 2.7G */ 4299 if (INTEL_INFO(dev)->gen < 4) {
4097 if (pipe_config->requested_mode.clock * 3 4300 struct drm_i915_private *dev_priv = dev->dev_private;
4098 > IRONLAKE_FDI_FREQ * 4) 4301 int clock_limit =
4302 dev_priv->display.get_display_clock_speed(dev);
4303
4304 /*
4305 * Enable pixel doubling when the dot clock
4306 * is > 90% of the (display) core speed.
4307 *
4308 * GDG double wide on either pipe,
4309 * otherwise pipe A only.
4310 */
4311 if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
4312 adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
4313 clock_limit *= 2;
4314 pipe_config->double_wide = true;
4315 }
4316
4317 if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
4099 return -EINVAL; 4318 return -EINVAL;
4100 } 4319 }
4101 4320
4321 /*
4322 * Pipe horizontal size must be even in:
4323 * - DVO ganged mode
4324 * - LVDS dual channel mode
4325 * - Double wide pipe
4326 */
4327 if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4328 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
4329 pipe_config->pipe_src_w &= ~1;
4330
4102 /* Cantiga+ cannot handle modes with a hsync front porch of 0. 4331 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
4103 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 4332 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
4104 */ 4333 */
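The gen < 4 check added above is easiest to follow with numbers: a dot clock over nine tenths of the display core clock forces double-wide mode (doubling the budget), and anything still over the doubled limit is rejected. A standalone sketch mirroring that logic, including the even-width masking applied for double-wide and dual-link LVDS:

#include <stdbool.h>
#include <stdio.h>

struct cfg { bool double_wide; int pipe_src_w; };

/* Returns 0 on success, -1 if the mode exceeds even the doubled limit. */
static int check_clock(int crtc_clock, int core_clock, bool can_double,
		       struct cfg *c)
{
	int clock_limit = core_clock;

	if (can_double && crtc_clock > clock_limit * 9 / 10) {
		clock_limit *= 2;
		c->double_wide = true;
	}
	if (crtc_clock > clock_limit * 9 / 10)
		return -1;
	if (c->double_wide)
		c->pipe_src_w &= ~1;	/* pipe width must be even */
	return 0;
}

int main(void)
{
	struct cfg c = { false, 1365 };

	/* 266 MHz dot clock on a 200 MHz core: needs double wide. */
	if (check_clock(266000, 200000, true, &c) == 0)
		printf("double_wide=%d width=%d\n", c.double_wide, c.pipe_src_w);
	return 0;
}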
@@ -4262,28 +4491,6 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4262 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 4491 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4263} 4492}
4264 4493
4265static int vlv_get_refclk(struct drm_crtc *crtc)
4266{
4267 struct drm_device *dev = crtc->dev;
4268 struct drm_i915_private *dev_priv = dev->dev_private;
4269 int refclk = 27000; /* for DP & HDMI */
4270
4271 return 100000; /* only one validated so far */
4272
4273 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
4274 refclk = 96000;
4275 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
4276 if (intel_panel_use_ssc(dev_priv))
4277 refclk = 100000;
4278 else
4279 refclk = 96000;
4280 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
4281 refclk = 100000;
4282 }
4283
4284 return refclk;
4285}
4286
4287static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) 4494static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4288{ 4495{
4289 struct drm_device *dev = crtc->dev; 4496 struct drm_device *dev = crtc->dev;
@@ -4291,7 +4498,7 @@ static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4291 int refclk; 4498 int refclk;
4292 4499
4293 if (IS_VALLEYVIEW(dev)) { 4500 if (IS_VALLEYVIEW(dev)) {
4294 refclk = vlv_get_refclk(crtc); 4501 refclk = 100000;
4295 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 4502 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4296 intel_panel_use_ssc(dev_priv) && num_connectors < 2) { 4503 intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4297 refclk = dev_priv->vbt.lvds_ssc_freq * 1000; 4504 refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
@@ -4349,7 +4556,8 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
4349 } 4556 }
4350} 4557}
4351 4558
4352static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv) 4559static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
4560 pipe)
4353{ 4561{
4354 u32 reg_val; 4562 u32 reg_val;
4355 4563
@@ -4357,24 +4565,24 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
4357 * PLLB opamp always calibrates to max value of 0x3f, force enable it 4565 * PLLB opamp always calibrates to max value of 0x3f, force enable it
4358 * and set it to a reasonable value instead. 4566 * and set it to a reasonable value instead.
4359 */ 4567 */
4360 reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1)); 4568 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
4361 reg_val &= 0xffffff00; 4569 reg_val &= 0xffffff00;
4362 reg_val |= 0x00000030; 4570 reg_val |= 0x00000030;
4363 vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val); 4571 vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
4364 4572
4365 reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION); 4573 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
4366 reg_val &= 0x8cffffff; 4574 reg_val &= 0x8cffffff;
4367 reg_val = 0x8c000000; 4575 reg_val = 0x8c000000;
4368 vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val); 4576 vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
4369 4577
4370 reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1)); 4578 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
4371 reg_val &= 0xffffff00; 4579 reg_val &= 0xffffff00;
4372 vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val); 4580 vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
4373 4581
4374 reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION); 4582 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
4375 reg_val &= 0x00ffffff; 4583 reg_val &= 0x00ffffff;
4376 reg_val |= 0xb0000000; 4584 reg_val |= 0xb0000000;
4377 vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val); 4585 vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
4378} 4586}
4379 4587
4380static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, 4588static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
@@ -4440,18 +4648,18 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4440 4648
4441 /* PLL B needs special handling */ 4649 /* PLL B needs special handling */
4442 if (pipe) 4650 if (pipe)
4443 vlv_pllb_recal_opamp(dev_priv); 4651 vlv_pllb_recal_opamp(dev_priv, pipe);
4444 4652
4445 /* Set up Tx target for periodic Rcomp update */ 4653 /* Set up Tx target for periodic Rcomp update */
4446 vlv_dpio_write(dev_priv, DPIO_IREF_BCAST, 0x0100000f); 4654 vlv_dpio_write(dev_priv, pipe, DPIO_IREF_BCAST, 0x0100000f);
4447 4655
4448 /* Disable target IRef on PLL */ 4656 /* Disable target IRef on PLL */
4449 reg_val = vlv_dpio_read(dev_priv, DPIO_IREF_CTL(pipe)); 4657 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF_CTL(pipe));
4450 reg_val &= 0x00ffffff; 4658 reg_val &= 0x00ffffff;
4451 vlv_dpio_write(dev_priv, DPIO_IREF_CTL(pipe), reg_val); 4659 vlv_dpio_write(dev_priv, pipe, DPIO_IREF_CTL(pipe), reg_val);
4452 4660
4453 /* Disable fast lock */ 4661 /* Disable fast lock */
4454 vlv_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x610); 4662 vlv_dpio_write(dev_priv, pipe, DPIO_FASTCLK_DISABLE, 0x610);
4455 4663
4456 /* Set idtafcrecal before PLL is enabled */ 4664 /* Set idtafcrecal before PLL is enabled */
4457 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 4665 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
@@ -4465,55 +4673,55 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4465 * Note: don't use the DAC post divider as it seems unstable. 4673 * Note: don't use the DAC post divider as it seems unstable.
4466 */ 4674 */
4467 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 4675 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
4468 vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv); 4676 vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
4469 4677
4470 mdiv |= DPIO_ENABLE_CALIBRATION; 4678 mdiv |= DPIO_ENABLE_CALIBRATION;
4471 vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv); 4679 vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
4472 4680
4473 /* Set HBR and RBR LPF coefficients */ 4681 /* Set HBR and RBR LPF coefficients */
4474 if (crtc->config.port_clock == 162000 || 4682 if (crtc->config.port_clock == 162000 ||
4475 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) || 4683 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
4476 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) 4684 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
4477 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), 4685 vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
4478 0x009f0003); 4686 0x009f0003);
4479 else 4687 else
4480 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), 4688 vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
4481 0x00d0000f); 4689 0x00d0000f);
4482 4690
4483 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) || 4691 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
4484 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) { 4692 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
4485 /* Use SSC source */ 4693 /* Use SSC source */
4486 if (!pipe) 4694 if (!pipe)
4487 vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe), 4695 vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
4488 0x0df40000); 4696 0x0df40000);
4489 else 4697 else
4490 vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe), 4698 vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
4491 0x0df70000); 4699 0x0df70000);
4492 } else { /* HDMI or VGA */ 4700 } else { /* HDMI or VGA */
4493 /* Use bend source */ 4701 /* Use bend source */
4494 if (!pipe) 4702 if (!pipe)
4495 vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe), 4703 vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
4496 0x0df70000); 4704 0x0df70000);
4497 else 4705 else
4498 vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe), 4706 vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
4499 0x0df40000); 4707 0x0df40000);
4500 } 4708 }
4501 4709
4502 coreclk = vlv_dpio_read(dev_priv, DPIO_CORE_CLK(pipe)); 4710 coreclk = vlv_dpio_read(dev_priv, pipe, DPIO_CORE_CLK(pipe));
4503 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 4711 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
4504 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) || 4712 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
4505 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) 4713 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
4506 coreclk |= 0x01000000; 4714 coreclk |= 0x01000000;
4507 vlv_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), coreclk); 4715 vlv_dpio_write(dev_priv, pipe, DPIO_CORE_CLK(pipe), coreclk);
4508 4716
4509 vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000); 4717 vlv_dpio_write(dev_priv, pipe, DPIO_PLL_CML(pipe), 0x87871000);
4510 4718
4511 /* Enable DPIO clock input */ 4719 /* Enable DPIO clock input */
4512 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | 4720 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
4513 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; 4721 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
4514 if (pipe) 4722 /* We should never disable this, set it here for state tracking */
4723 if (pipe == PIPE_B)
4515 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 4724 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
4516
4517 dpll |= DPLL_VCO_ENABLE; 4725 dpll |= DPLL_VCO_ENABLE;
4518 crtc->config.dpll_hw_state.dpll = dpll; 4726 crtc->config.dpll_hw_state.dpll = dpll;
4519 4727
@@ -4651,7 +4859,6 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
4651 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 4859 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
4652 struct drm_display_mode *adjusted_mode = 4860 struct drm_display_mode *adjusted_mode =
4653 &intel_crtc->config.adjusted_mode; 4861 &intel_crtc->config.adjusted_mode;
4654 struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
4655 uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end; 4862 uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
4656 4863
 4657 /* We need to be careful not to change the adjusted mode, for otherwise 4864
@@ -4704,7 +4911,8 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
4704 * always be the user's requested size. 4911 * always be the user's requested size.
4705 */ 4912 */
4706 I915_WRITE(PIPESRC(pipe), 4913 I915_WRITE(PIPESRC(pipe),
4707 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 4914 ((intel_crtc->config.pipe_src_w - 1) << 16) |
4915 (intel_crtc->config.pipe_src_h - 1));
4708} 4916}
4709 4917
4710static void intel_get_pipe_timings(struct intel_crtc *crtc, 4918static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@ -4742,8 +4950,11 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
4742 } 4950 }
4743 4951
4744 tmp = I915_READ(PIPESRC(crtc->pipe)); 4952 tmp = I915_READ(PIPESRC(crtc->pipe));
4745 pipe_config->requested_mode.vdisplay = (tmp & 0xffff) + 1; 4953 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
4746 pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1; 4954 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
4955
4956 pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
4957 pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
4747} 4958}
4748 4959
4749static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc, 4960static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
@@ -4763,7 +4974,7 @@ static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
4763 4974
4764 crtc->mode.flags = pipe_config->adjusted_mode.flags; 4975 crtc->mode.flags = pipe_config->adjusted_mode.flags;
4765 4976
4766 crtc->mode.clock = pipe_config->adjusted_mode.clock; 4977 crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock;
4767 crtc->mode.flags |= pipe_config->adjusted_mode.flags; 4978 crtc->mode.flags |= pipe_config->adjusted_mode.flags;
4768} 4979}
4769 4980
@@ -4779,17 +4990,8 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
4779 I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE) 4990 I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
4780 pipeconf |= PIPECONF_ENABLE; 4991 pipeconf |= PIPECONF_ENABLE;
4781 4992
4782 if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) { 4993 if (intel_crtc->config.double_wide)
4783 /* Enable pixel doubling when the dot clock is > 90% of the (display) 4994 pipeconf |= PIPECONF_DOUBLE_WIDE;
4784 * core speed.
4785 *
4786 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
4787 * pipe == 0 check?
4788 */
4789 if (intel_crtc->config.requested_mode.clock >
4790 dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
4791 pipeconf |= PIPECONF_DOUBLE_WIDE;
4792 }
4793 4995
4794 /* only g4x and later have fancy bpc/dither controls */ 4996 /* only g4x and later have fancy bpc/dither controls */
4795 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { 4997 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
@@ -4843,14 +5045,13 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4843 struct drm_device *dev = crtc->dev; 5045 struct drm_device *dev = crtc->dev;
4844 struct drm_i915_private *dev_priv = dev->dev_private; 5046 struct drm_i915_private *dev_priv = dev->dev_private;
4845 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5047 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4846 struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
4847 int pipe = intel_crtc->pipe; 5048 int pipe = intel_crtc->pipe;
4848 int plane = intel_crtc->plane; 5049 int plane = intel_crtc->plane;
4849 int refclk, num_connectors = 0; 5050 int refclk, num_connectors = 0;
4850 intel_clock_t clock, reduced_clock; 5051 intel_clock_t clock, reduced_clock;
4851 u32 dspcntr; 5052 u32 dspcntr;
4852 bool ok, has_reduced_clock = false; 5053 bool ok, has_reduced_clock = false;
4853 bool is_lvds = false; 5054 bool is_lvds = false, is_dsi = false;
4854 struct intel_encoder *encoder; 5055 struct intel_encoder *encoder;
4855 const intel_limit_t *limit; 5056 const intel_limit_t *limit;
4856 int ret; 5057 int ret;
@@ -4860,42 +5061,49 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4860 case INTEL_OUTPUT_LVDS: 5061 case INTEL_OUTPUT_LVDS:
4861 is_lvds = true; 5062 is_lvds = true;
4862 break; 5063 break;
5064 case INTEL_OUTPUT_DSI:
5065 is_dsi = true;
5066 break;
4863 } 5067 }
4864 5068
4865 num_connectors++; 5069 num_connectors++;
4866 } 5070 }
4867 5071
4868 refclk = i9xx_get_refclk(crtc, num_connectors); 5072 if (is_dsi)
5073 goto skip_dpll;
4869 5074
4870 /* 5075 if (!intel_crtc->config.clock_set) {
4871 * Returns a set of divisors for the desired target clock with the given 5076 refclk = i9xx_get_refclk(crtc, num_connectors);
4872 * refclk, or FALSE. The returned values represent the clock equation:
4873 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4874 */
4875 limit = intel_limit(crtc, refclk);
4876 ok = dev_priv->display.find_dpll(limit, crtc,
4877 intel_crtc->config.port_clock,
4878 refclk, NULL, &clock);
4879 if (!ok && !intel_crtc->config.clock_set) {
4880 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4881 return -EINVAL;
4882 }
4883 5077
4884 if (is_lvds && dev_priv->lvds_downclock_avail) {
4885 /* 5078 /*
4886 * Ensure we match the reduced clock's P to the target clock. 5079 * Returns a set of divisors for the desired target clock with
4887 * If the clocks don't match, we can't switch the display clock 5080 * the given refclk, or FALSE. The returned values represent
 4888 * by using the FP0/FP1. In such case we will disable the LVDS 5081 * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
4889 * downclock feature. 5082 * 2) / p1 / p2.
4890 */ 5083 */
4891 has_reduced_clock = 5084 limit = intel_limit(crtc, refclk);
4892 dev_priv->display.find_dpll(limit, crtc, 5085 ok = dev_priv->display.find_dpll(limit, crtc,
4893 dev_priv->lvds_downclock, 5086 intel_crtc->config.port_clock,
4894 refclk, &clock, 5087 refclk, NULL, &clock);
4895 &reduced_clock); 5088 if (!ok) {
4896 } 5089 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4897 /* Compat-code for transition, will disappear. */ 5090 return -EINVAL;
4898 if (!intel_crtc->config.clock_set) { 5091 }
5092
5093 if (is_lvds && dev_priv->lvds_downclock_avail) {
5094 /*
5095 * Ensure we match the reduced clock's P to the target
5096 * clock. If the clocks don't match, we can't switch
5097 * the display clock by using the FP0/FP1. In such case
5098 * we will disable the LVDS downclock feature.
5099 */
5100 has_reduced_clock =
5101 dev_priv->display.find_dpll(limit, crtc,
5102 dev_priv->lvds_downclock,
5103 refclk, &clock,
5104 &reduced_clock);
5105 }
5106 /* Compat-code for transition, will disappear. */
4899 intel_crtc->config.dpll.n = clock.n; 5107 intel_crtc->config.dpll.n = clock.n;
4900 intel_crtc->config.dpll.m1 = clock.m1; 5108 intel_crtc->config.dpll.m1 = clock.m1;
4901 intel_crtc->config.dpll.m2 = clock.m2; 5109 intel_crtc->config.dpll.m2 = clock.m2;
@@ -4903,17 +5111,19 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4903 intel_crtc->config.dpll.p2 = clock.p2; 5111 intel_crtc->config.dpll.p2 = clock.p2;
4904 } 5112 }
4905 5113
4906 if (IS_GEN2(dev)) 5114 if (IS_GEN2(dev)) {
4907 i8xx_update_pll(intel_crtc, 5115 i8xx_update_pll(intel_crtc,
4908 has_reduced_clock ? &reduced_clock : NULL, 5116 has_reduced_clock ? &reduced_clock : NULL,
4909 num_connectors); 5117 num_connectors);
4910 else if (IS_VALLEYVIEW(dev)) 5118 } else if (IS_VALLEYVIEW(dev)) {
4911 vlv_update_pll(intel_crtc); 5119 vlv_update_pll(intel_crtc);
4912 else 5120 } else {
4913 i9xx_update_pll(intel_crtc, 5121 i9xx_update_pll(intel_crtc,
4914 has_reduced_clock ? &reduced_clock : NULL, 5122 has_reduced_clock ? &reduced_clock : NULL,
4915 num_connectors); 5123 num_connectors);
5124 }
4916 5125
5126skip_dpll:
4917 /* Set up the display plane register */ 5127 /* Set up the display plane register */
4918 dspcntr = DISPPLANE_GAMMA_ENABLE; 5128 dspcntr = DISPPLANE_GAMMA_ENABLE;
4919 5129
@@ -4930,8 +5140,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4930 * which should always be the user's requested size. 5140 * which should always be the user's requested size.
4931 */ 5141 */
4932 I915_WRITE(DSPSIZE(plane), 5142 I915_WRITE(DSPSIZE(plane),
4933 ((mode->vdisplay - 1) << 16) | 5143 ((intel_crtc->config.pipe_src_h - 1) << 16) |
4934 (mode->hdisplay - 1)); 5144 (intel_crtc->config.pipe_src_w - 1));
4935 I915_WRITE(DSPPOS(plane), 0); 5145 I915_WRITE(DSPPOS(plane), 0);
4936 5146
4937 i9xx_set_pipeconf(intel_crtc); 5147 i9xx_set_pipeconf(intel_crtc);
@@ -4941,8 +5151,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4941 5151
4942 ret = intel_pipe_set_base(crtc, x, y, fb); 5152 ret = intel_pipe_set_base(crtc, x, y, fb);
4943 5153
4944 intel_update_watermarks(dev);
4945
4946 return ret; 5154 return ret;
4947} 5155}
4948 5156
@@ -4973,6 +5181,32 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
4973 I915_READ(LVDS) & LVDS_BORDER_ENABLE; 5181 I915_READ(LVDS) & LVDS_BORDER_ENABLE;
4974} 5182}
4975 5183
5184static void vlv_crtc_clock_get(struct intel_crtc *crtc,
5185 struct intel_crtc_config *pipe_config)
5186{
5187 struct drm_device *dev = crtc->base.dev;
5188 struct drm_i915_private *dev_priv = dev->dev_private;
5189 int pipe = pipe_config->cpu_transcoder;
5190 intel_clock_t clock;
5191 u32 mdiv;
5192 int refclk = 100000;
5193
5194 mutex_lock(&dev_priv->dpio_lock);
5195 mdiv = vlv_dpio_read(dev_priv, pipe, DPIO_DIV(pipe));
5196 mutex_unlock(&dev_priv->dpio_lock);
5197
5198 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
5199 clock.m2 = mdiv & DPIO_M2DIV_MASK;
5200 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
5201 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
5202 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
5203
5204 clock.vco = refclk * clock.m1 * clock.m2 / clock.n;
5205 clock.dot = 2 * clock.vco / (clock.p1 * clock.p2);
5206
5207 pipe_config->port_clock = clock.dot / 10;
5208}
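vlv_crtc_clock_get() above reverses the mdiv packing into a dot clock: vco = refclk * m1 * m2 / n, dot = 2 * vco / (p1 * p2), then a divide by 10 for port_clock. A standalone check of that arithmetic with hand-picked divider values; the field values are illustrative, not a real register dump:

#include <stdio.h>

int main(void)
{
	int refclk = 100000;	/* kHz, the fixed VLV reference used here */
	int m1 = 2, m2 = 27, n = 1, p1 = 2, p2 = 2;

	long vco = (long)refclk * m1 * m2 / n;	/* 5,400,000 kHz */
	long dot = 2 * vco / (p1 * p2);		/* 2,700,000 kHz */

	printf("vco=%ld kHz dot=%ld kHz port_clock=%ld\n", vco, dot, dot / 10);
	return 0;
}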
5209
4976static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 5210static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
4977 struct intel_crtc_config *pipe_config) 5211 struct intel_crtc_config *pipe_config)
4978{ 5212{
@@ -4987,6 +5221,25 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
4987 if (!(tmp & PIPECONF_ENABLE)) 5221 if (!(tmp & PIPECONF_ENABLE))
4988 return false; 5222 return false;
4989 5223
5224 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
5225 switch (tmp & PIPECONF_BPC_MASK) {
5226 case PIPECONF_6BPC:
5227 pipe_config->pipe_bpp = 18;
5228 break;
5229 case PIPECONF_8BPC:
5230 pipe_config->pipe_bpp = 24;
5231 break;
5232 case PIPECONF_10BPC:
5233 pipe_config->pipe_bpp = 30;
5234 break;
5235 default:
5236 break;
5237 }
5238 }
5239
5240 if (INTEL_INFO(dev)->gen < 4)
5241 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
5242
4990 intel_get_pipe_timings(crtc, pipe_config); 5243 intel_get_pipe_timings(crtc, pipe_config);
4991 5244
4992 i9xx_get_pfit_config(crtc, pipe_config); 5245 i9xx_get_pfit_config(crtc, pipe_config);
@@ -5019,6 +5272,11 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5019 DPLL_PORTB_READY_MASK); 5272 DPLL_PORTB_READY_MASK);
5020 } 5273 }
5021 5274
5275 if (IS_VALLEYVIEW(dev))
5276 vlv_crtc_clock_get(crtc, pipe_config);
5277 else
5278 i9xx_crtc_clock_get(crtc, pipe_config);
5279
5022 return true; 5280 return true;
5023} 5281}
5024 5282
@@ -5826,25 +6084,67 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5826 6084
5827 ret = intel_pipe_set_base(crtc, x, y, fb); 6085 ret = intel_pipe_set_base(crtc, x, y, fb);
5828 6086
5829 intel_update_watermarks(dev);
5830
5831 return ret; 6087 return ret;
5832} 6088}
5833 6089
5834static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, 6090static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
5835 struct intel_crtc_config *pipe_config) 6091 struct intel_link_m_n *m_n)
5836{ 6092{
5837 struct drm_device *dev = crtc->base.dev; 6093 struct drm_device *dev = crtc->base.dev;
5838 struct drm_i915_private *dev_priv = dev->dev_private; 6094 struct drm_i915_private *dev_priv = dev->dev_private;
5839 enum transcoder transcoder = pipe_config->cpu_transcoder; 6095 enum pipe pipe = crtc->pipe;
6096
6097 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
6098 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
6099 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
6100 & ~TU_SIZE_MASK;
6101 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
6102 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
6103 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6104}
6105
6106static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
6107 enum transcoder transcoder,
6108 struct intel_link_m_n *m_n)
6109{
6110 struct drm_device *dev = crtc->base.dev;
6111 struct drm_i915_private *dev_priv = dev->dev_private;
6112 enum pipe pipe = crtc->pipe;
6113
6114 if (INTEL_INFO(dev)->gen >= 5) {
6115 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
6116 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
6117 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
6118 & ~TU_SIZE_MASK;
6119 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
6120 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
6121 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6122 } else {
6123 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
6124 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
6125 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
6126 & ~TU_SIZE_MASK;
6127 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
6128 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
6129 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6130 }
6131}
5840 6132
5841 pipe_config->fdi_m_n.link_m = I915_READ(PIPE_LINK_M1(transcoder)); 6133void intel_dp_get_m_n(struct intel_crtc *crtc,
5842 pipe_config->fdi_m_n.link_n = I915_READ(PIPE_LINK_N1(transcoder)); 6134 struct intel_crtc_config *pipe_config)
5843 pipe_config->fdi_m_n.gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) 6135{
5844 & ~TU_SIZE_MASK; 6136 if (crtc->config.has_pch_encoder)
5845 pipe_config->fdi_m_n.gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); 6137 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
5846 pipe_config->fdi_m_n.tu = ((I915_READ(PIPE_DATA_M1(transcoder)) 6138 else
5847 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 6139 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6140 &pipe_config->dp_m_n);
6141}
6142
6143static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
6144 struct intel_crtc_config *pipe_config)
6145{
6146 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6147 &pipe_config->fdi_m_n);
5848} 6148}
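The M/N pairs read back above encode the ratio between stream rate and link rate, which is what lets intel_dotclock_calculate() at the end of this patch recover a dot clock as link_freq * link_m / link_n. A hedged sketch of that relation; the rounding behavior is an assumption here, since the helper body is not part of this excerpt:

#include <stdint.h>
#include <stdio.h>

struct link_m_n { uint32_t link_m, link_n; };

/* Dot clock recovered from the link M/N ratio: link_freq * M / N. */
static int dotclock_calculate(int link_freq, const struct link_m_n *m_n)
{
	if (!m_n->link_n)
		return 0;
	return (int)(((uint64_t)m_n->link_m * link_freq + m_n->link_n / 2) /
		     m_n->link_n);
}

int main(void)
{
	/* E.g. a 148.5 MHz pixel clock carried over a 270 MHz DP link. */
	struct link_m_n m_n = { .link_m = 148500, .link_n = 270000 };

	printf("dotclock = %d kHz\n", dotclock_calculate(270000, &m_n));
	return 0;
}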
5849 6149
5850static void ironlake_get_pfit_config(struct intel_crtc *crtc, 6150static void ironlake_get_pfit_config(struct intel_crtc *crtc,
@@ -5885,6 +6185,23 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5885 if (!(tmp & PIPECONF_ENABLE)) 6185 if (!(tmp & PIPECONF_ENABLE))
5886 return false; 6186 return false;
5887 6187
6188 switch (tmp & PIPECONF_BPC_MASK) {
6189 case PIPECONF_6BPC:
6190 pipe_config->pipe_bpp = 18;
6191 break;
6192 case PIPECONF_8BPC:
6193 pipe_config->pipe_bpp = 24;
6194 break;
6195 case PIPECONF_10BPC:
6196 pipe_config->pipe_bpp = 30;
6197 break;
6198 case PIPECONF_12BPC:
6199 pipe_config->pipe_bpp = 36;
6200 break;
6201 default:
6202 break;
6203 }
6204
5888 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 6205 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
5889 struct intel_shared_dpll *pll; 6206 struct intel_shared_dpll *pll;
5890 6207
@@ -5916,6 +6233,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5916 pipe_config->pixel_multiplier = 6233 pipe_config->pixel_multiplier =
5917 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) 6234 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
5918 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; 6235 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
6236
6237 ironlake_pch_clock_get(crtc, pipe_config);
5919 } else { 6238 } else {
5920 pipe_config->pixel_multiplier = 1; 6239 pipe_config->pixel_multiplier = 1;
5921 } 6240 }
@@ -5972,8 +6291,8 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
5972 * register. Callers should take care of disabling all the display engine 6291 * register. Callers should take care of disabling all the display engine
5973 * functions, doing the mode unset, fixing interrupts, etc. 6292 * functions, doing the mode unset, fixing interrupts, etc.
5974 */ 6293 */
5975void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 6294static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
5976 bool switch_to_fclk, bool allow_power_down) 6295 bool switch_to_fclk, bool allow_power_down)
5977{ 6296{
5978 uint32_t val; 6297 uint32_t val;
5979 6298
@@ -6001,7 +6320,10 @@ void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
6001 6320
6002 val = I915_READ(D_COMP); 6321 val = I915_READ(D_COMP);
6003 val |= D_COMP_COMP_DISABLE; 6322 val |= D_COMP_COMP_DISABLE;
6004 I915_WRITE(D_COMP, val); 6323 mutex_lock(&dev_priv->rps.hw_lock);
6324 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
6325 DRM_ERROR("Failed to disable D_COMP\n");
6326 mutex_unlock(&dev_priv->rps.hw_lock);
6005 POSTING_READ(D_COMP); 6327 POSTING_READ(D_COMP);
6006 ndelay(100); 6328 ndelay(100);
6007 6329
@@ -6020,7 +6342,7 @@ void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
6020 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 6342 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
6021 * source. 6343 * source.
6022 */ 6344 */
6023void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 6345static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6024{ 6346{
6025 uint32_t val; 6347 uint32_t val;
6026 6348
@@ -6043,7 +6365,10 @@ void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6043 val = I915_READ(D_COMP); 6365 val = I915_READ(D_COMP);
6044 val |= D_COMP_COMP_FORCE; 6366 val |= D_COMP_COMP_FORCE;
6045 val &= ~D_COMP_COMP_DISABLE; 6367 val &= ~D_COMP_COMP_DISABLE;
6046 I915_WRITE(D_COMP, val); 6368 mutex_lock(&dev_priv->rps.hw_lock);
6369 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
6370 DRM_ERROR("Failed to enable D_COMP\n");
6371 mutex_unlock(&dev_priv->rps.hw_lock);
6047 POSTING_READ(D_COMP); 6372 POSTING_READ(D_COMP);
6048 6373
6049 val = I915_READ(LCPLL_CTL); 6374 val = I915_READ(LCPLL_CTL);
@@ -6280,8 +6605,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
6280 6605
6281 ret = intel_pipe_set_base(crtc, x, y, fb); 6606 ret = intel_pipe_set_base(crtc, x, y, fb);
6282 6607
6283 intel_update_watermarks(dev);
6284
6285 return ret; 6608 return ret;
6286} 6609}
6287 6610
@@ -6644,49 +6967,6 @@ void intel_write_eld(struct drm_encoder *encoder,
6644 dev_priv->display.write_eld(connector, crtc); 6967 dev_priv->display.write_eld(connector, crtc);
6645} 6968}
6646 6969
6647/** Loads the palette/gamma unit for the CRTC with the prepared values */
6648void intel_crtc_load_lut(struct drm_crtc *crtc)
6649{
6650 struct drm_device *dev = crtc->dev;
6651 struct drm_i915_private *dev_priv = dev->dev_private;
6652 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6653 enum pipe pipe = intel_crtc->pipe;
6654 int palreg = PALETTE(pipe);
6655 int i;
6656 bool reenable_ips = false;
6657
6658 /* The clocks have to be on to load the palette. */
6659 if (!crtc->enabled || !intel_crtc->active)
6660 return;
6661
6662 if (!HAS_PCH_SPLIT(dev_priv->dev))
6663 assert_pll_enabled(dev_priv, pipe);
6664
6665 /* use legacy palette for Ironlake */
6666 if (HAS_PCH_SPLIT(dev))
6667 palreg = LGC_PALETTE(pipe);
6668
6669 /* Workaround : Do not read or write the pipe palette/gamma data while
6670 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6671 */
6672 if (intel_crtc->config.ips_enabled &&
6673 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
6674 GAMMA_MODE_MODE_SPLIT)) {
6675 hsw_disable_ips(intel_crtc);
6676 reenable_ips = true;
6677 }
6678
6679 for (i = 0; i < 256; i++) {
6680 I915_WRITE(palreg + 4 * i,
6681 (intel_crtc->lut_r[i] << 16) |
6682 (intel_crtc->lut_g[i] << 8) |
6683 intel_crtc->lut_b[i]);
6684 }
6685
6686 if (reenable_ips)
6687 hsw_enable_ips(intel_crtc);
6688}
6689
6690static void i845_update_cursor(struct drm_crtc *crtc, u32 base) 6970static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
6691{ 6971{
6692 struct drm_device *dev = crtc->dev; 6972 struct drm_device *dev = crtc->dev;
@@ -6782,23 +7062,20 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
6782 int pipe = intel_crtc->pipe; 7062 int pipe = intel_crtc->pipe;
6783 int x = intel_crtc->cursor_x; 7063 int x = intel_crtc->cursor_x;
6784 int y = intel_crtc->cursor_y; 7064 int y = intel_crtc->cursor_y;
6785 u32 base, pos; 7065 u32 base = 0, pos = 0;
6786 bool visible; 7066 bool visible;
6787 7067
6788 pos = 0; 7068 if (on)
6789
6790 if (on && crtc->enabled && crtc->fb) {
6791 base = intel_crtc->cursor_addr; 7069 base = intel_crtc->cursor_addr;
6792 if (x > (int) crtc->fb->width)
6793 base = 0;
6794 7070
6795 if (y > (int) crtc->fb->height) 7071 if (x >= intel_crtc->config.pipe_src_w)
6796 base = 0; 7072 base = 0;
6797 } else 7073
7074 if (y >= intel_crtc->config.pipe_src_h)
6798 base = 0; 7075 base = 0;
6799 7076
6800 if (x < 0) { 7077 if (x < 0) {
6801 if (x + intel_crtc->cursor_width < 0) 7078 if (x + intel_crtc->cursor_width <= 0)
6802 base = 0; 7079 base = 0;
6803 7080
6804 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 7081 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
@@ -6807,7 +7084,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
6807 pos |= x << CURSOR_X_SHIFT; 7084 pos |= x << CURSOR_X_SHIFT;
6808 7085
6809 if (y < 0) { 7086 if (y < 0) {
6810 if (y + intel_crtc->cursor_height < 0) 7087 if (y + intel_crtc->cursor_height <= 0)
6811 base = 0; 7088 base = 0;
6812 7089
6813 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 7090 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
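The cursor position register holds sign/magnitude coordinates: a negative x or y sets a per-axis sign bit and stores the absolute value, and the cursor base is zeroed once the cursor is fully off screen, which is the exact edge the <= 0 fixes above correct. A standalone sketch of that encoding; the bit positions are placeholders rather than guaranteed hardware values:

#include <stdint.h>
#include <stdio.h>

#define CUR_SIGN    (1u << 15)	/* placeholder sign-bit position */
#define CUR_Y_SHIFT 16

static uint32_t cursor_pos(int x, int y, int w, int h, int *visible)
{
	uint32_t pos = 0;

	/* Fully off screen on either axis -> not visible at all. */
	*visible = !(x + w <= 0 || y + h <= 0);

	if (x < 0) { pos |= CUR_SIGN; x = -x; }
	pos |= (uint32_t)x;
	if (y < 0) { pos |= CUR_SIGN << CUR_Y_SHIFT; y = -y; }
	pos |= (uint32_t)y << CUR_Y_SHIFT;
	return pos;
}

int main(void)
{
	int vis;
	uint32_t pos = cursor_pos(-10, 20, 64, 64, &vis);

	printf("pos=0x%08x visible=%d\n", (unsigned)pos, vis);
	return 0;
}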
@@ -6959,27 +7236,6 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6959 return 0; 7236 return 0;
6960} 7237}
6961 7238
6962/** Sets the color ramps on behalf of RandR */
6963void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
6964 u16 blue, int regno)
6965{
6966 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6967
6968 intel_crtc->lut_r[regno] = red >> 8;
6969 intel_crtc->lut_g[regno] = green >> 8;
6970 intel_crtc->lut_b[regno] = blue >> 8;
6971}
6972
6973void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
6974 u16 *blue, int regno)
6975{
6976 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6977
6978 *red = intel_crtc->lut_r[regno] << 8;
6979 *green = intel_crtc->lut_g[regno] << 8;
6980 *blue = intel_crtc->lut_b[regno] << 8;
6981}
6982
6983static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 7239static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
6984 u16 *blue, uint32_t start, uint32_t size) 7240 u16 *blue, uint32_t start, uint32_t size)
6985{ 7241{
@@ -7228,6 +7484,22 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
7228 mutex_unlock(&crtc->mutex); 7484 mutex_unlock(&crtc->mutex);
7229} 7485}
7230 7486
7487static int i9xx_pll_refclk(struct drm_device *dev,
7488 const struct intel_crtc_config *pipe_config)
7489{
7490 struct drm_i915_private *dev_priv = dev->dev_private;
7491 u32 dpll = pipe_config->dpll_hw_state.dpll;
7492
7493 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
7494 return dev_priv->vbt.lvds_ssc_freq * 1000;
7495 else if (HAS_PCH_SPLIT(dev))
7496 return 120000;
7497 else if (!IS_GEN2(dev))
7498 return 96000;
7499 else
7500 return 48000;
7501}
7502
7231/* Returns the clock of the currently programmed mode of the given pipe. */ 7503/* Returns the clock of the currently programmed mode of the given pipe. */
7232static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 7504static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7233 struct intel_crtc_config *pipe_config) 7505 struct intel_crtc_config *pipe_config)
@@ -7235,14 +7507,15 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7235 struct drm_device *dev = crtc->base.dev; 7507 struct drm_device *dev = crtc->base.dev;
7236 struct drm_i915_private *dev_priv = dev->dev_private; 7508 struct drm_i915_private *dev_priv = dev->dev_private;
7237 int pipe = pipe_config->cpu_transcoder; 7509 int pipe = pipe_config->cpu_transcoder;
7238 u32 dpll = I915_READ(DPLL(pipe)); 7510 u32 dpll = pipe_config->dpll_hw_state.dpll;
7239 u32 fp; 7511 u32 fp;
7240 intel_clock_t clock; 7512 intel_clock_t clock;
7513 int refclk = i9xx_pll_refclk(dev, pipe_config);
7241 7514
7242 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 7515 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
7243 fp = I915_READ(FP0(pipe)); 7516 fp = pipe_config->dpll_hw_state.fp0;
7244 else 7517 else
7245 fp = I915_READ(FP1(pipe)); 7518 fp = pipe_config->dpll_hw_state.fp1;
7246 7519
7247 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 7520 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
7248 if (IS_PINEVIEW(dev)) { 7521 if (IS_PINEVIEW(dev)) {
@@ -7273,14 +7546,13 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7273 default: 7546 default:
7274 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " 7547 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
7275 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 7548 "mode\n", (int)(dpll & DPLL_MODE_MASK));
7276 pipe_config->adjusted_mode.clock = 0;
7277 return; 7549 return;
7278 } 7550 }
7279 7551
7280 if (IS_PINEVIEW(dev)) 7552 if (IS_PINEVIEW(dev))
7281 pineview_clock(96000, &clock); 7553 pineview_clock(refclk, &clock);
7282 else 7554 else
7283 i9xx_clock(96000, &clock); 7555 i9xx_clock(refclk, &clock);
7284 } else { 7556 } else {
7285 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); 7557 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
7286 7558
@@ -7288,13 +7560,6 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7288 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 7560 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
7289 DPLL_FPA01_P1_POST_DIV_SHIFT); 7561 DPLL_FPA01_P1_POST_DIV_SHIFT);
7290 clock.p2 = 14; 7562 clock.p2 = 14;
7291
7292 if ((dpll & PLL_REF_INPUT_MASK) ==
7293 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
7294 /* XXX: might not be 66MHz */
7295 i9xx_clock(66000, &clock);
7296 } else
7297 i9xx_clock(48000, &clock);
7298 } else { 7563 } else {
7299 if (dpll & PLL_P1_DIVIDE_BY_TWO) 7564 if (dpll & PLL_P1_DIVIDE_BY_TWO)
7300 clock.p1 = 2; 7565 clock.p1 = 2;
@@ -7306,59 +7571,55 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7306 clock.p2 = 4; 7571 clock.p2 = 4;
7307 else 7572 else
7308 clock.p2 = 2; 7573 clock.p2 = 2;
7309
7310 i9xx_clock(48000, &clock);
7311 } 7574 }
7575
7576 i9xx_clock(refclk, &clock);
7312 } 7577 }
7313 7578
7314 pipe_config->adjusted_mode.clock = clock.dot; 7579 /*
7580 * This value includes pixel_multiplier. We will use
7581 * port_clock to compute adjusted_mode.crtc_clock in the
7582 * encoder's get_config() function.
7583 */
7584 pipe_config->port_clock = clock.dot;
7315} 7585}
7316 7586
7317static void ironlake_crtc_clock_get(struct intel_crtc *crtc, 7587int intel_dotclock_calculate(int link_freq,
7318 struct intel_crtc_config *pipe_config) 7588 const struct intel_link_m_n *m_n)
7319{ 7589{
7320 struct drm_device *dev = crtc->base.dev;
7321 struct drm_i915_private *dev_priv = dev->dev_private;
7322 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7323 int link_freq, repeat;
7324 u64 clock;
7325 u32 link_m, link_n;
7326
7327 repeat = pipe_config->pixel_multiplier;
7328
7329 /* 7590 /*
7330 * The calculation for the data clock is: 7591 * The calculation for the data clock is:
7331 * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp 7592 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
7332 * But we want to avoid losing precision if possible, so: 7593 * But we want to avoid losing precision if possible, so:
7333 * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp)) 7594 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
7334 * 7595 *
7335 * and the link clock is simpler: 7596 * and the link clock is simpler:
7336 * link_clock = (m * link_clock * repeat) / n 7597 * link_clock = (m * link_clock) / n
7337 */ 7598 */
7338 7599
7339 /* 7600 if (!m_n->link_n)
7340 * We need to get the FDI or DP link clock here to derive 7601 return 0;
7341 * the M/N dividers.
7342 *
7343 * For FDI, we read it from the BIOS or use a fixed 2.7GHz.
7344 * For DP, it's either 1.62GHz or 2.7GHz.
7345 * We do our calculations in 10*MHz since we don't need much precison.
7346 */
7347 if (pipe_config->has_pch_encoder)
7348 link_freq = intel_fdi_link_freq(dev) * 10000;
7349 else
7350 link_freq = pipe_config->port_clock;
7351 7602
7352 link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder)); 7603 return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
7353 link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder)); 7604}
7354 7605
7355 if (!link_m || !link_n) 7606static void ironlake_pch_clock_get(struct intel_crtc *crtc,
7356 return; 7607 struct intel_crtc_config *pipe_config)
7608{
7609 struct drm_device *dev = crtc->base.dev;
7357 7610
7358 clock = ((u64)link_m * (u64)link_freq * (u64)repeat); 7611 /* read out port_clock from the DPLL */
7359 do_div(clock, link_n); 7612 i9xx_crtc_clock_get(crtc, pipe_config);
7360 7613
7361 pipe_config->adjusted_mode.clock = clock; 7614 /*
7615 * This value does not include pixel_multiplier.
7616 * We will check that port_clock and adjusted_mode.crtc_clock
7617 * agree once we know their relationship in the encoder's
7618 * get_config() function.
7619 */
7620 pipe_config->adjusted_mode.crtc_clock =
7621 intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
7622 &pipe_config->fdi_m_n);
7362} 7623}
7363 7624
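intel_dotclock_calculate() is a pure function of the link M/N readout, so the same helper now serves both the FDI path above (link_freq = intel_fdi_link_freq(dev) * 10000, i.e. 270000 kHz for a 2.7 GHz link) and the DP encoders' get_config(). A worked sketch with illustrative M/N values:

    /* Hypothetical readout: link_m/link_n = 11/20 on a 2.7 GHz FDI link
     * recovers a 148500 kHz (1080p60) dot clock. */
    u64 dot = div_u64((u64)11 * 270000, 20);    /* == 148500 */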
7364/** Returns the currently programmed mode of the given pipe. */ 7625/** Returns the currently programmed mode of the given pipe. */
@@ -7374,6 +7635,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
7374 int hsync = I915_READ(HSYNC(cpu_transcoder)); 7635 int hsync = I915_READ(HSYNC(cpu_transcoder));
7375 int vtot = I915_READ(VTOTAL(cpu_transcoder)); 7636 int vtot = I915_READ(VTOTAL(cpu_transcoder));
7376 int vsync = I915_READ(VSYNC(cpu_transcoder)); 7637 int vsync = I915_READ(VSYNC(cpu_transcoder));
7638 enum pipe pipe = intel_crtc->pipe;
7377 7639
7378 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 7640 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
7379 if (!mode) 7641 if (!mode)
@@ -7386,11 +7648,14 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
7386 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need 7648 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
7387 * to use a real value here instead. 7649 * to use a real value here instead.
7388 */ 7650 */
7389 pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe; 7651 pipe_config.cpu_transcoder = (enum transcoder) pipe;
7390 pipe_config.pixel_multiplier = 1; 7652 pipe_config.pixel_multiplier = 1;
7653 pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
7654 pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
7655 pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
7391 i9xx_crtc_clock_get(intel_crtc, &pipe_config); 7656 i9xx_crtc_clock_get(intel_crtc, &pipe_config);
7392 7657
7393 mode->clock = pipe_config.adjusted_mode.clock; 7658 mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
7394 mode->hdisplay = (htot & 0xffff) + 1; 7659 mode->hdisplay = (htot & 0xffff) + 1;
7395 mode->htotal = ((htot & 0xffff0000) >> 16) + 1; 7660 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
7396 mode->hsync_start = (hsync & 0xffff) + 1; 7661 mode->hsync_start = (hsync & 0xffff) + 1;
@@ -7496,6 +7761,9 @@ void intel_mark_idle(struct drm_device *dev)
7496 7761
7497 intel_decrease_pllclock(crtc); 7762 intel_decrease_pllclock(crtc);
7498 } 7763 }
7764
7765 if (dev_priv->info->gen >= 6)
7766 gen6_rps_idle(dev->dev_private);
7499} 7767}
7500 7768
7501void intel_mark_fb_busy(struct drm_i915_gem_object *obj, 7769void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
@@ -7684,7 +7952,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
7684 intel_ring_emit(ring, 0); /* aux display base address, unused */ 7952 intel_ring_emit(ring, 0); /* aux display base address, unused */
7685 7953
7686 intel_mark_page_flip_active(intel_crtc); 7954 intel_mark_page_flip_active(intel_crtc);
7687 intel_ring_advance(ring); 7955 __intel_ring_advance(ring);
7688 return 0; 7956 return 0;
7689 7957
7690err_unpin: 7958err_unpin:
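All of the flip paths below make the same substitution, so one note here: a plausible reading of the rename (the helpers' bodies are outside this diff) is that intel_ring_advance() now only moves the software tail, while the double-underscore variant also writes the tail register so the flip command executes immediately instead of waiting for a later submission. Schematically, under that assumption:

    intel_ring_advance(ring);               /* bookkeeping: move sw tail */
    ring->write_tail(ring, ring->tail);     /* kick the hardware now (__ variant) */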
@@ -7726,7 +7994,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
7726 intel_ring_emit(ring, MI_NOOP); 7994 intel_ring_emit(ring, MI_NOOP);
7727 7995
7728 intel_mark_page_flip_active(intel_crtc); 7996 intel_mark_page_flip_active(intel_crtc);
7729 intel_ring_advance(ring); 7997 __intel_ring_advance(ring);
7730 return 0; 7998 return 0;
7731 7999
7732err_unpin: 8000err_unpin:
@@ -7775,7 +8043,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
7775 intel_ring_emit(ring, pf | pipesrc); 8043 intel_ring_emit(ring, pf | pipesrc);
7776 8044
7777 intel_mark_page_flip_active(intel_crtc); 8045 intel_mark_page_flip_active(intel_crtc);
7778 intel_ring_advance(ring); 8046 __intel_ring_advance(ring);
7779 return 0; 8047 return 0;
7780 8048
7781err_unpin: 8049err_unpin:
@@ -7820,7 +8088,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
7820 intel_ring_emit(ring, pf | pipesrc); 8088 intel_ring_emit(ring, pf | pipesrc);
7821 8089
7822 intel_mark_page_flip_active(intel_crtc); 8090 intel_mark_page_flip_active(intel_crtc);
7823 intel_ring_advance(ring); 8091 __intel_ring_advance(ring);
7824 return 0; 8092 return 0;
7825 8093
7826err_unpin: 8094err_unpin:
@@ -7899,7 +8167,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
7899 intel_ring_emit(ring, (MI_NOOP)); 8167 intel_ring_emit(ring, (MI_NOOP));
7900 8168
7901 intel_mark_page_flip_active(intel_crtc); 8169 intel_mark_page_flip_active(intel_crtc);
7902 intel_ring_advance(ring); 8170 __intel_ring_advance(ring);
7903 return 0; 8171 return 0;
7904 8172
7905err_unpin: 8173err_unpin:
@@ -7944,7 +8212,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7944 fb->pitches[0] != crtc->fb->pitches[0])) 8212 fb->pitches[0] != crtc->fb->pitches[0]))
7945 return -EINVAL; 8213 return -EINVAL;
7946 8214
7947 work = kzalloc(sizeof *work, GFP_KERNEL); 8215 work = kzalloc(sizeof(*work), GFP_KERNEL);
7948 if (work == NULL) 8216 if (work == NULL)
7949 return -ENOMEM; 8217 return -ENOMEM;
7950 8218
@@ -8179,6 +8447,17 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
8179 return bpp; 8447 return bpp;
8180} 8448}
8181 8449
8450static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
8451{
8452 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
8453 "type: 0x%x flags: 0x%x\n",
8454 mode->crtc_clock,
8455 mode->crtc_hdisplay, mode->crtc_hsync_start,
8456 mode->crtc_hsync_end, mode->crtc_htotal,
8457 mode->crtc_vdisplay, mode->crtc_vsync_start,
8458 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
8459}
8460
8182static void intel_dump_pipe_config(struct intel_crtc *crtc, 8461static void intel_dump_pipe_config(struct intel_crtc *crtc,
8183 struct intel_crtc_config *pipe_config, 8462 struct intel_crtc_config *pipe_config,
8184 const char *context) 8463 const char *context)
@@ -8195,10 +8474,19 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
8195 pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n, 8474 pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
8196 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n, 8475 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
8197 pipe_config->fdi_m_n.tu); 8476 pipe_config->fdi_m_n.tu);
8477 DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
8478 pipe_config->has_dp_encoder,
8479 pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
8480 pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
8481 pipe_config->dp_m_n.tu);
8198 DRM_DEBUG_KMS("requested mode:\n"); 8482 DRM_DEBUG_KMS("requested mode:\n");
8199 drm_mode_debug_printmodeline(&pipe_config->requested_mode); 8483 drm_mode_debug_printmodeline(&pipe_config->requested_mode);
8200 DRM_DEBUG_KMS("adjusted mode:\n"); 8484 DRM_DEBUG_KMS("adjusted mode:\n");
8201 drm_mode_debug_printmodeline(&pipe_config->adjusted_mode); 8485 drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
8486 intel_dump_crtc_timings(&pipe_config->adjusted_mode);
8487 DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
8488 DRM_DEBUG_KMS("pipe src size: %dx%d\n",
8489 pipe_config->pipe_src_w, pipe_config->pipe_src_h);
8202 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", 8490 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
8203 pipe_config->gmch_pfit.control, 8491 pipe_config->gmch_pfit.control,
8204 pipe_config->gmch_pfit.pgm_ratios, 8492 pipe_config->gmch_pfit.pgm_ratios,
@@ -8208,6 +8496,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
8208 pipe_config->pch_pfit.size, 8496 pipe_config->pch_pfit.size,
8209 pipe_config->pch_pfit.enabled ? "enabled" : "disabled"); 8497 pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
8210 DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled); 8498 DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
8499 DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
8211} 8500}
8212 8501
8213static bool check_encoder_cloning(struct drm_crtc *crtc) 8502static bool check_encoder_cloning(struct drm_crtc *crtc)
@@ -8251,6 +8540,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
8251 8540
8252 drm_mode_copy(&pipe_config->adjusted_mode, mode); 8541 drm_mode_copy(&pipe_config->adjusted_mode, mode);
8253 drm_mode_copy(&pipe_config->requested_mode, mode); 8542 drm_mode_copy(&pipe_config->requested_mode, mode);
8543
8254 pipe_config->cpu_transcoder = 8544 pipe_config->cpu_transcoder =
8255 (enum transcoder) to_intel_crtc(crtc)->pipe; 8545 (enum transcoder) to_intel_crtc(crtc)->pipe;
8256 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 8546 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -8277,13 +8567,25 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
8277 if (plane_bpp < 0) 8567 if (plane_bpp < 0)
8278 goto fail; 8568 goto fail;
8279 8569
8570 /*
8571 * Determine the real pipe dimensions. Note that stereo modes can
8572 * increase the actual pipe size due to the frame doubling and
8573 * insertion of additional space for blanks between the frames. This
8574 * is stored in the crtc timings. We use the requested mode to do this
8575 * computation to clearly distinguish it from the adjusted mode, which
8576 * can be changed by the connectors in the retry loop below.
8577 */
8578 drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
8579 pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
8580 pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
8581
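A concrete number helps here: with CRTC_STEREO_DOUBLE, an HDMI 1080p frame-packed mode presents 1920x2205 to the pipe rather than 1920x1080, since both eyes plus the 45-line blank between them count as active video. A sketch of the arithmetic (assumed to mirror what drm_mode_set_crtcinfo() does for DRM_MODE_FLAG_3D_FRAME_PACKING):

    crtc_vdisplay = vdisplay + vtotal;  /* 1080 + 1125 = 2205 */
    crtc_vtotal   = 2 * vtotal;         /* 2250 */
    crtc_clock    = 2 * clock;          /* pixel rate doubles too */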
8280encoder_retry: 8582encoder_retry:
8281 /* Ensure the port clock defaults are reset when retrying. */ 8583 /* Ensure the port clock defaults are reset when retrying. */
8282 pipe_config->port_clock = 0; 8584 pipe_config->port_clock = 0;
8283 pipe_config->pixel_multiplier = 1; 8585 pipe_config->pixel_multiplier = 1;
8284 8586
8285 /* Fill in default crtc timings, allow encoders to overwrite them. */ 8587 /* Fill in default crtc timings, allow encoders to overwrite them. */
8286 drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0); 8588 drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
8287 8589
8288 /* Pass our mode to the connectors and the CRTC to give them a chance to 8590 /* Pass our mode to the connectors and the CRTC to give them a chance to
8289 * adjust it according to limitations or connector properties, and also 8591 * adjust it according to limitations or connector properties, and also
@@ -8304,7 +8606,8 @@ encoder_retry:
8304 /* Set default port clock if not overwritten by the encoder. Needs to be 8606 /* Set default port clock if not overwritten by the encoder. Needs to be
8305 * done afterwards in case the encoder adjusts the mode. */ 8607 * done afterwards in case the encoder adjusts the mode. */
8306 if (!pipe_config->port_clock) 8608 if (!pipe_config->port_clock)
8307 pipe_config->port_clock = pipe_config->adjusted_mode.clock; 8609 pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
8610 * pipe_config->pixel_multiplier;
8308 8611
8309 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); 8612 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
8310 if (ret < 0) { 8613 if (ret < 0) {
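Since crtc_clock now excludes the pixel multiplier, the default port clock above multiplies the factor back in, matching the port_clock that i9xx_crtc_clock_get() reads out. A hypothetical SDVO case: low dot clocks are sent at a multiplied rate with the extra pixels discarded, so

    /* adjusted_mode.crtc_clock == 25200 kHz, pixel_multiplier == 4 */
    pipe_config->port_clock = 25200 * 4;    /* DPLL programmed for 100800 kHz */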
@@ -8491,13 +8794,9 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
8491 8794
8492} 8795}
8493 8796
8494static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur, 8797static bool intel_fuzzy_clock_check(int clock1, int clock2)
8495 struct intel_crtc_config *new)
8496{ 8798{
8497 int clock1, clock2, diff; 8799 int diff;
8498
8499 clock1 = cur->adjusted_mode.clock;
8500 clock2 = new->adjusted_mode.clock;
8501 8800
8502 if (clock1 == clock2) 8801 if (clock1 == clock2)
8503 return true; 8802 return true;
@@ -8551,6 +8850,15 @@ intel_pipe_config_compare(struct drm_device *dev,
8551 return false; \ 8850 return false; \
8552 } 8851 }
8553 8852
8853#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
8854 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
8855 DRM_ERROR("mismatch in " #name " " \
8856 "(expected %i, found %i)\n", \
8857 current_config->name, \
8858 pipe_config->name); \
8859 return false; \
8860 }
8861
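Wrapping the fuzzy comparison in a macro lets crtc_clock and port_clock share one tolerance check and one error message. The real threshold lives in intel_fuzzy_clock_check() outside this hunk; a sketch of the general shape, assuming a percentage bound:

    /* Sketch only; the driver's actual tolerance is not shown in this diff. */
    static bool fuzzy_clock_check(int clock1, int clock2)
    {
        if (clock1 == clock2)
            return true;
        /* accept within ~5% of the larger clock (assumed bound) */
        return abs(clock1 - clock2) * 20 < max(clock1, clock2);
    }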
8554#define PIPE_CONF_QUIRK(quirk) \ 8862#define PIPE_CONF_QUIRK(quirk) \
8555 ((current_config->quirks | pipe_config->quirks) & (quirk)) 8863 ((current_config->quirks | pipe_config->quirks) & (quirk))
8556 8864
@@ -8564,6 +8872,13 @@ intel_pipe_config_compare(struct drm_device *dev,
8564 PIPE_CONF_CHECK_I(fdi_m_n.link_n); 8872 PIPE_CONF_CHECK_I(fdi_m_n.link_n);
8565 PIPE_CONF_CHECK_I(fdi_m_n.tu); 8873 PIPE_CONF_CHECK_I(fdi_m_n.tu);
8566 8874
8875 PIPE_CONF_CHECK_I(has_dp_encoder);
8876 PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
8877 PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
8878 PIPE_CONF_CHECK_I(dp_m_n.link_m);
8879 PIPE_CONF_CHECK_I(dp_m_n.link_n);
8880 PIPE_CONF_CHECK_I(dp_m_n.tu);
8881
8567 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay); 8882 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
8568 PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal); 8883 PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
8569 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start); 8884 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
@@ -8594,8 +8909,8 @@ intel_pipe_config_compare(struct drm_device *dev,
8594 DRM_MODE_FLAG_NVSYNC); 8909 DRM_MODE_FLAG_NVSYNC);
8595 } 8910 }
8596 8911
8597 PIPE_CONF_CHECK_I(requested_mode.hdisplay); 8912 PIPE_CONF_CHECK_I(pipe_src_w);
8598 PIPE_CONF_CHECK_I(requested_mode.vdisplay); 8913 PIPE_CONF_CHECK_I(pipe_src_h);
8599 8914
8600 PIPE_CONF_CHECK_I(gmch_pfit.control); 8915 PIPE_CONF_CHECK_I(gmch_pfit.control);
8601 /* pfit ratios are autocomputed by the hw on gen4+ */ 8916 /* pfit ratios are autocomputed by the hw on gen4+ */
@@ -8610,26 +8925,28 @@ intel_pipe_config_compare(struct drm_device *dev,
8610 8925
8611 PIPE_CONF_CHECK_I(ips_enabled); 8926 PIPE_CONF_CHECK_I(ips_enabled);
8612 8927
8928 PIPE_CONF_CHECK_I(double_wide);
8929
8613 PIPE_CONF_CHECK_I(shared_dpll); 8930 PIPE_CONF_CHECK_I(shared_dpll);
8614 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 8931 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8615 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 8932 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
8616 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 8933 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
8617 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 8934 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
8618 8935
8936 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
8937 PIPE_CONF_CHECK_I(pipe_bpp);
8938
8939 if (!IS_HASWELL(dev)) {
8940 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
8941 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
8942 }
8943
8619#undef PIPE_CONF_CHECK_X 8944#undef PIPE_CONF_CHECK_X
8620#undef PIPE_CONF_CHECK_I 8945#undef PIPE_CONF_CHECK_I
8621#undef PIPE_CONF_CHECK_FLAGS 8946#undef PIPE_CONF_CHECK_FLAGS
8947#undef PIPE_CONF_CHECK_CLOCK_FUZZY
8622#undef PIPE_CONF_QUIRK 8948#undef PIPE_CONF_QUIRK
8623 8949
8624 if (!IS_HASWELL(dev)) {
8625 if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
8626 DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
8627 current_config->adjusted_mode.clock,
8628 pipe_config->adjusted_mode.clock);
8629 return false;
8630 }
8631 }
8632
8633 return true; 8950 return true;
8634} 8951}
8635 8952
@@ -8761,9 +9078,6 @@ check_crtc_state(struct drm_device *dev)
8761 encoder->get_config(encoder, &pipe_config); 9078 encoder->get_config(encoder, &pipe_config);
8762 } 9079 }
8763 9080
8764 if (dev_priv->display.get_clock)
8765 dev_priv->display.get_clock(crtc, &pipe_config);
8766
8767 WARN(crtc->active != active, 9081 WARN(crtc->active != active,
8768 "crtc active state doesn't match with hw state " 9082 "crtc active state doesn't match with hw state "
8769 "(expected %i, found %i)\n", crtc->active, active); 9083 "(expected %i, found %i)\n", crtc->active, active);
@@ -8838,6 +9152,18 @@ intel_modeset_check_state(struct drm_device *dev)
8838 check_shared_dpll_state(dev); 9152 check_shared_dpll_state(dev);
8839} 9153}
8840 9154
9155void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
9156 int dotclock)
9157{
9158 /*
9159 * FDI already provided one idea for the dotclock.
9160 * Yell if the encoder disagrees.
9161 */
9162 WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
9163 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
9164 pipe_config->adjusted_mode.crtc_clock, dotclock);
9165}
9166
8841static int __intel_set_mode(struct drm_crtc *crtc, 9167static int __intel_set_mode(struct drm_crtc *crtc,
8842 struct drm_display_mode *mode, 9168 struct drm_display_mode *mode,
8843 int x, int y, struct drm_framebuffer *fb) 9169 int x, int y, struct drm_framebuffer *fb)
@@ -8850,7 +9176,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
8850 unsigned disable_pipes, prepare_pipes, modeset_pipes; 9176 unsigned disable_pipes, prepare_pipes, modeset_pipes;
8851 int ret = 0; 9177 int ret = 0;
8852 9178
8853 saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL); 9179 saved_mode = kcalloc(2, sizeof(*saved_mode), GFP_KERNEL);
8854 if (!saved_mode) 9180 if (!saved_mode)
8855 return -ENOMEM; 9181 return -ENOMEM;
8856 saved_hwmode = saved_mode + 1; 9182 saved_hwmode = saved_mode + 1;
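kcalloc(2, sizeof(*saved_mode), ...) is the idiomatic array allocation: unlike the open-coded kmalloc(2 * sizeof(...)), it zeroes both elements and guards the size multiplication against overflow (moot for n == 2, but it documents intent):

    saved_mode   = kcalloc(2, sizeof(*saved_mode), GFP_KERNEL); /* both zeroed */
    saved_hwmode = saved_mode + 1;                              /* second element */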
@@ -9389,7 +9715,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
9389 struct intel_crtc *intel_crtc; 9715 struct intel_crtc *intel_crtc;
9390 int i; 9716 int i;
9391 9717
9392 intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); 9718 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
9393 if (intel_crtc == NULL) 9719 if (intel_crtc == NULL)
9394 return; 9720 return;
9395 9721
@@ -9555,6 +9881,8 @@ static void intel_setup_outputs(struct drm_device *dev)
9555 if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED) 9881 if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
9556 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); 9882 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
9557 } 9883 }
9884
9885 intel_dsi_init(dev);
9558 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { 9886 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
9559 bool found = false; 9887 bool found = false;
9560 9888
@@ -9787,7 +10115,6 @@ static void intel_init_display(struct drm_device *dev)
9787 dev_priv->display.update_plane = ironlake_update_plane; 10115 dev_priv->display.update_plane = ironlake_update_plane;
9788 } else if (HAS_PCH_SPLIT(dev)) { 10116 } else if (HAS_PCH_SPLIT(dev)) {
9789 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 10117 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
9790 dev_priv->display.get_clock = ironlake_crtc_clock_get;
9791 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 10118 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
9792 dev_priv->display.crtc_enable = ironlake_crtc_enable; 10119 dev_priv->display.crtc_enable = ironlake_crtc_enable;
9793 dev_priv->display.crtc_disable = ironlake_crtc_disable; 10120 dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -9795,7 +10122,6 @@ static void intel_init_display(struct drm_device *dev)
9795 dev_priv->display.update_plane = ironlake_update_plane; 10122 dev_priv->display.update_plane = ironlake_update_plane;
9796 } else if (IS_VALLEYVIEW(dev)) { 10123 } else if (IS_VALLEYVIEW(dev)) {
9797 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 10124 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
9798 dev_priv->display.get_clock = i9xx_crtc_clock_get;
9799 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 10125 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
9800 dev_priv->display.crtc_enable = valleyview_crtc_enable; 10126 dev_priv->display.crtc_enable = valleyview_crtc_enable;
9801 dev_priv->display.crtc_disable = i9xx_crtc_disable; 10127 dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9803,7 +10129,6 @@ static void intel_init_display(struct drm_device *dev)
9803 dev_priv->display.update_plane = i9xx_update_plane; 10129 dev_priv->display.update_plane = i9xx_update_plane;
9804 } else { 10130 } else {
9805 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 10131 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
9806 dev_priv->display.get_clock = i9xx_crtc_clock_get;
9807 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 10132 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
9808 dev_priv->display.crtc_enable = i9xx_crtc_enable; 10133 dev_priv->display.crtc_enable = i9xx_crtc_enable;
9809 dev_priv->display.crtc_disable = i9xx_crtc_disable; 10134 dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9989,20 +10314,11 @@ static struct intel_quirk intel_quirks[] = {
9989 /* Sony Vaio Y cannot use SSC on LVDS */ 10314 /* Sony Vaio Y cannot use SSC on LVDS */
9990 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, 10315 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
9991 10316
9992 /* Acer Aspire 5734Z must invert backlight brightness */ 10317 /*
9993 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, 10318 * All GM45 Acer (and its brands eMachines and Packard Bell) laptops
9994 10319 * seem to use inverted backlight PWM.
9995 /* Acer/eMachines G725 */ 10320 */
9996 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, 10321 { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
9997
9998 /* Acer/eMachines e725 */
9999 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
10000
10001 /* Acer/Packard Bell NCL20 */
10002 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
10003
10004 /* Acer Aspire 4736Z */
10005 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
10006 10322
10007 /* Dell XPS13 HD Sandy Bridge */ 10323 /* Dell XPS13 HD Sandy Bridge */
10008 { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable }, 10324 { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
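PCI_ANY_ID acts as a wildcard in the consolidated Acer entry above: quirks match on (device, subsystem vendor, subsystem device), so the single 0x2a42/0x1025 row now covers every Acer-family GM45 subsystem ID. A sketch of the matching rule, assuming the conventional wildcard semantics:

    /* Sketch; fields are ints so PCI_ANY_ID (~0) fits. */
    static bool quirk_matches(const struct intel_quirk *q, int device,
                              int subsys_vendor, int subsys_device)
    {
        return q->device == device &&
               (q->subsystem_vendor == subsys_vendor ||
                q->subsystem_vendor == PCI_ANY_ID) &&
               (q->subsystem_device == subsys_device ||
                q->subsystem_device == PCI_ANY_ID);
    }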
@@ -10078,12 +10394,19 @@ void i915_disable_vga_mem(struct drm_device *dev)
10078 10394
10079void intel_modeset_init_hw(struct drm_device *dev) 10395void intel_modeset_init_hw(struct drm_device *dev)
10080{ 10396{
10081 intel_init_power_well(dev); 10397 struct drm_i915_private *dev_priv = dev->dev_private;
10082 10398
10083 intel_prepare_ddi(dev); 10399 intel_prepare_ddi(dev);
10084 10400
10085 intel_init_clock_gating(dev); 10401 intel_init_clock_gating(dev);
10086 10402
10403 /* Enable the CRI clock source so we can get at the display */
10404 if (IS_VALLEYVIEW(dev))
10405 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
10406 DPLL_INTEGRATED_CRI_CLK_VLV);
10407
10408 intel_init_dpio(dev);
10409
10087 mutex_lock(&dev->struct_mutex); 10410 mutex_lock(&dev->struct_mutex);
10088 intel_enable_gt_powersave(dev); 10411 intel_enable_gt_powersave(dev);
10089 mutex_unlock(&dev->struct_mutex); 10412 mutex_unlock(&dev->struct_mutex);
@@ -10422,15 +10745,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
10422 pipe); 10745 pipe);
10423 } 10746 }
10424 10747
10425 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
10426 base.head) {
10427 if (!crtc->active)
10428 continue;
10429 if (dev_priv->display.get_clock)
10430 dev_priv->display.get_clock(crtc,
10431 &crtc->config);
10432 }
10433
10434 list_for_each_entry(connector, &dev->mode_config.connector_list, 10748 list_for_each_entry(connector, &dev->mode_config.connector_list,
10435 base.head) { 10749 base.head) {
10436 if (connector->get_hw_state(connector)) { 10750 if (connector->get_hw_state(connector)) {
@@ -10455,7 +10769,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
10455{ 10769{
10456 struct drm_i915_private *dev_priv = dev->dev_private; 10770 struct drm_i915_private *dev_priv = dev->dev_private;
10457 enum pipe pipe; 10771 enum pipe pipe;
10458 struct drm_plane *plane;
10459 struct intel_crtc *crtc; 10772 struct intel_crtc *crtc;
10460 struct intel_encoder *encoder; 10773 struct intel_encoder *encoder;
10461 int i; 10774 int i;
@@ -10503,6 +10816,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
10503 } 10816 }
10504 10817
10505 if (force_restore) { 10818 if (force_restore) {
10819 i915_redisable_vga(dev);
10820
10506 /* 10821 /*
10507 * We need to use raw interfaces for restoring state to avoid 10822 * We need to use raw interfaces for restoring state to avoid
10508 * checking (bogus) intermediate states. 10823 * checking (bogus) intermediate states.
@@ -10514,10 +10829,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
10514 __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, 10829 __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
10515 crtc->fb); 10830 crtc->fb);
10516 } 10831 }
10517 list_for_each_entry(plane, &dev->mode_config.plane_list, head)
10518 intel_plane_restore(plane);
10519
10520 i915_redisable_vga(dev);
10521 } else { 10832 } else {
10522 intel_modeset_update_staged_output_state(dev); 10833 intel_modeset_update_staged_output_state(dev);
10523 } 10834 }
@@ -10540,6 +10851,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
10540{ 10851{
10541 struct drm_i915_private *dev_priv = dev->dev_private; 10852 struct drm_i915_private *dev_priv = dev->dev_private;
10542 struct drm_crtc *crtc; 10853 struct drm_crtc *crtc;
10854 struct drm_connector *connector;
10543 10855
10544 /* 10856 /*
10545 * Interrupts and polling as the first thing to avoid creating havoc. 10857 * Interrupts and polling as the first thing to avoid creating havoc.
@@ -10582,6 +10894,10 @@ void intel_modeset_cleanup(struct drm_device *dev)
10582 /* destroy backlight, if any, before the connectors */ 10894 /* destroy backlight, if any, before the connectors */
10583 intel_panel_destroy_backlight(dev); 10895 intel_panel_destroy_backlight(dev);
10584 10896
10897 /* destroy the sysfs files before encoders/connectors */
10898 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
10899 drm_sysfs_connector_remove(connector);
10900
10585 drm_mode_config_cleanup(dev); 10901 drm_mode_config_cleanup(dev);
10586 10902
10587 intel_cleanup_overlay(dev); 10903 intel_cleanup_overlay(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 79c14e298ba6..d5bd349105e5 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -38,6 +38,32 @@
38 38
39#define DP_LINK_CHECK_TIMEOUT (10 * 1000) 39#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
40 40
41struct dp_link_dpll {
42 int link_bw;
43 struct dpll dpll;
44};
45
46static const struct dp_link_dpll gen4_dpll[] = {
47 { DP_LINK_BW_1_62,
48 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
49 { DP_LINK_BW_2_7,
50 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
51};
52
53static const struct dp_link_dpll pch_dpll[] = {
54 { DP_LINK_BW_1_62,
55 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
56 { DP_LINK_BW_2_7,
57 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
58};
59
60static const struct dp_link_dpll vlv_dpll[] = {
61 { DP_LINK_BW_1_62,
62 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
63 { DP_LINK_BW_2_7,
64 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
65};
66
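Each table entry pins fixed divider values to a DP link rate. Running the usual i9xx divider model over pch_dpll[0] (an assumption; the exact m/n offsets live in the clock code, not this diff) shows how the numbers line up with the 162 MHz link clock of DP_LINK_BW_1_62:

    int refclk = 120000;                /* PCH reference, kHz */
    int m = 5 * (12 + 2) + (9 + 2);     /* m1 = 12, m2 = 9 -> 81 */
    int vco = refclk * m / (1 + 2);     /* n = 1 -> 3240000 kHz */
    int dot = vco / (2 * 10);           /* p1 = 2, p2 = 10 -> 162000 kHz */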
41/** 67/**
42 * is_edp - is the given port attached to an eDP panel (either CPU or PCH) 68 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
43 * @intel_dp: DP struct 69 * @intel_dp: DP struct
@@ -211,24 +237,77 @@ intel_hrawclk(struct drm_device *dev)
211 } 237 }
212} 238}
213 239
240static void
241intel_dp_init_panel_power_sequencer(struct drm_device *dev,
242 struct intel_dp *intel_dp,
243 struct edp_power_seq *out);
244static void
245intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
246 struct intel_dp *intel_dp,
247 struct edp_power_seq *out);
248
249static enum pipe
250vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
251{
252 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
253 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
254 struct drm_device *dev = intel_dig_port->base.base.dev;
255 struct drm_i915_private *dev_priv = dev->dev_private;
256 enum port port = intel_dig_port->port;
257 enum pipe pipe;
258
259 /* modeset should have pipe */
260 if (crtc)
261 return to_intel_crtc(crtc)->pipe;
262
263 /* init time, try to find a pipe with this port selected */
264 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
265 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
266 PANEL_PORT_SELECT_MASK;
267 if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
268 return pipe;
269 if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
270 return pipe;
271 }
272
273 /* shrug */
274 return PIPE_A;
275}
276
277static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
278{
279 struct drm_device *dev = intel_dp_to_dev(intel_dp);
280
281 if (HAS_PCH_SPLIT(dev))
282 return PCH_PP_CONTROL;
283 else
284 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
285}
286
287static u32 _pp_stat_reg(struct intel_dp *intel_dp)
288{
289 struct drm_device *dev = intel_dp_to_dev(intel_dp);
290
291 if (HAS_PCH_SPLIT(dev))
292 return PCH_PP_STATUS;
293 else
294 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
295}
296
214static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) 297static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
215{ 298{
216 struct drm_device *dev = intel_dp_to_dev(intel_dp); 299 struct drm_device *dev = intel_dp_to_dev(intel_dp);
217 struct drm_i915_private *dev_priv = dev->dev_private; 300 struct drm_i915_private *dev_priv = dev->dev_private;
218 u32 pp_stat_reg;
219 301
220 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; 302 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
221 return (I915_READ(pp_stat_reg) & PP_ON) != 0;
222} 303}
223 304
224static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) 305static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
225{ 306{
226 struct drm_device *dev = intel_dp_to_dev(intel_dp); 307 struct drm_device *dev = intel_dp_to_dev(intel_dp);
227 struct drm_i915_private *dev_priv = dev->dev_private; 308 struct drm_i915_private *dev_priv = dev->dev_private;
228 u32 pp_ctrl_reg;
229 309
230 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 310 return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
231 return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
232} 311}
233 312
234static void 313static void
@@ -236,19 +315,15 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
236{ 315{
237 struct drm_device *dev = intel_dp_to_dev(intel_dp); 316 struct drm_device *dev = intel_dp_to_dev(intel_dp);
238 struct drm_i915_private *dev_priv = dev->dev_private; 317 struct drm_i915_private *dev_priv = dev->dev_private;
239 u32 pp_stat_reg, pp_ctrl_reg;
240 318
241 if (!is_edp(intel_dp)) 319 if (!is_edp(intel_dp))
242 return; 320 return;
243 321
244 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
245 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
246
247 if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) { 322 if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
248 WARN(1, "eDP powered off while attempting aux channel communication.\n"); 323 WARN(1, "eDP powered off while attempting aux channel communication.\n");
249 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", 324 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
250 I915_READ(pp_stat_reg), 325 I915_READ(_pp_stat_reg(intel_dp)),
251 I915_READ(pp_ctrl_reg)); 326 I915_READ(_pp_ctrl_reg(intel_dp)));
252 } 327 }
253} 328}
254 329
@@ -361,6 +436,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
361 goto out; 436 goto out;
362 } 437 }
363 438
439 /* Only 5 data registers! */
440 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
441 ret = -E2BIG;
442 goto out;
443 }
444
364 while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) { 445 while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
365 /* Must try at least 3 times according to DP spec */ 446 /* Must try at least 3 times according to DP spec */
366 for (try = 0; try < 5; try++) { 447 for (try = 0; try < 5; try++) {
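The 20-byte bound follows directly from the comment above: each AUX channel has five 32-bit data registers, so a single transaction carries at most 5 * 4 = 20 bytes in either direction, and oversized requests must fail up front rather than be silently truncated:

    #define DP_AUX_MAX_PAYLOAD (5 * 4)  /* illustrative name, not the driver's */

    if (send_bytes > DP_AUX_MAX_PAYLOAD || recv_size > DP_AUX_MAX_PAYLOAD)
        return -E2BIG;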
@@ -451,9 +532,10 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
451 int msg_bytes; 532 int msg_bytes;
452 uint8_t ack; 533 uint8_t ack;
453 534
535 if (WARN_ON(send_bytes > 16))
536 return -E2BIG;
537
454 intel_dp_check_edp(intel_dp); 538 intel_dp_check_edp(intel_dp);
455 if (send_bytes > 16)
456 return -1;
457 msg[0] = AUX_NATIVE_WRITE << 4; 539 msg[0] = AUX_NATIVE_WRITE << 4;
458 msg[1] = address >> 8; 540 msg[1] = address >> 8;
459 msg[2] = address & 0xff; 541 msg[2] = address & 0xff;
@@ -494,6 +576,9 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
494 uint8_t ack; 576 uint8_t ack;
495 int ret; 577 int ret;
496 578
579 if (WARN_ON(recv_bytes > 19))
580 return -E2BIG;
581
497 intel_dp_check_edp(intel_dp); 582 intel_dp_check_edp(intel_dp);
498 msg[0] = AUX_NATIVE_READ << 4; 583 msg[0] = AUX_NATIVE_READ << 4;
499 msg[1] = address >> 8; 584 msg[1] = address >> 8;
@@ -569,7 +654,12 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
569 break; 654 break;
570 } 655 }
571 656
572 for (retry = 0; retry < 5; retry++) { 657 /*
658 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
659 * required to retry at least seven times upon receiving AUX_DEFER
660 * before giving up the AUX transaction.
661 */
662 for (retry = 0; retry < 7; retry++) {
573 ret = intel_dp_aux_ch(intel_dp, 663 ret = intel_dp_aux_ch(intel_dp,
574 msg, msg_bytes, 664 msg, msg_bytes,
575 reply, reply_bytes); 665 reply, reply_bytes);
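The new bound encodes the DP 1.2 rule the comment cites: a source must retry at least seven times on AUX_DEFER before giving up. A minimal sketch of such a loop (aux_xfer() and the delay value are assumptions, not the driver's code):

    int retry, ret = -EIO;

    for (retry = 0; retry < 7; retry++) {
        ret = aux_xfer(msg, msg_bytes, reply, reply_bytes);
        if (ret < 0 ||
            (reply[0] & AUX_NATIVE_REPLY_MASK) != AUX_NATIVE_REPLY_DEFER)
            break;      /* done: success, hard error, or a non-DEFER reply */
        udelay(400);    /* give the sink time before retrying */
    }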
@@ -647,7 +737,7 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
647 strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); 737 strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
648 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; 738 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
649 intel_dp->adapter.algo_data = &intel_dp->algo; 739 intel_dp->adapter.algo_data = &intel_dp->algo;
650 intel_dp->adapter.dev.parent = &intel_connector->base.kdev; 740 intel_dp->adapter.dev.parent = intel_connector->base.kdev;
651 741
652 ironlake_edp_panel_vdd_on(intel_dp); 742 ironlake_edp_panel_vdd_on(intel_dp);
653 ret = i2c_dp_aux_add_bus(&intel_dp->adapter); 743 ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
@@ -660,41 +750,30 @@ intel_dp_set_clock(struct intel_encoder *encoder,
660 struct intel_crtc_config *pipe_config, int link_bw) 750 struct intel_crtc_config *pipe_config, int link_bw)
661{ 751{
662 struct drm_device *dev = encoder->base.dev; 752 struct drm_device *dev = encoder->base.dev;
753 const struct dp_link_dpll *divisor = NULL;
754 int i, count = 0;
663 755
664 if (IS_G4X(dev)) { 756 if (IS_G4X(dev)) {
665 if (link_bw == DP_LINK_BW_1_62) { 757 divisor = gen4_dpll;
666 pipe_config->dpll.p1 = 2; 758 count = ARRAY_SIZE(gen4_dpll);
667 pipe_config->dpll.p2 = 10;
668 pipe_config->dpll.n = 2;
669 pipe_config->dpll.m1 = 23;
670 pipe_config->dpll.m2 = 8;
671 } else {
672 pipe_config->dpll.p1 = 1;
673 pipe_config->dpll.p2 = 10;
674 pipe_config->dpll.n = 1;
675 pipe_config->dpll.m1 = 14;
676 pipe_config->dpll.m2 = 2;
677 }
678 pipe_config->clock_set = true;
679 } else if (IS_HASWELL(dev)) { 759 } else if (IS_HASWELL(dev)) {
680 /* Haswell has special-purpose DP DDI clocks. */ 760 /* Haswell has special-purpose DP DDI clocks. */
681 } else if (HAS_PCH_SPLIT(dev)) { 761 } else if (HAS_PCH_SPLIT(dev)) {
682 if (link_bw == DP_LINK_BW_1_62) { 762 divisor = pch_dpll;
683 pipe_config->dpll.n = 1; 763 count = ARRAY_SIZE(pch_dpll);
684 pipe_config->dpll.p1 = 2;
685 pipe_config->dpll.p2 = 10;
686 pipe_config->dpll.m1 = 12;
687 pipe_config->dpll.m2 = 9;
688 } else {
689 pipe_config->dpll.n = 2;
690 pipe_config->dpll.p1 = 1;
691 pipe_config->dpll.p2 = 10;
692 pipe_config->dpll.m1 = 14;
693 pipe_config->dpll.m2 = 8;
694 }
695 pipe_config->clock_set = true;
696 } else if (IS_VALLEYVIEW(dev)) { 764 } else if (IS_VALLEYVIEW(dev)) {
697 /* FIXME: Need to figure out optimized DP clocks for vlv. */ 765 divisor = vlv_dpll;
766 count = ARRAY_SIZE(vlv_dpll);
767 }
768
769 if (divisor && count) {
770 for (i = 0; i < count; i++) {
771 if (link_bw == divisor[i].link_bw) {
772 pipe_config->dpll = divisor[i].dpll;
773 pipe_config->clock_set = true;
774 break;
775 }
776 }
698 } 777 }
699} 778}
700 779
@@ -737,7 +816,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
737 816
738 DRM_DEBUG_KMS("DP link computation with max lane count %i " 817 DRM_DEBUG_KMS("DP link computation with max lane count %i "
739 "max bw %02x pixel clock %iKHz\n", 818 "max bw %02x pixel clock %iKHz\n",
740 max_lane_count, bws[max_clock], adjusted_mode->clock); 819 max_lane_count, bws[max_clock],
820 adjusted_mode->crtc_clock);
741 821
742 /* Walk through all bpp values. Luckily they're all nicely spaced with 2 822 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
743 * bpc in between. */ 823 * bpc in between. */
@@ -749,7 +829,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
749 } 829 }
750 830
751 for (; bpp >= 6*3; bpp -= 2*3) { 831 for (; bpp >= 6*3; bpp -= 2*3) {
752 mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); 832 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
833 bpp);
753 834
754 for (clock = 0; clock <= max_clock; clock++) { 835 for (clock = 0; clock <= max_clock; clock++) {
755 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 836 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
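The bpp walk is the usual DP budget check, just fed from crtc_clock now. Assuming the helpers' conventional units (intel_dp_link_required() itself is outside this hunk), a mode needs about crtc_clock * bpp / 10 of capacity, and lanes * link_rate * 8 / 10 is what 8b/10b coding leaves available. For 1080p60 at 24 bpp on two 2.7 GHz lanes:

    int mode_rate  = 148500 * 24 / 10;      /* 356400 */
    int link_avail = 270000 * 2 * 8 / 10;   /* 432000: the mode fits */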
@@ -794,7 +875,8 @@ found:
794 mode_rate, link_avail); 875 mode_rate, link_avail);
795 876
796 intel_link_compute_m_n(bpp, lane_count, 877 intel_link_compute_m_n(bpp, lane_count,
797 adjusted_mode->clock, pipe_config->port_clock, 878 adjusted_mode->crtc_clock,
879 pipe_config->port_clock,
798 &pipe_config->dp_m_n); 880 &pipe_config->dp_m_n);
799 881
800 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); 882 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
@@ -802,21 +884,6 @@ found:
802 return true; 884 return true;
803} 885}
804 886
805void intel_dp_init_link_config(struct intel_dp *intel_dp)
806{
807 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
808 intel_dp->link_configuration[0] = intel_dp->link_bw;
809 intel_dp->link_configuration[1] = intel_dp->lane_count;
810 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
811 /*
812 * Check for DPCD version > 1.1 and enhanced framing support
813 */
814 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
815 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
816 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
817 }
818}
819
820static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp) 887static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
821{ 888{
822 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 889 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -889,8 +956,6 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
889 intel_write_eld(&encoder->base, adjusted_mode); 956 intel_write_eld(&encoder->base, adjusted_mode);
890 } 957 }
891 958
892 intel_dp_init_link_config(intel_dp);
893
894 /* Split out the IBX/CPU vs CPT settings */ 959 /* Split out the IBX/CPU vs CPT settings */
895 960
896 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { 961 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
@@ -900,7 +965,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
900 intel_dp->DP |= DP_SYNC_VS_HIGH; 965 intel_dp->DP |= DP_SYNC_VS_HIGH;
901 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 966 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
902 967
903 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 968 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
904 intel_dp->DP |= DP_ENHANCED_FRAMING; 969 intel_dp->DP |= DP_ENHANCED_FRAMING;
905 970
906 intel_dp->DP |= crtc->pipe << 29; 971 intel_dp->DP |= crtc->pipe << 29;
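drm_dp_enhanced_frame_cap() stands in for the DPCD test that the deleted intel_dp_init_link_config() open-coded: enhanced framing is usable when the sink reports DPCD revision 1.1 or later and sets DP_ENHANCED_FRAME_CAP in DP_MAX_LANE_COUNT. A sketch of the helper's logic, reconstructed from the removed lines:

    static bool dp_enhanced_frame_cap(const u8 dpcd[])
    {
        return dpcd[DP_DPCD_REV] >= 0x11 &&
               (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
    }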
@@ -914,7 +979,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
914 intel_dp->DP |= DP_SYNC_VS_HIGH; 979 intel_dp->DP |= DP_SYNC_VS_HIGH;
915 intel_dp->DP |= DP_LINK_TRAIN_OFF; 980 intel_dp->DP |= DP_LINK_TRAIN_OFF;
916 981
917 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 982 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
918 intel_dp->DP |= DP_ENHANCED_FRAMING; 983 intel_dp->DP |= DP_ENHANCED_FRAMING;
919 984
920 if (crtc->pipe == 1) 985 if (crtc->pipe == 1)
@@ -944,8 +1009,8 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
944 struct drm_i915_private *dev_priv = dev->dev_private; 1009 struct drm_i915_private *dev_priv = dev->dev_private;
945 u32 pp_stat_reg, pp_ctrl_reg; 1010 u32 pp_stat_reg, pp_ctrl_reg;
946 1011
947 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; 1012 pp_stat_reg = _pp_stat_reg(intel_dp);
948 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 1013 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
949 1014
950 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", 1015 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
951 mask, value, 1016 mask, value,
@@ -987,11 +1052,8 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
987 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1052 struct drm_device *dev = intel_dp_to_dev(intel_dp);
988 struct drm_i915_private *dev_priv = dev->dev_private; 1053 struct drm_i915_private *dev_priv = dev->dev_private;
989 u32 control; 1054 u32 control;
990 u32 pp_ctrl_reg;
991
992 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
993 control = I915_READ(pp_ctrl_reg);
994 1055
1056 control = I915_READ(_pp_ctrl_reg(intel_dp));
995 control &= ~PANEL_UNLOCK_MASK; 1057 control &= ~PANEL_UNLOCK_MASK;
996 control |= PANEL_UNLOCK_REGS; 1058 control |= PANEL_UNLOCK_REGS;
997 return control; 1059 return control;
@@ -1024,8 +1086,8 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
1024 pp = ironlake_get_pp_control(intel_dp); 1086 pp = ironlake_get_pp_control(intel_dp);
1025 pp |= EDP_FORCE_VDD; 1087 pp |= EDP_FORCE_VDD;
1026 1088
1027 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; 1089 pp_stat_reg = _pp_stat_reg(intel_dp);
1028 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 1090 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1029 1091
1030 I915_WRITE(pp_ctrl_reg, pp); 1092 I915_WRITE(pp_ctrl_reg, pp);
1031 POSTING_READ(pp_ctrl_reg); 1093 POSTING_READ(pp_ctrl_reg);
@@ -1053,8 +1115,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
1053 pp = ironlake_get_pp_control(intel_dp); 1115 pp = ironlake_get_pp_control(intel_dp);
1054 pp &= ~EDP_FORCE_VDD; 1116 pp &= ~EDP_FORCE_VDD;
1055 1117
1056 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; 1118 pp_stat_reg = _pp_stat_reg(intel_dp);
1057 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 1119 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1058 1120
1059 I915_WRITE(pp_ctrl_reg, pp); 1121 I915_WRITE(pp_ctrl_reg, pp);
1060 POSTING_READ(pp_ctrl_reg); 1122 POSTING_READ(pp_ctrl_reg);
@@ -1119,20 +1181,19 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1119 1181
1120 ironlake_wait_panel_power_cycle(intel_dp); 1182 ironlake_wait_panel_power_cycle(intel_dp);
1121 1183
1184 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1122 pp = ironlake_get_pp_control(intel_dp); 1185 pp = ironlake_get_pp_control(intel_dp);
1123 if (IS_GEN5(dev)) { 1186 if (IS_GEN5(dev)) {
1124 /* ILK workaround: disable reset around power sequence */ 1187 /* ILK workaround: disable reset around power sequence */
1125 pp &= ~PANEL_POWER_RESET; 1188 pp &= ~PANEL_POWER_RESET;
1126 I915_WRITE(PCH_PP_CONTROL, pp); 1189 I915_WRITE(pp_ctrl_reg, pp);
1127 POSTING_READ(PCH_PP_CONTROL); 1190 POSTING_READ(pp_ctrl_reg);
1128 } 1191 }
1129 1192
1130 pp |= POWER_TARGET_ON; 1193 pp |= POWER_TARGET_ON;
1131 if (!IS_GEN5(dev)) 1194 if (!IS_GEN5(dev))
1132 pp |= PANEL_POWER_RESET; 1195 pp |= PANEL_POWER_RESET;
1133 1196
1134 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
1135
1136 I915_WRITE(pp_ctrl_reg, pp); 1197 I915_WRITE(pp_ctrl_reg, pp);
1137 POSTING_READ(pp_ctrl_reg); 1198 POSTING_READ(pp_ctrl_reg);
1138 1199
@@ -1140,8 +1201,8 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1140 1201
1141 if (IS_GEN5(dev)) { 1202 if (IS_GEN5(dev)) {
1142 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 1203 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1143 I915_WRITE(PCH_PP_CONTROL, pp); 1204 I915_WRITE(pp_ctrl_reg, pp);
1144 POSTING_READ(PCH_PP_CONTROL); 1205 POSTING_READ(pp_ctrl_reg);
1145 } 1206 }
1146} 1207}
1147 1208
@@ -1164,7 +1225,7 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
1164 * panels get very unhappy and cease to work. */ 1225 * panels get very unhappy and cease to work. */
1165 pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); 1226 pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
1166 1227
1167 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 1228 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1168 1229
1169 I915_WRITE(pp_ctrl_reg, pp); 1230 I915_WRITE(pp_ctrl_reg, pp);
1170 POSTING_READ(pp_ctrl_reg); 1231 POSTING_READ(pp_ctrl_reg);
@@ -1197,7 +1258,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
1197 pp = ironlake_get_pp_control(intel_dp); 1258 pp = ironlake_get_pp_control(intel_dp);
1198 pp |= EDP_BLC_ENABLE; 1259 pp |= EDP_BLC_ENABLE;
1199 1260
1200 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 1261 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1201 1262
1202 I915_WRITE(pp_ctrl_reg, pp); 1263 I915_WRITE(pp_ctrl_reg, pp);
1203 POSTING_READ(pp_ctrl_reg); 1264 POSTING_READ(pp_ctrl_reg);
@@ -1221,7 +1282,7 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
1221 pp = ironlake_get_pp_control(intel_dp); 1282 pp = ironlake_get_pp_control(intel_dp);
1222 pp &= ~EDP_BLC_ENABLE; 1283 pp &= ~EDP_BLC_ENABLE;
1223 1284
1224 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 1285 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1225 1286
1226 I915_WRITE(pp_ctrl_reg, pp); 1287 I915_WRITE(pp_ctrl_reg, pp);
1227 POSTING_READ(pp_ctrl_reg); 1288 POSTING_READ(pp_ctrl_reg);
@@ -1368,6 +1429,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
1368 struct drm_i915_private *dev_priv = dev->dev_private; 1429 struct drm_i915_private *dev_priv = dev->dev_private;
1369 enum port port = dp_to_dig_port(intel_dp)->port; 1430 enum port port = dp_to_dig_port(intel_dp)->port;
1370 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 1431 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1432 int dotclock;
1371 1433
1372 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) { 1434 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
1373 tmp = I915_READ(intel_dp->output_reg); 1435 tmp = I915_READ(intel_dp->output_reg);
@@ -1395,28 +1457,41 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
1395 1457
1396 pipe_config->adjusted_mode.flags |= flags; 1458 pipe_config->adjusted_mode.flags |= flags;
1397 1459
1398 if (dp_to_dig_port(intel_dp)->port == PORT_A) { 1460 pipe_config->has_dp_encoder = true;
1461
1462 intel_dp_get_m_n(crtc, pipe_config);
1463
1464 if (port == PORT_A) {
1399 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ) 1465 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
1400 pipe_config->port_clock = 162000; 1466 pipe_config->port_clock = 162000;
1401 else 1467 else
1402 pipe_config->port_clock = 270000; 1468 pipe_config->port_clock = 270000;
1403 } 1469 }
1470
1471 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
1472 &pipe_config->dp_m_n);
1473
1474 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
1475 ironlake_check_encoder_dotclock(pipe_config, dotclock);
1476
1477 pipe_config->adjusted_mode.crtc_clock = dotclock;
1404} 1478}
1405 1479
1406static bool is_edp_psr(struct intel_dp *intel_dp) 1480static bool is_edp_psr(struct drm_device *dev)
1407{ 1481{
1408 return is_edp(intel_dp) && 1482 struct drm_i915_private *dev_priv = dev->dev_private;
1409 intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED; 1483
1484 return dev_priv->psr.sink_support;
1410} 1485}
1411 1486
1412static bool intel_edp_is_psr_enabled(struct drm_device *dev) 1487static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1413{ 1488{
1414 struct drm_i915_private *dev_priv = dev->dev_private; 1489 struct drm_i915_private *dev_priv = dev->dev_private;
1415 1490
1416 if (!IS_HASWELL(dev)) 1491 if (!HAS_PSR(dev))
1417 return false; 1492 return false;
1418 1493
1419 return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; 1494 return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1420} 1495}
1421 1496
1422static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp, 1497static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
@@ -1466,7 +1541,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
1466 intel_edp_psr_write_vsc(intel_dp, &psr_vsc); 1541 intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
1467 1542
1468 /* Avoid continuous PSR exit by masking memup and hpd */ 1543 /* Avoid continuous PSR exit by masking memup and hpd */
1469 I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP | 1544 I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
1470 EDP_PSR_DEBUG_MASK_HPD); 1545 EDP_PSR_DEBUG_MASK_HPD);
1471 1546
1472 intel_dp->psr_setup_done = true; 1547 intel_dp->psr_setup_done = true;
@@ -1491,9 +1566,9 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
1491 DP_PSR_MAIN_LINK_ACTIVE); 1566 DP_PSR_MAIN_LINK_ACTIVE);
1492 1567
1493 /* Setup AUX registers */ 1568 /* Setup AUX registers */
1494 I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND); 1569 I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
1495 I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION); 1570 I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
1496 I915_WRITE(EDP_PSR_AUX_CTL, 1571 I915_WRITE(EDP_PSR_AUX_CTL(dev),
1497 DP_AUX_CH_CTL_TIME_OUT_400us | 1572 DP_AUX_CH_CTL_TIME_OUT_400us |
1498 (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 1573 (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1499 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 1574 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
@@ -1516,7 +1591,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
1516 } else 1591 } else
1517 val |= EDP_PSR_LINK_DISABLE; 1592 val |= EDP_PSR_LINK_DISABLE;
1518 1593
1519 I915_WRITE(EDP_PSR_CTL, val | 1594 I915_WRITE(EDP_PSR_CTL(dev), val |
1520 EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES | 1595 EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
1521 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | 1596 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
1522 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | 1597 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
@@ -1533,42 +1608,33 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
 	struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
 
-	if (!IS_HASWELL(dev)) {
+	dev_priv->psr.source_ok = false;
+
+	if (!HAS_PSR(dev)) {
 		DRM_DEBUG_KMS("PSR not supported on this platform\n");
-		dev_priv->no_psr_reason = PSR_NO_SOURCE;
 		return false;
 	}
 
 	if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
 	    (dig_port->port != PORT_A)) {
 		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
-		dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
-		return false;
-	}
-
-	if (!is_edp_psr(intel_dp)) {
-		DRM_DEBUG_KMS("PSR not supported by this panel\n");
-		dev_priv->no_psr_reason = PSR_NO_SINK;
 		return false;
 	}
 
 	if (!i915_enable_psr) {
 		DRM_DEBUG_KMS("PSR disable by flag\n");
-		dev_priv->no_psr_reason = PSR_MODULE_PARAM;
 		return false;
 	}
 
 	crtc = dig_port->base.base.crtc;
 	if (crtc == NULL) {
 		DRM_DEBUG_KMS("crtc not active for PSR\n");
-		dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
 		return false;
 	}
 
 	intel_crtc = to_intel_crtc(crtc);
-	if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) {
+	if (!intel_crtc_active(crtc)) {
 		DRM_DEBUG_KMS("crtc not active for PSR\n");
-		dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
 		return false;
 	}
 
@@ -1576,29 +1642,26 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
 	if (obj->tiling_mode != I915_TILING_X ||
 	    obj->fence_reg == I915_FENCE_REG_NONE) {
 		DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
-		dev_priv->no_psr_reason = PSR_NOT_TILED;
 		return false;
 	}
 
 	if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
 		DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
-		dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
 		return false;
 	}
 
 	if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
 	    S3D_ENABLE) {
 		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
-		dev_priv->no_psr_reason = PSR_S3D_ENABLED;
 		return false;
 	}
 
-	if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
+	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
 		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
-		dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
 		return false;
 	}
 
+	dev_priv->psr.source_ok = true;
 	return true;
 }
 
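The hunk above drops the per-failure dev_priv->no_psr_reason bookkeeping: each early return already logs its reason via DRM_DEBUG_KMS, so the only state kept is a single source_ok flag, cleared on entry and set just before the successful return. A sketch of that shape, with hypothetical check names standing in for the real conditions:

#include <stdbool.h>

struct state { bool source_ok; };

/* Hypothetical predicates standing in for the real PSR conditions. */
static bool platform_has_psr(void) { return true; }
static bool crtc_is_active(void)   { return true; }

static bool match_conditions(struct state *s)
{
	s->source_ok = false;		/* pessimistic default */

	if (!platform_has_psr())
		return false;		/* reason already logged at the check */
	if (!crtc_is_active())
		return false;

	s->source_ok = true;		/* everything passed */
	return true;
}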
@@ -1637,10 +1700,11 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
 	if (!intel_edp_is_psr_enabled(dev))
 		return;
 
-	I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
+	I915_WRITE(EDP_PSR_CTL(dev),
+		   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
 
 	/* Wait till PSR is idle */
-	if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
+	if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
 		       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
 		DRM_ERROR("Timed out waiting for PSR Idle State\n");
 }
@@ -1654,7 +1718,7 @@ void intel_edp_psr_update(struct drm_device *dev)
 		if (encoder->type == INTEL_OUTPUT_EDP) {
 			intel_dp = enc_to_intel_dp(&encoder->base);
 
-			if (!is_edp_psr(intel_dp))
+			if (!is_edp_psr(dev))
 				return;
 
 			if (!intel_edp_psr_match_conditions(intel_dp))
@@ -1713,14 +1777,24 @@ static void intel_enable_dp(struct intel_encoder *encoder)
 	ironlake_edp_panel_vdd_off(intel_dp, true);
 	intel_dp_complete_link_train(intel_dp);
 	intel_dp_stop_link_train(intel_dp);
+}
+
+static void g4x_enable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+	intel_enable_dp(encoder);
 	ironlake_edp_backlight_on(intel_dp);
 }
 
 static void vlv_enable_dp(struct intel_encoder *encoder)
 {
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+	ironlake_edp_backlight_on(intel_dp);
 }
 
-static void intel_pre_enable_dp(struct intel_encoder *encoder)
+static void g4x_pre_enable_dp(struct intel_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
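intel_enable_dp() is reduced to the common link bring-up; turning the backlight on moves into thin per-platform wrappers, so VLV (whose port is brought up from the pre_enable hook) only handles the backlight in its enable hook. A sketch of the hook-table pattern with made-up names:

struct encoder;

struct encoder_ops {
	void (*pre_enable)(struct encoder *e);	/* may be NULL */
	void (*enable)(struct encoder *e);
};

static void common_enable(struct encoder *e) { (void)e; /* link training etc. */ }
static void backlight_on(struct encoder *e)  { (void)e; }

/* g4x-style: enable the port, then the backlight. */
static void g4x_enable(struct encoder *e)
{
	common_enable(e);
	backlight_on(e);
}

/* vlv-style: the port was already enabled in pre_enable, so the
 * enable hook only deals with the backlight. */
static void vlv_enable(struct encoder *e)
{
	backlight_on(e);
}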
@@ -1738,53 +1812,59 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	int port = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;
+	struct edp_power_seq power_seq;
 	u32 val;
 
 	mutex_lock(&dev_priv->dpio_lock);
 
-	val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
+	val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
 	val = 0;
 	if (pipe)
 		val |= (1<<21);
 	else
 		val &= ~(1<<21);
 	val |= 0x001000c4;
-	vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
+	vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
 
 	mutex_unlock(&dev_priv->dpio_lock);
 
+	/* init power sequencer on this pipe and port */
+	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+						      &power_seq);
+
 	intel_enable_dp(encoder);
 
 	vlv_wait_port_ready(dev_priv, port);
 }
 
-static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
 	int port = vlv_dport_to_channel(dport);
-
-	if (!IS_VALLEYVIEW(dev))
-		return;
+	int pipe = intel_crtc->pipe;
 
 	/* Program Tx lane resets to default */
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
 		       DPIO_PCS_TX_LANE2_RESET |
 		       DPIO_PCS_TX_LANE1_RESET);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
 		       DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
 		       DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
 		       (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
 		       DPIO_PCS_CLK_SOFT_RESET);
 
 	/* Fix up inter-pair skew failure */
-	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
-	vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
-	vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
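Every vlv_dpio_read()/vlv_dpio_write() call in these hunks now carries the pipe as well as the port: the PHY lane registers are banked per pipe, so the accessor needs both coordinates to find the right register. A generic sketch of banked-register addressing; the stride and layout are illustrative, not the real DPIO map:

#include <stdint.h>

#define DPIO_PIPE_STRIDE 0x200	/* illustrative, not the real stride */

struct dpio { volatile uint32_t *mmio; };

/* One register block per pipe: fold the pipe into the offset once,
 * instead of implicitly assuming pipe A everywhere. */
static uint32_t dpio_read(struct dpio *d, int pipe, uint32_t reg)
{
	return d->mmio[(reg + pipe * DPIO_PIPE_STRIDE) / 4];
}

static void dpio_write(struct dpio *d, int pipe, uint32_t reg, uint32_t val)
{
	d->mmio[(reg + pipe * DPIO_PIPE_STRIDE) / 4] = val;
}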
@@ -1919,10 +1999,13 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(dport->base.base.crtc);
 	unsigned long demph_reg_value, preemph_reg_value,
 		uniqtranscale_reg_value;
 	uint8_t train_set = intel_dp->train_set[0];
 	int port = vlv_dport_to_channel(dport);
+	int pipe = intel_crtc->pipe;
 
 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
 	case DP_TRAIN_PRE_EMPHASIS_0:
@@ -1998,14 +2081,14 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
 	}
 
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
-	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
-	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
 			 uniqtranscale_reg_value);
-	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
-	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
-	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000);
 	mutex_unlock(&dev_priv->dpio_lock);
 
 	return 0;
@@ -2207,7 +2290,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
 
 static bool
 intel_dp_set_link_train(struct intel_dp *intel_dp,
-			uint32_t dp_reg_value,
+			uint32_t *DP,
 			uint8_t dp_train_pat)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -2243,50 +2326,51 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
 		I915_WRITE(DP_TP_CTL(port), temp);
 
 	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
-		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
+		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
 
 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
 		case DP_TRAINING_PATTERN_DISABLE:
-			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
+			*DP |= DP_LINK_TRAIN_OFF_CPT;
 			break;
 		case DP_TRAINING_PATTERN_1:
-			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
+			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
 			break;
 		case DP_TRAINING_PATTERN_2:
-			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
 			break;
 		case DP_TRAINING_PATTERN_3:
 			DRM_ERROR("DP training pattern 3 not supported\n");
-			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
 			break;
 		}
 
 	} else {
-		dp_reg_value &= ~DP_LINK_TRAIN_MASK;
+		*DP &= ~DP_LINK_TRAIN_MASK;
 
 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
 		case DP_TRAINING_PATTERN_DISABLE:
-			dp_reg_value |= DP_LINK_TRAIN_OFF;
+			*DP |= DP_LINK_TRAIN_OFF;
 			break;
 		case DP_TRAINING_PATTERN_1:
-			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
+			*DP |= DP_LINK_TRAIN_PAT_1;
 			break;
 		case DP_TRAINING_PATTERN_2:
-			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+			*DP |= DP_LINK_TRAIN_PAT_2;
 			break;
 		case DP_TRAINING_PATTERN_3:
 			DRM_ERROR("DP training pattern 3 not supported\n");
-			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+			*DP |= DP_LINK_TRAIN_PAT_2;
 			break;
 		}
 	}
 
-	I915_WRITE(intel_dp->output_reg, dp_reg_value);
+	I915_WRITE(intel_dp->output_reg, *DP);
 	POSTING_READ(intel_dp->output_reg);
 
-	intel_dp_aux_native_write_1(intel_dp,
-				    DP_TRAINING_PATTERN_SET,
-				    dp_train_pat);
+	ret = intel_dp_aux_native_write_1(intel_dp, DP_TRAINING_PATTERN_SET,
+					  dp_train_pat);
+	if (ret != 1)
+		return false;
 
 	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
 	    DP_TRAINING_PATTERN_DISABLE) {
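intel_dp_set_link_train() now takes uint32_t *DP instead of a value: the helper edits the caller's copy of the port register (clearing and setting the LINK_TRAIN bits) so the training loops keep an up-to-date value to write back, and the AUX write of DP_TRAINING_PATTERN_SET is finally checked for failure. A reduced sketch of the in/out-parameter pattern, with simplified pattern selection:

#include <stdbool.h>
#include <stdint.h>

#define LINK_TRAIN_MASK  0x3
#define LINK_TRAIN_PAT_1 0x1

/* Stand-in for the AUX channel write; returns bytes written. */
static int aux_write(uint8_t addr, uint8_t val) { (void)addr; (void)val; return 1; }

/* The helper mutates the caller's register image and reports failure,
 * instead of taking a throw-away copy by value. */
static bool set_link_train(uint32_t *dp, uint8_t pat)
{
	*dp &= ~LINK_TRAIN_MASK;
	*dp |= LINK_TRAIN_PAT_1;	/* selected from pat in the real code */

	/* ... write *dp to the port register here ... */

	return aux_write(0x02, pat) == 1;	/* propagate AUX errors */
}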
@@ -2301,6 +2385,37 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
 	return true;
 }
 
+static bool
+intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
+			uint8_t dp_train_pat)
+{
+	memset(intel_dp->train_set, 0, 4);
+	intel_dp_set_signal_levels(intel_dp, DP);
+	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
+}
+
+static bool
+intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
+			uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	intel_get_adjust_train(intel_dp, link_status);
+	intel_dp_set_signal_levels(intel_dp, DP);
+
+	I915_WRITE(intel_dp->output_reg, *DP);
+	POSTING_READ(intel_dp->output_reg);
+
+	ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
+					intel_dp->train_set,
+					intel_dp->lane_count);
+
+	return ret == intel_dp->lane_count;
+}
+
 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -2342,32 +2457,37 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 	uint8_t voltage;
 	int voltage_tries, loop_tries;
 	uint32_t DP = intel_dp->DP;
+	uint8_t link_config[2];
 
 	if (HAS_DDI(dev))
 		intel_ddi_prepare_link_retrain(encoder);
 
 	/* Write the link configuration data */
-	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
-				  intel_dp->link_configuration,
-				  DP_LINK_CONFIGURATION_SIZE);
+	link_config[0] = intel_dp->link_bw;
+	link_config[1] = intel_dp->lane_count;
+	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);
+
+	link_config[0] = 0;
+	link_config[1] = DP_SET_ANSI_8B10B;
+	intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);
 
 	DP |= DP_PORT_EN;
 
-	memset(intel_dp->train_set, 0, 4);
+	/* clock recovery */
+	if (!intel_dp_reset_link_train(intel_dp, &DP,
+				       DP_TRAINING_PATTERN_1 |
+				       DP_LINK_SCRAMBLING_DISABLE)) {
+		DRM_ERROR("failed to enable link training\n");
+		return;
+	}
+
 	voltage = 0xff;
 	voltage_tries = 0;
 	loop_tries = 0;
 	for (;;) {
-		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
-		uint8_t link_status[DP_LINK_STATUS_SIZE];
-
-		intel_dp_set_signal_levels(intel_dp, &DP);
-
-		/* Set training pattern 1 */
-		if (!intel_dp_set_link_train(intel_dp, DP,
-					     DP_TRAINING_PATTERN_1 |
-					     DP_LINK_SCRAMBLING_DISABLE))
-			break;
+		uint8_t link_status[DP_LINK_STATUS_SIZE];
 
 		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
@@ -2390,7 +2510,9 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 				DRM_DEBUG_KMS("too many full retries, give up\n");
 				break;
 			}
-			memset(intel_dp->train_set, 0, 4);
+			intel_dp_reset_link_train(intel_dp, &DP,
+						  DP_TRAINING_PATTERN_1 |
+						  DP_LINK_SCRAMBLING_DISABLE);
 			voltage_tries = 0;
 			continue;
 		}
@@ -2406,8 +2528,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 			voltage_tries = 0;
 		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
-		/* Compute new intel_dp->train_set as requested by target */
-		intel_get_adjust_train(intel_dp, link_status);
+		/* Update training set as requested by target */
+		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
+			DRM_ERROR("failed to update link training\n");
+			break;
+		}
 	}
 
 	intel_dp->DP = DP;
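With intel_dp_reset_link_train() and intel_dp_update_link_train() factored out, the clock-recovery loop reads as: program pattern 1 once, poll link status, restart with zeroed vswing/pre-emphasis when the retry budget at a voltage level is spent, and otherwise push the sink's requested adjustments. A compact sketch of that control flow, with the status handling simplified:

#include <stdbool.h>
#include <stdint.h>

static bool reset_training(uint32_t *dp)  { (void)dp; return true; }
static bool update_training(uint32_t *dp) { (void)dp; return true; }
static bool clock_recovery_ok(void)       { return true; }

static void clock_recovery(uint32_t *dp)
{
	int voltage_tries = 0, loop_tries = 0;

	if (!reset_training(dp))	/* pattern 1, zeroed train_set */
		return;

	for (;;) {
		if (clock_recovery_ok())
			break;			/* CR done, go to channel eq */

		if (++voltage_tries == 5) {	/* stuck at this swing level */
			if (++loop_tries == 5)
				break;		/* give up entirely */
			reset_training(dp);	/* full restart */
			voltage_tries = 0;
			continue;
		}

		if (!update_training(dp))	/* apply sink's requested levels */
			break;
	}
}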
@@ -2421,11 +2546,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 	uint32_t DP = intel_dp->DP;
 
 	/* channel equalization */
+	if (!intel_dp_set_link_train(intel_dp, &DP,
+				     DP_TRAINING_PATTERN_2 |
+				     DP_LINK_SCRAMBLING_DISABLE)) {
+		DRM_ERROR("failed to start channel equalization\n");
+		return;
+	}
+
 	tries = 0;
 	cr_tries = 0;
 	channel_eq = false;
 	for (;;) {
 		uint8_t link_status[DP_LINK_STATUS_SIZE];
 
 		if (cr_tries > 5) {
 			DRM_ERROR("failed to train DP, aborting\n");
@@ -2433,21 +2565,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 			break;
 		}
 
-		intel_dp_set_signal_levels(intel_dp, &DP);
-
-		/* channel eq pattern */
-		if (!intel_dp_set_link_train(intel_dp, DP,
-					     DP_TRAINING_PATTERN_2 |
-					     DP_LINK_SCRAMBLING_DISABLE))
-			break;
-
 		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
-		if (!intel_dp_get_link_status(intel_dp, link_status))
+		if (!intel_dp_get_link_status(intel_dp, link_status)) {
+			DRM_ERROR("failed to get link status\n");
 			break;
+		}
 
 		/* Make sure clock is still ok */
 		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
 			intel_dp_start_link_train(intel_dp);
+			intel_dp_set_link_train(intel_dp, &DP,
+						DP_TRAINING_PATTERN_2 |
+						DP_LINK_SCRAMBLING_DISABLE);
 			cr_tries++;
 			continue;
 		}
@@ -2461,13 +2590,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 		if (tries > 5) {
 			intel_dp_link_down(intel_dp);
 			intel_dp_start_link_train(intel_dp);
+			intel_dp_set_link_train(intel_dp, &DP,
+						DP_TRAINING_PATTERN_2 |
+						DP_LINK_SCRAMBLING_DISABLE);
 			tries = 0;
 			cr_tries++;
 			continue;
 		}
 
-		/* Compute new intel_dp->train_set as requested by target */
-		intel_get_adjust_train(intel_dp, link_status);
+		/* Update training set as requested by target */
+		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
+			DRM_ERROR("failed to update link training\n");
+			break;
+		}
 		++tries;
 	}
 
@@ -2482,7 +2617,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 
 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
 {
-	intel_dp_set_link_train(intel_dp, intel_dp->DP,
+	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
 				DP_TRAINING_PATTERN_DISABLE);
 }
 
@@ -2569,6 +2704,10 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 static bool
 intel_dp_get_dpcd(struct intel_dp *intel_dp)
 {
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
 
 	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
@@ -2584,11 +2723,16 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 
 	/* Check if the panel supports PSR */
 	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
-	intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
-				       intel_dp->psr_dpcd,
-				       sizeof(intel_dp->psr_dpcd));
-	if (is_edp_psr(intel_dp))
-		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+	if (is_edp(intel_dp)) {
+		intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
+					       intel_dp->psr_dpcd,
+					       sizeof(intel_dp->psr_dpcd));
+		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
+			dev_priv->psr.sink_support = true;
+			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+		}
+	}
+
 	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
 	      DP_DWN_STRM_PORT_PRESENT))
 		return true; /* native DP sink */
@@ -2708,7 +2852,6 @@ static enum drm_connector_status
 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
 {
 	uint8_t *dpcd = intel_dp->dpcd;
-	bool hpd;
 	uint8_t type;
 
 	if (!intel_dp_get_dpcd(intel_dp))
@@ -2719,8 +2862,8 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
 		return connector_status_connected;
 
 	/* If we're HPD-aware, SINK_COUNT changes dynamically */
-	hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
-	if (hpd) {
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
 		uint8_t reg;
 		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
 						    &reg, 1))
@@ -2734,9 +2877,18 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
 		return connector_status_connected;
 
 	/* Well we tried, say unknown for unreliable port types */
-	type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
-	if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
-		return connector_status_unknown;
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
+		if (type == DP_DS_PORT_TYPE_VGA ||
+		    type == DP_DS_PORT_TYPE_NON_EDID)
+			return connector_status_unknown;
+	} else {
+		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+			DP_DWN_STRM_PORT_TYPE_MASK;
+		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
+		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
+			return connector_status_unknown;
+	}
 
 	/* Anything else is out of spec, warn and ignore */
 	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
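Both detect paths now branch on DPCD revision: DPCD 1.1+ sinks expose a detailed per-port descriptor (including the HPD-capable bit) in the downstream port table, while 1.0 sinks only carry a coarse type field inside DP_DOWNSTREAMPORT_PRESENT. A sketch of the split; the type codes below are placeholders, see drm_dp_helper.h for the real values:

#include <stdbool.h>
#include <stdint.h>

#define DPCD_REV		0x000
#define DOWNSTREAMPORT_PRESENT	0x005

/* Placeholder type codes, not the real drm_dp_helper.h constants. */
enum { DS_TYPE_VGA = 1, DS_TYPE_NON_EDID = 4 };
enum { DWN_TYPE_ANALOG = 1, DWN_TYPE_OTHER = 3 };

static bool downstream_unreliable(const uint8_t *dpcd,
				  const uint8_t *ds_ports)
{
	uint8_t type;

	if (dpcd[DPCD_REV] >= 0x11) {
		/* DPCD 1.1+: a detailed per-port descriptor exists */
		type = ds_ports[0] & 0x07;
		return type == DS_TYPE_VGA || type == DS_TYPE_NON_EDID;
	}

	/* DPCD 1.0: only the coarse type field in reg 0x005 */
	type = (dpcd[DOWNSTREAMPORT_PRESENT] >> 1) & 0x03;
	return type == DWN_TYPE_ANALOG || type == DWN_TYPE_OTHER;
}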
@@ -2810,19 +2962,11 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 
 	/* use cached edid if we have one */
 	if (intel_connector->edid) {
-		struct edid *edid;
-		int size;
-
 		/* invalid edid */
 		if (IS_ERR(intel_connector->edid))
 			return NULL;
 
-		size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
-		edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
-		if (!edid)
-			return NULL;
-
-		return edid;
+		return drm_edid_duplicate(intel_connector->edid);
 	}
 
 	return drm_get_edid(connector, adapter);
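The open-coded copy is replaced by the drm_edid_duplicate() helper; the removed lines show exactly what the helper has to do, since an EDID's size is not fixed but (extensions + 1) * EDID_LENGTH. A stand-alone sketch of that computation:

#include <stdlib.h>
#include <string.h>

#define EDID_LENGTH 128		/* one EDID block */

/* Base block layout: byte 126 holds the extension count. */
struct edid_hdr {
	unsigned char data[126];
	unsigned char extensions;
	unsigned char checksum;
};

/* Duplicate a variable-length EDID: the base block's extension count
 * tells us how many extra 128-byte blocks follow it. */
static void *edid_duplicate(const struct edid_hdr *edid)
{
	size_t size = ((size_t)edid->extensions + 1) * EDID_LENGTH;
	void *copy = malloc(size);

	if (copy)
		memcpy(copy, edid, size);
	return copy;
}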
@@ -3030,7 +3174,6 @@ intel_dp_connector_destroy(struct drm_connector *connector)
 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
 		intel_panel_fini(&intel_connector->panel);
 
-	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -3101,7 +3244,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
 bool intel_dpd_is_edp(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct child_device_config *p_child;
+	union child_device_config *p_child;
 	int i;
 
 	if (!dev_priv->vbt.child_dev_num)
@@ -3110,8 +3253,8 @@ bool intel_dpd_is_edp(struct drm_device *dev)
 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
 		p_child = dev_priv->vbt.child_dev + i;
 
-		if (p_child->dvo_port == PORT_IDPD &&
-		    p_child->device_type == DEVICE_TYPE_eDP)
+		if (p_child->common.dvo_port == PORT_IDPD &&
+		    p_child->common.device_type == DEVICE_TYPE_eDP)
 			return true;
 	}
 	return false;
@@ -3144,24 +3287,26 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct edp_power_seq cur, vbt, spec, final;
 	u32 pp_on, pp_off, pp_div, pp;
-	int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
 
 	if (HAS_PCH_SPLIT(dev)) {
-		pp_control_reg = PCH_PP_CONTROL;
+		pp_ctrl_reg = PCH_PP_CONTROL;
 		pp_on_reg = PCH_PP_ON_DELAYS;
 		pp_off_reg = PCH_PP_OFF_DELAYS;
 		pp_div_reg = PCH_PP_DIVISOR;
 	} else {
-		pp_control_reg = PIPEA_PP_CONTROL;
-		pp_on_reg = PIPEA_PP_ON_DELAYS;
-		pp_off_reg = PIPEA_PP_OFF_DELAYS;
-		pp_div_reg = PIPEA_PP_DIVISOR;
+		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
+		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
+		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
 	}
 
 	/* Workaround: Need to write PP_CONTROL with the unlock key as
 	 * the very first thing. */
 	pp = ironlake_get_pp_control(intel_dp);
-	I915_WRITE(pp_control_reg, pp);
+	I915_WRITE(pp_ctrl_reg, pp);
 
 	pp_on = I915_READ(pp_on_reg);
 	pp_off = I915_READ(pp_off_reg);
@@ -3249,9 +3394,11 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 		pp_off_reg = PCH_PP_OFF_DELAYS;
 		pp_div_reg = PCH_PP_DIVISOR;
 	} else {
-		pp_on_reg = PIPEA_PP_ON_DELAYS;
-		pp_off_reg = PIPEA_PP_OFF_DELAYS;
-		pp_div_reg = PIPEA_PP_DIVISOR;
+		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
+		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
 	}
 
 	/* And finally store the new values in the power sequencer. */
@@ -3268,12 +3415,15 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 	/* Haswell doesn't have any port selection bits for the panel
 	 * power sequencer any more. */
 	if (IS_VALLEYVIEW(dev)) {
-		port_sel = I915_READ(pp_on_reg) & 0xc0000000;
+		if (dp_to_dig_port(intel_dp)->port == PORT_B)
+			port_sel = PANEL_PORT_SELECT_DPB_VLV;
+		else
+			port_sel = PANEL_PORT_SELECT_DPC_VLV;
 	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
 		if (dp_to_dig_port(intel_dp)->port == PORT_A)
-			port_sel = PANEL_POWER_PORT_DP_A;
+			port_sel = PANEL_PORT_SELECT_DPA;
 		else
-			port_sel = PANEL_POWER_PORT_DP_D;
+			port_sel = PANEL_PORT_SELECT_DPD;
 	}
 
 	pp_on |= port_sel;
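On VLV the panel power sequencer moves from the fixed PIPEA_PP_* block to per-pipe VLV_PIPE_PP_*(pipe) registers chosen via vlv_power_sequencer_pipe(), and the port-select bits are now written explicitly per port instead of being preserved from whatever was already in the register. A generic sketch of the per-pipe register selection; base, stride and offsets are illustrative, not the real register map:

#include <stdint.h>

#define PP_BASE		0x61200	/* illustrative base address */
#define PP_PIPE_STRIDE	0x100	/* illustrative per-pipe stride */

#define PP_CONTROL(pipe)	(PP_BASE + 0x04 + (pipe) * PP_PIPE_STRIDE)
#define PP_ON_DELAYS(pipe)	(PP_BASE + 0x08 + (pipe) * PP_PIPE_STRIDE)
#define PP_OFF_DELAYS(pipe)	(PP_BASE + 0x0c + (pipe) * PP_PIPE_STRIDE)
#define PP_DIVISOR(pipe)	(PP_BASE + 0x10 + (pipe) * PP_PIPE_STRIDE)

/* Pick the register block for whichever pipe drives this panel,
 * rather than hard-coding pipe A. */
static uint32_t pp_control_reg_for(int pipe)
{
	return PP_CONTROL(pipe);
}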
@@ -3516,11 +3666,11 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 	struct drm_encoder *encoder;
 	struct intel_connector *intel_connector;
 
-	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
 	if (!intel_dig_port)
 		return;
 
-	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
 		kfree(intel_dig_port);
 		return;
@@ -3539,12 +3689,12 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
 	intel_encoder->get_config = intel_dp_get_config;
 	if (IS_VALLEYVIEW(dev)) {
-		intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
+		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
 		intel_encoder->pre_enable = vlv_pre_enable_dp;
 		intel_encoder->enable = vlv_enable_dp;
 	} else {
-		intel_encoder->pre_enable = intel_pre_enable_dp;
-		intel_encoder->enable = intel_enable_dp;
+		intel_encoder->pre_enable = g4x_pre_enable_dp;
+		intel_encoder->enable = g4x_enable_dp;
 	}
 
 	intel_dig_port->port = port;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 28cae80495e2..eaf0003ddfd9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -77,7 +77,6 @@
 /* the i915, i945 have a single sDVO i2c bus - which is different */
 #define MAX_OUTPUTS 6
 /* maximum connectors per crtcs in the mode set */
-#define INTELFB_CONN_LIMIT 4
 
 #define INTEL_I2C_BUS_DVO 1
 #define INTEL_I2C_BUS_SDVO 2
@@ -93,13 +92,17 @@
 #define INTEL_OUTPUT_HDMI 6
 #define INTEL_OUTPUT_DISPLAYPORT 7
 #define INTEL_OUTPUT_EDP 8
-#define INTEL_OUTPUT_UNKNOWN 9
+#define INTEL_OUTPUT_DSI 9
+#define INTEL_OUTPUT_UNKNOWN 10
 
 #define INTEL_DVO_CHIP_NONE 0
 #define INTEL_DVO_CHIP_LVDS 1
 #define INTEL_DVO_CHIP_TMDS 2
 #define INTEL_DVO_CHIP_TVOUT 4
 
+#define INTEL_DSI_COMMAND_MODE	0
+#define INTEL_DSI_VIDEO_MODE	1
+
 struct intel_framebuffer {
 	struct drm_framebuffer base;
 	struct drm_i915_gem_object *obj;
@@ -207,8 +210,21 @@ struct intel_crtc_config {
 #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
 	unsigned long quirks;
 
+	/* User requested mode, only valid as a starting point to
+	 * compute adjusted_mode, except in the case of (S)DVO where
+	 * it's also for the output timings of the (S)DVO chip.
+	 * adjusted_mode will then correspond to the S(DVO) chip's
+	 * preferred input timings. */
 	struct drm_display_mode requested_mode;
+	/* Actual pipe timings ie. what we program into the pipe timing
+	 * registers. adjusted_mode.crtc_clock is the pipe pixel clock. */
 	struct drm_display_mode adjusted_mode;
+
+	/* Pipe source size (ie. panel fitter input size)
+	 * All planes will be positioned inside this space,
+	 * and get clipped at the edges. */
+	int pipe_src_w, pipe_src_h;
+
 	/* Whether to set up the PCH/FDI. Note that we never allow sharing
 	 * between pch encoders and cpu encoders. */
 	bool has_pch_encoder;
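The new comments pin down the three coordinate spaces in intel_crtc_config: requested_mode is only an input to mode fixup, adjusted_mode holds the timings actually programmed (with adjusted_mode.crtc_clock as the pipe pixel clock), and pipe_src_w/h is the panel-fitter input rectangle that all planes are clipped against. A toy example of the distinction, using simplified stand-in types:

struct display_mode { int hdisplay, vdisplay, clock; };

struct crtc_config {
	struct display_mode requested_mode;	/* what userspace asked for */
	struct display_mode adjusted_mode;	/* what the pipe is programmed to */
	int pipe_src_w, pipe_src_h;		/* panel fitter input size */
};

/* e.g. a 1280x720 request on a 1920x1080 eDP panel with fitting:
 * the pipe still scans out the panel's native timings, while planes
 * are composed in (and clipped to) the 1280x720 source space. */
static void example(struct crtc_config *c)
{
	c->requested_mode = (struct display_mode){ 1280,  720,  74250 };
	c->adjusted_mode  = (struct display_mode){ 1920, 1080, 148500 };
	c->pipe_src_w = 1280;
	c->pipe_src_h = 720;
}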
@@ -262,7 +278,8 @@ struct intel_crtc_config {
 
 	/*
 	 * Frequence the dpll for the port should run at. Differs from the
-	 * adjusted dotclock e.g. for DP or 12bpc hdmi mode.
+	 * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
+	 * already multiplied by pixel_multiplier.
 	 */
 	int port_clock;
 
@@ -288,6 +305,8 @@ struct intel_crtc_config {
 	struct intel_link_m_n fdi_m_n;
 
 	bool ips_enabled;
+
+	bool double_wide;
 };
 
 struct intel_crtc {
@@ -417,13 +436,11 @@ struct intel_hdmi {
 };
 
 #define DP_MAX_DOWNSTREAM_PORTS		0x10
-#define DP_LINK_CONFIGURATION_SIZE	9
 
 struct intel_dp {
 	uint32_t output_reg;
 	uint32_t aux_ch_ctl_reg;
 	uint32_t DP;
-	uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
 	bool has_audio;
 	enum hdmi_force_audio force_audio;
 	uint32_t color_range;
@@ -495,80 +512,6 @@ struct intel_unpin_work {
 	bool enable_stall_check;
 };
 
-int intel_pch_rawclk(struct drm_device *dev);
-
-int intel_connector_update_modes(struct drm_connector *connector,
-				 struct edid *edid);
-int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-
-extern void intel_attach_force_audio_property(struct drm_connector *connector);
-extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
-
-extern bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
-extern void intel_crt_init(struct drm_device *dev);
-extern void intel_hdmi_init(struct drm_device *dev,
-			    int hdmi_reg, enum port port);
-extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
-				      struct intel_connector *intel_connector);
-extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
-extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
-				      struct intel_crtc_config *pipe_config);
-extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
-			    bool is_sdvob);
-extern void intel_dvo_init(struct drm_device *dev);
-extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev);
-extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
-			       struct intel_ring_buffer *ring);
-extern void intel_mark_idle(struct drm_device *dev);
-extern void intel_lvds_init(struct drm_device *dev);
-extern bool intel_is_dual_link_lvds(struct drm_device *dev);
-extern void intel_dp_init(struct drm_device *dev, int output_reg,
-			  enum port port);
-extern bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
-				    struct intel_connector *intel_connector);
-extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
-extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
-extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
-extern bool intel_dp_compute_config(struct intel_encoder *encoder,
-				    struct intel_crtc_config *pipe_config);
-extern bool intel_dpd_is_edp(struct drm_device *dev);
-extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
-extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
-				      enum plane plane);
-
-/* intel_panel.c */
-extern int intel_panel_init(struct intel_panel *panel,
-			    struct drm_display_mode *fixed_mode);
-extern void intel_panel_fini(struct intel_panel *panel);
-
-extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
-				   struct drm_display_mode *adjusted_mode);
-extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
-				    struct intel_crtc_config *pipe_config,
-				    int fitting_mode);
-extern void intel_gmch_panel_fitting(struct intel_crtc *crtc,
-				     struct intel_crtc_config *pipe_config,
-				     int fitting_mode);
-extern void intel_panel_set_backlight(struct drm_device *dev,
-				      u32 level, u32 max);
-extern int intel_panel_setup_backlight(struct drm_connector *connector);
-extern void intel_panel_enable_backlight(struct drm_device *dev,
-					 enum pipe pipe);
-extern void intel_panel_disable_backlight(struct drm_device *dev);
-extern void intel_panel_destroy_backlight(struct drm_device *dev);
-extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
-
 struct intel_set_config {
 	struct drm_encoder **save_connector_encoders;
 	struct drm_crtc **save_encoder_crtcs;
@@ -577,18 +520,14 @@ struct intel_set_config {
 	bool mode_changed;
 };
 
-extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
-extern void intel_crtc_load_lut(struct drm_crtc *crtc);
-extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
-extern void intel_encoder_destroy(struct drm_encoder *encoder);
-extern void intel_connector_dpms(struct drm_connector *, int mode);
-extern bool intel_connector_get_hw_state(struct intel_connector *connector);
-extern void intel_modeset_check_state(struct drm_device *dev);
-extern void intel_plane_restore(struct drm_plane *plane);
-extern void intel_plane_disable(struct drm_plane *plane);
-
+struct intel_load_detect_pipe {
+	struct drm_framebuffer *release_fb;
+	bool load_detect_temp;
+	int dpms_mode;
+};
 
-static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
+static inline struct intel_encoder *
+intel_attached_encoder(struct drm_connector *connector)
 {
 	return to_intel_connector(connector)->encoder;
 }
@@ -616,73 +555,94 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
 	return container_of(intel_hdmi, struct intel_digital_port, hdmi);
 }
 
+
+/* i915_irq.c */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+					   enum pipe pipe, bool enable);
+bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
+					   enum transcoder pch_transcoder,
+					   bool enable);
+void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void hsw_pc8_disable_interrupts(struct drm_device *dev);
+void hsw_pc8_restore_interrupts(struct drm_device *dev);
+
+
+/* intel_crt.c */
+void intel_crt_init(struct drm_device *dev);
+
+
+/* intel_ddi.c */
+void intel_prepare_ddi(struct drm_device *dev);
+void hsw_fdi_link_train(struct drm_crtc *crtc);
+void intel_ddi_init(struct drm_device *dev, enum port port);
+enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
+int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
+void intel_ddi_pll_init(struct drm_device *dev);
+void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
+void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+				       enum transcoder cpu_transcoder);
+void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
+void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
+bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
+void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
+void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+void intel_ddi_fdi_disable(struct drm_crtc *crtc);
+void intel_ddi_get_config(struct intel_encoder *encoder,
+			  struct intel_crtc_config *pipe_config);
+
+
+/* intel_display.c */
+int intel_pch_rawclk(struct drm_device *dev);
+void intel_mark_busy(struct drm_device *dev);
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
+			struct intel_ring_buffer *ring);
+void intel_mark_idle(struct drm_device *dev);
+void intel_crtc_restore_mode(struct drm_crtc *crtc);
+void intel_crtc_update_dpms(struct drm_crtc *crtc);
+void intel_encoder_destroy(struct drm_encoder *encoder);
+void intel_connector_dpms(struct drm_connector *, int mode);
+bool intel_connector_get_hw_state(struct intel_connector *connector);
+void intel_modeset_check_state(struct drm_device *dev);
 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
 				struct intel_digital_port *port);
-
-extern void intel_connector_attach_encoder(struct intel_connector *connector,
-					   struct intel_encoder *encoder);
-extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
-
-extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
-						    struct drm_crtc *crtc);
+void intel_connector_attach_encoder(struct intel_connector *connector,
+				    struct intel_encoder *encoder);
+struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+					     struct drm_crtc *crtc);
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
-extern enum transcoder
-intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
-			     enum pipe pipe);
-extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
-extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
-extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
-extern void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
-
-struct intel_load_detect_pipe {
-	struct drm_framebuffer *release_fb;
-	bool load_detect_temp;
-	int dpms_mode;
-};
-extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
-				       struct drm_display_mode *mode,
-				       struct intel_load_detect_pipe *old);
-extern void intel_release_load_detect_pipe(struct drm_connector *connector,
-					   struct intel_load_detect_pipe *old);
-
-extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
-				    u16 blue, int regno);
-extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
-				    u16 *blue, int regno);
-
-extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
-				      struct drm_i915_gem_object *obj,
-				      struct intel_ring_buffer *pipelined);
-extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
-
-extern int intel_framebuffer_init(struct drm_device *dev,
-				  struct intel_framebuffer *ifb,
-				  struct drm_mode_fb_cmd2 *mode_cmd,
-				  struct drm_i915_gem_object *obj);
-extern void intel_framebuffer_fini(struct intel_framebuffer *fb);
-extern int intel_fbdev_init(struct drm_device *dev);
-extern void intel_fbdev_initial_config(struct drm_device *dev);
-extern void intel_fbdev_fini(struct drm_device *dev);
-extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
-extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
-extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
-extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
-
-extern void intel_setup_overlay(struct drm_device *dev);
-extern void intel_cleanup_overlay(struct drm_device *dev);
-extern int intel_overlay_switch_off(struct intel_overlay *overlay);
-extern int intel_overlay_put_image(struct drm_device *dev, void *data,
-				   struct drm_file *file_priv);
-extern int intel_overlay_attrs(struct drm_device *dev, void *data,
-			       struct drm_file *file_priv);
-
-extern void intel_fb_output_poll_changed(struct drm_device *dev);
-extern void intel_fb_restore_mode(struct drm_device *dev);
-
-struct intel_shared_dpll *
-intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
-
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+					     enum pipe pipe);
+void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
+bool intel_get_load_detect_pipe(struct drm_connector *connector,
+				struct drm_display_mode *mode,
+				struct intel_load_detect_pipe *old);
+void intel_release_load_detect_pipe(struct drm_connector *connector,
+				    struct intel_load_detect_pipe *old);
+int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+			       struct drm_i915_gem_object *obj,
+			       struct intel_ring_buffer *pipelined);
+void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
+int intel_framebuffer_init(struct drm_device *dev,
+			   struct intel_framebuffer *ifb,
+			   struct drm_mode_fb_cmd2 *mode_cmd,
+			   struct drm_i915_gem_object *obj);
+void intel_framebuffer_fini(struct intel_framebuffer *fb);
+void intel_prepare_page_flip(struct drm_device *dev, int plane);
+void intel_finish_page_flip(struct drm_device *dev, int pipe);
+void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
 void assert_shared_dpll(struct drm_i915_private *dev_priv,
 			struct intel_shared_dpll *pll,
 			bool state);
@@ -696,103 +656,173 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
696 enum pipe pipe, bool state); 656 enum pipe pipe, bool state);
697#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true) 657#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
698#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false) 658#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
699extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, 659void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
700 bool state);
701#define assert_pipe_enabled(d, p) assert_pipe(d, p, true) 660#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
702#define assert_pipe_disabled(d, p) assert_pipe(d, p, false) 661#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
662void intel_write_eld(struct drm_encoder *encoder,
663 struct drm_display_mode *mode);
664unsigned long intel_gen4_compute_page_offset(int *x, int *y,
665 unsigned int tiling_mode,
666 unsigned int bpp,
667 unsigned int pitch);
668void intel_display_handle_reset(struct drm_device *dev);
669void hsw_enable_pc8_work(struct work_struct *__work);
670void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
671void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
672void intel_dp_get_m_n(struct intel_crtc *crtc,
673 struct intel_crtc_config *pipe_config);
674int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
675void
676ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
677 int dotclock);
678bool intel_crtc_active(struct drm_crtc *crtc);
679void i915_disable_vga_mem(struct drm_device *dev);
680
681
682/* intel_dp.c */
683void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
684bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
685 struct intel_connector *intel_connector);
686void intel_dp_start_link_train(struct intel_dp *intel_dp);
687void intel_dp_complete_link_train(struct intel_dp *intel_dp);
688void intel_dp_stop_link_train(struct intel_dp *intel_dp);
689void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
690void intel_dp_encoder_destroy(struct drm_encoder *encoder);
691void intel_dp_check_link_status(struct intel_dp *intel_dp);
692bool intel_dp_compute_config(struct intel_encoder *encoder,
693 struct intel_crtc_config *pipe_config);
694bool intel_dpd_is_edp(struct drm_device *dev);
695void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
696void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
697void ironlake_edp_panel_on(struct intel_dp *intel_dp);
698void ironlake_edp_panel_off(struct intel_dp *intel_dp);
699void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
700void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
701void intel_edp_psr_enable(struct intel_dp *intel_dp);
702void intel_edp_psr_disable(struct intel_dp *intel_dp);
703void intel_edp_psr_update(struct drm_device *dev);
704
705
706/* intel_dsi.c */
707bool intel_dsi_init(struct drm_device *dev);
708
709
710/* intel_dvo.c */
711void intel_dvo_init(struct drm_device *dev);
712
713
714/* intel_fb.c */
715int intel_fbdev_init(struct drm_device *dev);
716void intel_fbdev_initial_config(struct drm_device *dev);
717void intel_fbdev_fini(struct drm_device *dev);
718void intel_fbdev_set_suspend(struct drm_device *dev, int state);
719void intel_fb_output_poll_changed(struct drm_device *dev);
720void intel_fb_restore_mode(struct drm_device *dev);
721
722
723/* intel_hdmi.c */
724void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
725void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
726 struct intel_connector *intel_connector);
727struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
728bool intel_hdmi_compute_config(struct intel_encoder *encoder,
729 struct intel_crtc_config *pipe_config);
730
731
732/* intel_lvds.c */
733void intel_lvds_init(struct drm_device *dev);
734bool intel_is_dual_link_lvds(struct drm_device *dev);
735
736
737/* intel_modes.c */
738int intel_connector_update_modes(struct drm_connector *connector,
739 struct edid *edid);
740int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
741void intel_attach_force_audio_property(struct drm_connector *connector);
742void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
703 743
704extern void intel_init_clock_gating(struct drm_device *dev);
705extern void intel_suspend_hw(struct drm_device *dev);
706extern void intel_write_eld(struct drm_encoder *encoder,
707 struct drm_display_mode *mode);
708extern void intel_prepare_ddi(struct drm_device *dev);
709extern void hsw_fdi_link_train(struct drm_crtc *crtc);
710extern void intel_ddi_init(struct drm_device *dev, enum port port);
711
712/* For use by IVB LP watermark workaround in intel_sprite.c */
713extern void intel_update_watermarks(struct drm_device *dev);
714extern void intel_update_sprite_watermarks(struct drm_plane *plane,
715 struct drm_crtc *crtc,
716 uint32_t sprite_width, int pixel_size,
717 bool enabled, bool scaled);
718
719extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
720 unsigned int tiling_mode,
721 unsigned int bpp,
722 unsigned int pitch);
723
724extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
725 struct drm_file *file_priv);
726extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
727 struct drm_file *file_priv);
728
729/* Power-related functions, located in intel_pm.c */
730extern void intel_init_pm(struct drm_device *dev);
731/* FBC */
732extern bool intel_fbc_enabled(struct drm_device *dev);
733extern void intel_update_fbc(struct drm_device *dev);
734/* IPS */
735extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
736extern void intel_gpu_ips_teardown(void);
737
738/* Power well */
739extern int i915_init_power_well(struct drm_device *dev);
740extern void i915_remove_power_well(struct drm_device *dev);
741
742extern bool intel_display_power_enabled(struct drm_device *dev,
743 enum intel_display_power_domain domain);
744extern void intel_init_power_well(struct drm_device *dev);
745extern void intel_set_power_well(struct drm_device *dev, bool enable);
746extern void intel_enable_gt_powersave(struct drm_device *dev);
747extern void intel_disable_gt_powersave(struct drm_device *dev);
748extern void ironlake_teardown_rc6(struct drm_device *dev);
744
745/* intel_overlay.c */
746void intel_setup_overlay(struct drm_device *dev);
747void intel_cleanup_overlay(struct drm_device *dev);
748int intel_overlay_switch_off(struct intel_overlay *overlay);
749int intel_overlay_put_image(struct drm_device *dev, void *data,
750 struct drm_file *file_priv);
751int intel_overlay_attrs(struct drm_device *dev, void *data,
752 struct drm_file *file_priv);
753
754
755/* intel_panel.c */
756int intel_panel_init(struct intel_panel *panel,
757 struct drm_display_mode *fixed_mode);
758void intel_panel_fini(struct intel_panel *panel);
759void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
760 struct drm_display_mode *adjusted_mode);
761void intel_pch_panel_fitting(struct intel_crtc *crtc,
762 struct intel_crtc_config *pipe_config,
763 int fitting_mode);
764void intel_gmch_panel_fitting(struct intel_crtc *crtc,
765 struct intel_crtc_config *pipe_config,
766 int fitting_mode);
767void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max);
768int intel_panel_setup_backlight(struct drm_connector *connector);
769void intel_panel_enable_backlight(struct drm_device *dev, enum pipe pipe);
770void intel_panel_disable_backlight(struct drm_device *dev);
771void intel_panel_destroy_backlight(struct drm_device *dev);
772enum drm_connector_status intel_panel_detect(struct drm_device *dev);
773
774
775/* intel_pm.c */
776void intel_init_clock_gating(struct drm_device *dev);
777void intel_suspend_hw(struct drm_device *dev);
778void intel_update_watermarks(struct drm_crtc *crtc);
779void intel_update_sprite_watermarks(struct drm_plane *plane,
780 struct drm_crtc *crtc,
781 uint32_t sprite_width, int pixel_size,
782 bool enabled, bool scaled);
783void intel_init_pm(struct drm_device *dev);
784bool intel_fbc_enabled(struct drm_device *dev);
785void intel_update_fbc(struct drm_device *dev);
786void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
787void intel_gpu_ips_teardown(void);
788int i915_init_power_well(struct drm_device *dev);
789void i915_remove_power_well(struct drm_device *dev);
790bool intel_display_power_enabled(struct drm_device *dev,
791 enum intel_display_power_domain domain);
792void intel_display_power_get(struct drm_device *dev,
793 enum intel_display_power_domain domain);
794void intel_display_power_put(struct drm_device *dev,
795 enum intel_display_power_domain domain);
796void intel_init_power_well(struct drm_device *dev);
797void intel_set_power_well(struct drm_device *dev, bool enable);
798void intel_enable_gt_powersave(struct drm_device *dev);
799void intel_disable_gt_powersave(struct drm_device *dev);
800void ironlake_teardown_rc6(struct drm_device *dev);
749 801void gen6_update_ring_freq(struct drm_device *dev);
802void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
803void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
804
805
806/* intel_sdvo.c */
807bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
808
809
810/* intel_sprite.c */
811int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
812void intel_flush_display_plane(struct drm_i915_private *dev_priv,
813 enum plane plane);
814void intel_plane_restore(struct drm_plane *plane);
815void intel_plane_disable(struct drm_plane *plane);
816int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
817 struct drm_file *file_priv);
818int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
819 struct drm_file *file_priv);
820
821
822/* intel_tv.c */
823void intel_tv_init(struct drm_device *dev);
750 824
751extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
752 enum pipe *pipe);
825void gen6_rps_idle(struct drm_i915_private *dev_priv);
826void gen6_rps_boost(struct drm_i915_private *dev_priv);
753extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
754extern void intel_ddi_pll_init(struct drm_device *dev);
755extern void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
756extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
757 enum transcoder cpu_transcoder);
758extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
759extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
760extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
761extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
762extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
763extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
764extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
765extern bool
766intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
767extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
768
769extern void intel_display_handle_reset(struct drm_device *dev);
770extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
771 enum pipe pipe,
772 bool enable);
773extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
774 enum transcoder pch_transcoder,
775 bool enable);
776
777extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
778extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
779extern void intel_edp_psr_update(struct drm_device *dev);
780extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
781 bool switch_to_fclk, bool allow_power_down);
782extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
783extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
784extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
785 uint32_t mask);
786extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
787extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv,
788 uint32_t mask);
789extern void hsw_enable_pc8_work(struct work_struct *__work);
790extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
791extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
792extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
793extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
794extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
795extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
796extern void i915_disable_vga_mem(struct drm_device *dev);
797 827
798 828#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
new file mode 100644
index 000000000000..9a2fdd2a7e34
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -0,0 +1,620 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Jani Nikula <jani.nikula@intel.com>
24 */
25
26#include <drm/drmP.h>
27#include <drm/drm_crtc.h>
28#include <drm/drm_edid.h>
29#include <drm/i915_drm.h>
30#include <linux/slab.h>
31#include "i915_drv.h"
32#include "intel_drv.h"
33#include "intel_dsi.h"
34#include "intel_dsi_cmd.h"
35
36/* the sub-encoders aka panel drivers */
37static const struct intel_dsi_device intel_dsi_devices[] = {
38};
39
40
41static void vlv_cck_modify(struct drm_i915_private *dev_priv, u32 reg, u32 val,
42 u32 mask)
43{
44 u32 tmp = vlv_cck_read(dev_priv, reg);
45 tmp &= ~mask;
46 tmp |= val;
47 vlv_cck_write(dev_priv, reg, tmp);
48}
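
vlv_cck_modify() above is a plain read-modify-write over the CCK sideband; a self-contained sketch of the same masking logic on an ordinary value (the inputs are made up):

#include <stdint.h>
#include <stdio.h>

/* clear the masked field, then OR in the new value */
static uint32_t rmw(uint32_t reg, uint32_t val, uint32_t mask)
{
        reg &= ~mask;
        reg |= val;
        return reg;
}

int main(void)
{
        /* e.g. force the 0x00030000 field of an all-ones register to 01 */
        printf("0x%08x\n", rmw(0xffffffff, 0x00010000, 0x00030000));
        return 0;
}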
49
50static void band_gap_wa(struct drm_i915_private *dev_priv)
51{
52 mutex_lock(&dev_priv->dpio_lock);
53
54 /* Enable bandgap fix in GOP driver */
55 vlv_cck_modify(dev_priv, 0x6D, 0x00010000, 0x00030000);
56 msleep(20);
57 vlv_cck_modify(dev_priv, 0x6E, 0x00010000, 0x00030000);
58 msleep(20);
59 vlv_cck_modify(dev_priv, 0x6F, 0x00010000, 0x00030000);
60 msleep(20);
61 vlv_cck_modify(dev_priv, 0x00, 0x00008000, 0x00008000);
62 msleep(20);
63 vlv_cck_modify(dev_priv, 0x00, 0x00000000, 0x00008000);
64 msleep(20);
65
66 /* Turn Display Trunk on */
67 vlv_cck_modify(dev_priv, 0x6B, 0x00020000, 0x00030000);
68 msleep(20);
69
70 vlv_cck_modify(dev_priv, 0x6C, 0x00020000, 0x00030000);
71 msleep(20);
72
73 vlv_cck_modify(dev_priv, 0x6D, 0x00020000, 0x00030000);
74 msleep(20);
75 vlv_cck_modify(dev_priv, 0x6E, 0x00020000, 0x00030000);
76 msleep(20);
77 vlv_cck_modify(dev_priv, 0x6F, 0x00020000, 0x00030000);
78
79 mutex_unlock(&dev_priv->dpio_lock);
80
81 /* Need huge delay, otherwise clock is not stable */
82 msleep(100);
83}
84
85static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
86{
87 return container_of(intel_attached_encoder(connector),
88 struct intel_dsi, base);
89}
90
91static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
92{
93 return intel_dsi->dev.type == INTEL_DSI_VIDEO_MODE;
94}
95
96static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
97{
98 return intel_dsi->dev.type == INTEL_DSI_COMMAND_MODE;
99}
100
101static void intel_dsi_hot_plug(struct intel_encoder *encoder)
102{
103 DRM_DEBUG_KMS("\n");
104}
105
106static bool intel_dsi_compute_config(struct intel_encoder *encoder,
107 struct intel_crtc_config *config)
108{
109 struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
110 base);
111 struct intel_connector *intel_connector = intel_dsi->attached_connector;
112 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
113 struct drm_display_mode *adjusted_mode = &config->adjusted_mode;
114 struct drm_display_mode *mode = &config->requested_mode;
115
116 DRM_DEBUG_KMS("\n");
117
118 if (fixed_mode)
119 intel_fixed_panel_mode(fixed_mode, adjusted_mode);
120
121 if (intel_dsi->dev.dev_ops->mode_fixup)
122 return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
123 mode, adjusted_mode);
124
125 return true;
126}
127
128static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
129{
130 DRM_DEBUG_KMS("\n");
131
132 vlv_enable_dsi_pll(encoder);
133}
134
135static void intel_dsi_pre_enable(struct intel_encoder *encoder)
136{
137 DRM_DEBUG_KMS("\n");
138}
139
140static void intel_dsi_enable(struct intel_encoder *encoder)
141{
142 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
143 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
144 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
145 int pipe = intel_crtc->pipe;
146 u32 temp;
147
148 DRM_DEBUG_KMS("\n");
149
150 temp = I915_READ(MIPI_DEVICE_READY(pipe));
151 if ((temp & DEVICE_READY) == 0) {
152 temp &= ~ULPS_STATE_MASK;
153 I915_WRITE(MIPI_DEVICE_READY(pipe), temp | DEVICE_READY);
154 } else if (temp & ULPS_STATE_MASK) {
155 temp &= ~ULPS_STATE_MASK;
156 I915_WRITE(MIPI_DEVICE_READY(pipe), temp | ULPS_STATE_EXIT);
157 /*
158 * We need to ensure that there is a minimum of 1 ms time
159 * available before clearing the ULPS exit state.
160 */
161 msleep(2);
162 I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
163 }
164
165 if (is_cmd_mode(intel_dsi))
166 I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
167
168 if (is_vid_mode(intel_dsi)) {
169 msleep(20); /* XXX */
170 dpi_send_cmd(intel_dsi, TURN_ON);
171 msleep(100);
172
173 /* assert ip_tg_enable signal */
174 temp = I915_READ(MIPI_PORT_CTRL(pipe));
175 I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
176 POSTING_READ(MIPI_PORT_CTRL(pipe));
177 }
178
179 intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
180}
181
182static void intel_dsi_disable(struct intel_encoder *encoder)
183{
184 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
185 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
186 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
187 int pipe = intel_crtc->pipe;
188 u32 temp;
189
190 DRM_DEBUG_KMS("\n");
191
192 intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
193
194 if (is_vid_mode(intel_dsi)) {
195 dpi_send_cmd(intel_dsi, SHUTDOWN);
196 msleep(10);
197
198 /* de-assert ip_tg_enable signal */
199 temp = I915_READ(MIPI_PORT_CTRL(pipe));
200 I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
201 POSTING_READ(MIPI_PORT_CTRL(pipe));
202
203 msleep(2);
204 }
205
206 temp = I915_READ(MIPI_DEVICE_READY(pipe));
207 if (temp & DEVICE_READY) {
208 temp &= ~DEVICE_READY;
209 temp &= ~ULPS_STATE_MASK;
210 I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
211 }
212}
213
214static void intel_dsi_post_disable(struct intel_encoder *encoder)
215{
216 DRM_DEBUG_KMS("\n");
217
218 vlv_disable_dsi_pll(encoder);
219}
220
221static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
222 enum pipe *pipe)
223{
224 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
225 u32 port, func;
226 enum pipe p;
227
228 DRM_DEBUG_KMS("\n");
229
230 /* XXX: this only works for one DSI output */
231 for (p = PIPE_A; p <= PIPE_B; p++) {
232 port = I915_READ(MIPI_PORT_CTRL(p));
233 func = I915_READ(MIPI_DSI_FUNC_PRG(p));
234
235 if ((port & DPI_ENABLE) || (func & CMD_MODE_DATA_WIDTH_MASK)) {
236 if (I915_READ(MIPI_DEVICE_READY(p)) & DEVICE_READY) {
237 *pipe = p;
238 return true;
239 }
240 }
241 }
242
243 return false;
244}
245
246static void intel_dsi_get_config(struct intel_encoder *encoder,
247 struct intel_crtc_config *pipe_config)
248{
249 DRM_DEBUG_KMS("\n");
250
251 /* XXX: read flags, set to adjusted_mode */
252}
253
254static int intel_dsi_mode_valid(struct drm_connector *connector,
255 struct drm_display_mode *mode)
256{
257 struct intel_connector *intel_connector = to_intel_connector(connector);
258 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
259 struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
260
261 DRM_DEBUG_KMS("\n");
262
263 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
264 DRM_DEBUG_KMS("MODE_NO_DBLESCAN\n");
265 return MODE_NO_DBLESCAN;
266 }
267
268 if (fixed_mode) {
269 if (mode->hdisplay > fixed_mode->hdisplay)
270 return MODE_PANEL;
271 if (mode->vdisplay > fixed_mode->vdisplay)
272 return MODE_PANEL;
273 }
274
275 return intel_dsi->dev.dev_ops->mode_valid(&intel_dsi->dev, mode);
276}
277
278/* return txclkesc cycles in terms of divider and duration in us */
279static u16 txclkesc(u32 divider, unsigned int us)
280{
281 switch (divider) {
282 case ESCAPE_CLOCK_DIVIDER_1:
283 default:
284 return 20 * us;
285 case ESCAPE_CLOCK_DIVIDER_2:
286 return 10 * us;
287 case ESCAPE_CLOCK_DIVIDER_4:
288 return 5 * us;
289 }
290}
291
292/* return pixels in terms of txbyteclkhs */
293static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count)
294{
295 return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count);
296}
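
The nested DIV_ROUND_UP above rounds up twice, first to whole payload bytes and then across lanes; a minimal standalone sketch of the same conversion, with illustrative bpp and lane values not taken from this patch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* pixels -> payload bytes (rounded up) -> byte clocks per lane (rounded up) */
static unsigned int txbyteclkhs(unsigned int pixels, int bpp, int lane_count)
{
        return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count);
}

int main(void)
{
        /* e.g. 1920 RGB888 pixels (24 bpp) over 4 lanes -> 1440 byte clocks */
        printf("%u\n", txbyteclkhs(1920, 24, 4));
        return 0;
}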
297
298static void set_dsi_timings(struct drm_encoder *encoder,
299 const struct drm_display_mode *mode)
300{
301 struct drm_device *dev = encoder->dev;
302 struct drm_i915_private *dev_priv = dev->dev_private;
303 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
304 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
305 int pipe = intel_crtc->pipe;
306 unsigned int bpp = intel_crtc->config.pipe_bpp;
307 unsigned int lane_count = intel_dsi->lane_count;
308
309 u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
310
311 hactive = mode->hdisplay;
312 hfp = mode->hsync_start - mode->hdisplay;
313 hsync = mode->hsync_end - mode->hsync_start;
314 hbp = mode->htotal - mode->hsync_end;
315
316 vfp = mode->vsync_start - mode->vdisplay;
317 vsync = mode->vsync_end - mode->vsync_start;
318 vbp = mode->vtotal - mode->vsync_end;
319
320 /* horizontal values are in terms of high speed byte clock */
321 hactive = txbyteclkhs(hactive, bpp, lane_count);
322 hfp = txbyteclkhs(hfp, bpp, lane_count);
323 hsync = txbyteclkhs(hsync, bpp, lane_count);
324 hbp = txbyteclkhs(hbp, bpp, lane_count);
325
326 I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
327 I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
328
329 /* meaningful only for video mode with non-burst sync pulses; can be
330 * zero for non-burst sync events and burst modes */
331 I915_WRITE(MIPI_HSYNC_PADDING_COUNT(pipe), hsync);
332 I915_WRITE(MIPI_HBP_COUNT(pipe), hbp);
333
334 /* vertical values are in terms of lines */
335 I915_WRITE(MIPI_VFP_COUNT(pipe), vfp);
336 I915_WRITE(MIPI_VSYNC_PADDING_COUNT(pipe), vsync);
337 I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
338}
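
The porch arithmetic in set_dsi_timings() comes straight from the DRM mode fields; a self-contained sketch using classic 640x480@60 numbers for illustration:

#include <stdio.h>

/* the drm_display_mode fields used above, with classic 640x480@60 timings */
struct mode { int hdisplay, hsync_start, hsync_end, htotal; };

int main(void)
{
        struct mode m = { 640, 656, 752, 800 };
        int hfp = m.hsync_start - m.hdisplay;    /* front porch: 16 */
        int hsync = m.hsync_end - m.hsync_start; /* sync width:  96 */
        int hbp = m.htotal - m.hsync_end;        /* back porch:  48 */

        printf("hfp=%d hsync=%d hbp=%d\n", hfp, hsync, hbp);
        return 0;
}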
339
340static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
341{
342 struct drm_encoder *encoder = &intel_encoder->base;
343 struct drm_device *dev = encoder->dev;
344 struct drm_i915_private *dev_priv = dev->dev_private;
345 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
346 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
347 struct drm_display_mode *adjusted_mode =
348 &intel_crtc->config.adjusted_mode;
349 int pipe = intel_crtc->pipe;
350 unsigned int bpp = intel_crtc->config.pipe_bpp;
351 u32 val, tmp;
352
353 DRM_DEBUG_KMS("pipe %d\n", pipe);
354
355 /* Update the DSI PLL */
356 vlv_enable_dsi_pll(intel_encoder);
357
358 /* XXX: Location of the call */
359 band_gap_wa(dev_priv);
360
361 /* escape clock divider, 20MHz, shared for A and C. device ready must be
362 * off when doing this! txclkesc? */
363 tmp = I915_READ(MIPI_CTRL(0));
364 tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
365 I915_WRITE(MIPI_CTRL(0), tmp | ESCAPE_CLOCK_DIVIDER_1);
366
367 /* read request priority is per pipe */
368 tmp = I915_READ(MIPI_CTRL(pipe));
369 tmp &= ~READ_REQUEST_PRIORITY_MASK;
370 I915_WRITE(MIPI_CTRL(pipe), tmp | READ_REQUEST_PRIORITY_HIGH);
371
372 /* XXX: why here, why like this? handling in irq handler?! */
373 I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
374 I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
375
376 I915_WRITE(MIPI_DPHY_PARAM(pipe),
377 0x3c << EXIT_ZERO_COUNT_SHIFT |
378 0x1f << TRAIL_COUNT_SHIFT |
379 0xc5 << CLK_ZERO_COUNT_SHIFT |
380 0x1f << PREPARE_COUNT_SHIFT);
381
382 I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
383 adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
384 adjusted_mode->hdisplay << HORIZONTAL_ADDRESS_SHIFT);
385
386 set_dsi_timings(encoder, adjusted_mode);
387
388 val = intel_dsi->lane_count << DATA_LANES_PRG_REG_SHIFT;
389 if (is_cmd_mode(intel_dsi)) {
390 val |= intel_dsi->channel << CMD_MODE_CHANNEL_NUMBER_SHIFT;
391 val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
392 } else {
393 val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;
394
395 /* XXX: cross-check bpp vs. pixel format? */
396 val |= intel_dsi->pixel_format;
397 }
398 I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val);
399
400 /* timeouts for recovery: one frame, IIUC. if the counter expires, EOT
401 * and stop state follow. */
402
403 /*
404 * In burst mode, the value should be greater than one DPI line time in
405 * byte clock (txbyteclkhs); to make this timer expire, programming
406 * 1 + that value is recommended.
407 *
408 * In non-burst mode, the value should be greater than one DPI frame
409 * time in byte clock (txbyteclkhs); again, programming 1 + that value
410 * is recommended.
411 *
412 * In DBI only mode, the value should be greater than one DBI frame
413 * time in byte clock (txbyteclkhs); again, programming 1 + that value
414 * is recommended.
415 */
416
417 if (is_vid_mode(intel_dsi) &&
418 intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
419 I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
420 txbyteclkhs(adjusted_mode->htotal, bpp,
421 intel_dsi->lane_count) + 1);
422 } else {
423 I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
424 txbyteclkhs(adjusted_mode->vtotal *
425 adjusted_mode->htotal,
426 bpp, intel_dsi->lane_count) + 1);
427 }
428 I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), 8309); /* max */
429 I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), 0x14); /* max */
430 I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), 0xffff); /* max */
431
432 /* dphy stuff */
433
434 /* in terms of low power clock */
435 I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100));
436
437 /* recovery disables */
438 I915_WRITE(MIPI_EOT_DISABLE(pipe), intel_dsi->eot_disable);
439
440 /* in terms of txbyteclkhs. actual high to low switch +
441 * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
442 *
443 * XXX: write MIPI_STOP_STATE_STALL?
444 */
445 I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), 0x46);
446
447 /* XXX: low power clock equivalence in terms of byte clock. the number
448 * of byte clocks occupied in one low power clock. based on txbyteclkhs
449 * and txclkesc. txclkesc time / txbyteclk time * (105 +
450 * MIPI_STOP_STATE_STALL) / 105.???
451 */
452 I915_WRITE(MIPI_LP_BYTECLK(pipe), 4);
453
454 /* the bandwidth essential for transmitting 16 long packets containing
455 * 252 bytes meant for the DCS write memory command is programmed in
456 * this register in terms of byte clocks. based on the DSI transfer rate
457 * and the number of lanes configured, the time taken to transmit 16
458 * long packets in a DSI stream varies. */
459 I915_WRITE(MIPI_DBI_BW_CTRL(pipe), 0x820);
460
461 I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
462 0xa << LP_HS_SSW_CNT_SHIFT |
463 0x14 << HS_LP_PWR_SW_CNT_SHIFT);
464
465 if (is_vid_mode(intel_dsi))
466 I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
467 intel_dsi->video_mode_format);
468}
469
470static enum drm_connector_status
471intel_dsi_detect(struct drm_connector *connector, bool force)
472{
473 struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
474 DRM_DEBUG_KMS("\n");
475 return intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
476}
477
478static int intel_dsi_get_modes(struct drm_connector *connector)
479{
480 struct intel_connector *intel_connector = to_intel_connector(connector);
481 struct drm_display_mode *mode;
482
483 DRM_DEBUG_KMS("\n");
484
485 if (!intel_connector->panel.fixed_mode) {
486 DRM_DEBUG_KMS("no fixed mode\n");
487 return 0;
488 }
489
490 mode = drm_mode_duplicate(connector->dev,
491 intel_connector->panel.fixed_mode);
492 if (!mode) {
493 DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
494 return 0;
495 }
496
497 drm_mode_probed_add(connector, mode);
498 return 1;
499}
500
501static void intel_dsi_destroy(struct drm_connector *connector)
502{
503 struct intel_connector *intel_connector = to_intel_connector(connector);
504
505 DRM_DEBUG_KMS("\n");
506 intel_panel_fini(&intel_connector->panel);
507 drm_connector_cleanup(connector);
508 kfree(connector);
509}
510
511static const struct drm_encoder_funcs intel_dsi_funcs = {
512 .destroy = intel_encoder_destroy,
513};
514
515static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
516 .get_modes = intel_dsi_get_modes,
517 .mode_valid = intel_dsi_mode_valid,
518 .best_encoder = intel_best_encoder,
519};
520
521static const struct drm_connector_funcs intel_dsi_connector_funcs = {
522 .dpms = intel_connector_dpms,
523 .detect = intel_dsi_detect,
524 .destroy = intel_dsi_destroy,
525 .fill_modes = drm_helper_probe_single_connector_modes,
526};
527
528bool intel_dsi_init(struct drm_device *dev)
529{
530 struct intel_dsi *intel_dsi;
531 struct intel_encoder *intel_encoder;
532 struct drm_encoder *encoder;
533 struct intel_connector *intel_connector;
534 struct drm_connector *connector;
535 struct drm_display_mode *fixed_mode = NULL;
536 const struct intel_dsi_device *dsi;
537 unsigned int i;
538
539 DRM_DEBUG_KMS("\n");
540
541 intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
542 if (!intel_dsi)
543 return false;
544
545 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
546 if (!intel_connector) {
547 kfree(intel_dsi);
548 return false;
549 }
550
551 intel_encoder = &intel_dsi->base;
552 encoder = &intel_encoder->base;
553 intel_dsi->attached_connector = intel_connector;
554
555 connector = &intel_connector->base;
556
557 drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
558
559 /* XXX: very likely not all of these are needed */
560 intel_encoder->hot_plug = intel_dsi_hot_plug;
561 intel_encoder->compute_config = intel_dsi_compute_config;
562 intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
563 intel_encoder->pre_enable = intel_dsi_pre_enable;
564 intel_encoder->enable = intel_dsi_enable;
565 intel_encoder->mode_set = intel_dsi_mode_set;
566 intel_encoder->disable = intel_dsi_disable;
567 intel_encoder->post_disable = intel_dsi_post_disable;
568 intel_encoder->get_hw_state = intel_dsi_get_hw_state;
569 intel_encoder->get_config = intel_dsi_get_config;
570
571 intel_connector->get_hw_state = intel_connector_get_hw_state;
572
573 for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
574 dsi = &intel_dsi_devices[i];
575 intel_dsi->dev = *dsi;
576
577 if (dsi->dev_ops->init(&intel_dsi->dev))
578 break;
579 }
580
581 if (i == ARRAY_SIZE(intel_dsi_devices)) {
582 DRM_DEBUG_KMS("no device found\n");
583 goto err;
584 }
585
586 intel_encoder->type = INTEL_OUTPUT_DSI;
587 intel_encoder->crtc_mask = (1 << 0); /* XXX */
588
589 intel_encoder->cloneable = false;
590 drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
591 DRM_MODE_CONNECTOR_DSI);
592
593 drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs);
594
595 connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
596 connector->interlace_allowed = false;
597 connector->doublescan_allowed = false;
598
599 intel_connector_attach_encoder(intel_connector, intel_encoder);
600
601 drm_sysfs_connector_add(connector);
602
603 fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
604 if (!fixed_mode) {
605 DRM_DEBUG_KMS("no fixed mode\n");
606 goto err;
607 }
608
609 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
610 intel_panel_init(&intel_connector->panel, fixed_mode);
611
612 return true;
613
614err:
615 drm_encoder_cleanup(&intel_encoder->base);
616 kfree(intel_dsi);
617 kfree(intel_connector);
618
619 return false;
620}
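
intel_dsi_init() probes the (for now empty) intel_dsi_devices[] table and lets the first sub-encoder whose init() succeeds claim the output; a generic sketch of that table-driven probe pattern, with hypothetical driver names:

#include <stdbool.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* hypothetical panel drivers standing in for intel_dsi_dev_ops->init() */
struct panel_driver { const char *name; bool (*init)(void); };

static bool panel_a_init(void) { return false; } /* probe fails */
static bool panel_b_init(void) { return true; }  /* probe succeeds */

static const struct panel_driver drivers[] = {
        { "panel-a", panel_a_init },
        { "panel-b", panel_b_init },
};

int main(void)
{
        unsigned int i;

        /* first driver whose init() succeeds claims the output */
        for (i = 0; i < ARRAY_SIZE(drivers); i++)
                if (drivers[i].init())
                        break;

        if (i == ARRAY_SIZE(drivers))
                printf("no device found\n");
        else
                printf("using %s\n", drivers[i].name);
        return 0;
}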
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
new file mode 100644
index 000000000000..c7765f33d524
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -0,0 +1,102 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#ifndef _INTEL_DSI_H
25#define _INTEL_DSI_H
26
27#include <drm/drmP.h>
28#include <drm/drm_crtc.h>
29#include "intel_drv.h"
30
31struct intel_dsi_device {
32 unsigned int panel_id;
33 const char *name;
34 int type;
35 const struct intel_dsi_dev_ops *dev_ops;
36 void *dev_priv;
37};
38
39struct intel_dsi_dev_ops {
40 bool (*init)(struct intel_dsi_device *dsi);
41
42 /* This callback must be able to assume DSI commands can be sent */
43 void (*enable)(struct intel_dsi_device *dsi);
44
45 /* This callback must be able to assume DSI commands can be sent */
46 void (*disable)(struct intel_dsi_device *dsi);
47
48 int (*mode_valid)(struct intel_dsi_device *dsi,
49 struct drm_display_mode *mode);
50
51 bool (*mode_fixup)(struct intel_dsi_device *dsi,
52 const struct drm_display_mode *mode,
53 struct drm_display_mode *adjusted_mode);
54
55 void (*mode_set)(struct intel_dsi_device *dsi,
56 struct drm_display_mode *mode,
57 struct drm_display_mode *adjusted_mode);
58
59 enum drm_connector_status (*detect)(struct intel_dsi_device *dsi);
60
61 bool (*get_hw_state)(struct intel_dsi_device *dev);
62
63 struct drm_display_mode *(*get_modes)(struct intel_dsi_device *dsi);
64
65 void (*destroy) (struct intel_dsi_device *dsi);
66};
67
68struct intel_dsi {
69 struct intel_encoder base;
70
71 struct intel_dsi_device dev;
72
73 struct intel_connector *attached_connector;
74
75 /* if true, use HS mode, otherwise LP */
76 bool hs;
77
78 /* virtual channel */
79 int channel;
80
81 /* number of DSI lanes */
82 unsigned int lane_count;
83
84 /* video mode pixel format for MIPI_DSI_FUNC_PRG register */
85 u32 pixel_format;
86
87 /* video mode format for MIPI_VIDEO_MODE_FORMAT register */
88 u32 video_mode_format;
89
90 /* eot for MIPI_EOT_DISABLE register */
91 u32 eot_disable;
92};
93
94static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
95{
96 return container_of(encoder, struct intel_dsi, base.base);
97}
98
99extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
100extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
101
102#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
new file mode 100644
index 000000000000..7c40f981d2c7
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -0,0 +1,427 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Jani Nikula <jani.nikula@intel.com>
24 */
25
26#include <linux/export.h>
27#include <drm/drmP.h>
28#include <drm/drm_crtc.h>
29#include <video/mipi_display.h>
30#include "i915_drv.h"
31#include "intel_drv.h"
32#include "intel_dsi.h"
33#include "intel_dsi_cmd.h"
34
35/*
36 * XXX: MIPI_DATA_ADDRESS, MIPI_DATA_LENGTH, MIPI_COMMAND_LENGTH, and
37 * MIPI_COMMAND_ADDRESS registers.
38 *
39 * Apparently these registers provide a MIPI adapter level way to send (lots of)
40 * commands and data to the receiver, without having to write the commands and
41 * data to MIPI_{HS,LP}_GEN_{CTRL,DATA} registers word by word.
42 *
43 * Presumably for anything other than MIPI_DCS_WRITE_MEMORY_START and
44 * MIPI_DCS_WRITE_MEMORY_CONTINUE (which are used to update the external
45 * framebuffer in command mode displays) these are just an optimization that can
46 * come later.
47 *
48 * For memory writes, these should probably be used for performance.
49 */
50
51static void print_stat(struct intel_dsi *intel_dsi)
52{
53 struct drm_encoder *encoder = &intel_dsi->base.base;
54 struct drm_device *dev = encoder->dev;
55 struct drm_i915_private *dev_priv = dev->dev_private;
56 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
57 enum pipe pipe = intel_crtc->pipe;
58 u32 val;
59
60 val = I915_READ(MIPI_INTR_STAT(pipe));
61
62#define STAT_BIT(val, bit) (val) & (bit) ? " " #bit : ""
63 DRM_DEBUG_KMS("MIPI_INTR_STAT(%d) = %08x"
64 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
65 "\n", pipe, val,
66 STAT_BIT(val, TEARING_EFFECT),
67 STAT_BIT(val, SPL_PKT_SENT_INTERRUPT),
68 STAT_BIT(val, GEN_READ_DATA_AVAIL),
69 STAT_BIT(val, LP_GENERIC_WR_FIFO_FULL),
70 STAT_BIT(val, HS_GENERIC_WR_FIFO_FULL),
71 STAT_BIT(val, RX_PROT_VIOLATION),
72 STAT_BIT(val, RX_INVALID_TX_LENGTH),
73 STAT_BIT(val, ACK_WITH_NO_ERROR),
74 STAT_BIT(val, TURN_AROUND_ACK_TIMEOUT),
75 STAT_BIT(val, LP_RX_TIMEOUT),
76 STAT_BIT(val, HS_TX_TIMEOUT),
77 STAT_BIT(val, DPI_FIFO_UNDERRUN),
78 STAT_BIT(val, LOW_CONTENTION),
79 STAT_BIT(val, HIGH_CONTENTION),
80 STAT_BIT(val, TXDSI_VC_ID_INVALID),
81 STAT_BIT(val, TXDSI_DATA_TYPE_NOT_RECOGNISED),
82 STAT_BIT(val, TXCHECKSUM_ERROR),
83 STAT_BIT(val, TXECC_MULTIBIT_ERROR),
84 STAT_BIT(val, TXECC_SINGLE_BIT_ERROR),
85 STAT_BIT(val, TXFALSE_CONTROL_ERROR),
86 STAT_BIT(val, RXDSI_VC_ID_INVALID),
87 STAT_BIT(val, RXDSI_DATA_TYPE_NOT_REGOGNISED),
88 STAT_BIT(val, RXCHECKSUM_ERROR),
89 STAT_BIT(val, RXECC_MULTIBIT_ERROR),
90 STAT_BIT(val, RXECC_SINGLE_BIT_ERROR),
91 STAT_BIT(val, RXFALSE_CONTROL_ERROR),
92 STAT_BIT(val, RXHS_RECEIVE_TIMEOUT_ERROR),
93 STAT_BIT(val, RX_LP_TX_SYNC_ERROR),
94 STAT_BIT(val, RXEXCAPE_MODE_ENTRY_ERROR),
95 STAT_BIT(val, RXEOT_SYNC_ERROR),
96 STAT_BIT(val, RXSOT_SYNC_ERROR),
97 STAT_BIT(val, RXSOT_ERROR));
98#undef STAT_BIT
99}
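
The STAT_BIT() stringification trick used by print_stat() generalizes to any bitfield dump; a tiny sketch with hypothetical bit names:

#include <stdio.h>

/* hypothetical status bits; the kernel version dumps MIPI_INTR_STAT */
#define FIFO_FULL  0x1
#define RX_TIMEOUT 0x2

/* emit " NAME" iff the bit is set, via the # stringify operator */
#define STAT_BIT(val, bit) ((val) & (bit) ? " " #bit : "")

int main(void)
{
        unsigned int val = FIFO_FULL | RX_TIMEOUT;

        printf("stat =%s%s\n", STAT_BIT(val, FIFO_FULL),
               STAT_BIT(val, RX_TIMEOUT));
        return 0;
}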
100
101enum dsi_type {
102 DSI_DCS,
103 DSI_GENERIC,
104};
105
106/* enable or disable command mode hs transmissions */
107void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable)
108{
109 struct drm_encoder *encoder = &intel_dsi->base.base;
110 struct drm_device *dev = encoder->dev;
111 struct drm_i915_private *dev_priv = dev->dev_private;
112 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
113 enum pipe pipe = intel_crtc->pipe;
114 u32 temp;
115 u32 mask = DBI_FIFO_EMPTY;
116
117 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
118 DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
119
120 temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(pipe));
121 temp &= DBI_HS_LP_MODE_MASK;
122 I915_WRITE(MIPI_HS_LP_DBI_ENABLE(pipe), enable ? DBI_HS_MODE : DBI_LP_MODE);
123
124 intel_dsi->hs = enable;
125}
126
127static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
128 u8 data_type, u16 data)
129{
130 struct drm_encoder *encoder = &intel_dsi->base.base;
131 struct drm_device *dev = encoder->dev;
132 struct drm_i915_private *dev_priv = dev->dev_private;
133 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
134 enum pipe pipe = intel_crtc->pipe;
135 u32 ctrl_reg;
136 u32 ctrl;
137 u32 mask;
138
139 DRM_DEBUG_KMS("channel %d, data_type %d, data %04x\n",
140 channel, data_type, data);
141
142 if (intel_dsi->hs) {
143 ctrl_reg = MIPI_HS_GEN_CTRL(pipe);
144 mask = HS_CTRL_FIFO_FULL;
145 } else {
146 ctrl_reg = MIPI_LP_GEN_CTRL(pipe);
147 mask = LP_CTRL_FIFO_FULL;
148 }
149
150 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50)) {
151 DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
152 print_stat(intel_dsi);
153 }
154
155 /*
156 * Note: This function is also used for long packets, with length passed
157 * as data, since SHORT_PACKET_PARAM_SHIFT ==
158 * LONG_PACKET_WORD_COUNT_SHIFT.
159 */
160 ctrl = data << SHORT_PACKET_PARAM_SHIFT |
161 channel << VIRTUAL_CHANNEL_SHIFT |
162 data_type << DATA_TYPE_SHIFT;
163
164 I915_WRITE(ctrl_reg, ctrl);
165
166 return 0;
167}
168
169static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
170 u8 data_type, const u8 *data, int len)
171{
172 struct drm_encoder *encoder = &intel_dsi->base.base;
173 struct drm_device *dev = encoder->dev;
174 struct drm_i915_private *dev_priv = dev->dev_private;
175 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
176 enum pipe pipe = intel_crtc->pipe;
177 u32 data_reg;
178 int i, j, n;
179 u32 mask;
180
181 DRM_DEBUG_KMS("channel %d, data_type %d, len %04x\n",
182 channel, data_type, len);
183
184 if (intel_dsi->hs) {
185 data_reg = MIPI_HS_GEN_DATA(pipe);
186 mask = HS_DATA_FIFO_FULL;
187 } else {
188 data_reg = MIPI_LP_GEN_DATA(pipe);
189 mask = LP_DATA_FIFO_FULL;
190 }
191
192 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50))
193 DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
194
195 for (i = 0; i < len; i += n) {
196 u32 val = 0;
197 n = min_t(int, len - i, 4);
198
199 for (j = 0; j < n; j++)
200 val |= *data++ << 8 * j;
201
202 I915_WRITE(data_reg, val);
203 /* XXX: check for data fifo full, once that is set, write 4
204 * dwords, then wait for not set, then continue. */
205 }
206
207 return dsi_vc_send_short(intel_dsi, channel, data_type, len);
208}
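
The write loop above packs the byte stream least-significant-byte-first into 32-bit FIFO words; a standalone sketch of the same packing (the sample bytes are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* made-up payload; the driver streams these into MIPI_*_GEN_DATA */
        const uint8_t data[] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
        int len = sizeof(data), i, j, n;

        for (i = 0; i < len; i += n) {
                uint32_t val = 0;

                n = (len - i < 4) ? len - i : 4;
                /* least significant byte first, like dsi_vc_send_long() */
                for (j = 0; j < n; j++)
                        val |= (uint32_t)data[i + j] << 8 * j;
                printf("0x%08x\n", val); /* 0x44332211, then 0x00000055 */
        }
        return 0;
}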
209
210static int dsi_vc_write_common(struct intel_dsi *intel_dsi,
211 int channel, const u8 *data, int len,
212 enum dsi_type type)
213{
214 int ret;
215
216 if (len == 0) {
217 BUG_ON(type == DSI_GENERIC);
218 ret = dsi_vc_send_short(intel_dsi, channel,
219 MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM,
220 0);
221 } else if (len == 1) {
222 ret = dsi_vc_send_short(intel_dsi, channel,
223 type == DSI_GENERIC ?
224 MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
225 MIPI_DSI_DCS_SHORT_WRITE, data[0]);
226 } else if (len == 2) {
227 ret = dsi_vc_send_short(intel_dsi, channel,
228 type == DSI_GENERIC ?
229 MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
230 MIPI_DSI_DCS_SHORT_WRITE_PARAM,
231 (data[1] << 8) | data[0]);
232 } else {
233 ret = dsi_vc_send_long(intel_dsi, channel,
234 type == DSI_GENERIC ?
235 MIPI_DSI_GENERIC_LONG_WRITE :
236 MIPI_DSI_DCS_LONG_WRITE, data, len);
237 }
238
239 return ret;
240}
241
242int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
243 const u8 *data, int len)
244{
245 return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS);
246}
247
248int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
249 const u8 *data, int len)
250{
251 return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC);
252}
253
254static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi,
255 int channel, u8 dcs_cmd)
256{
257 return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ,
258 dcs_cmd);
259}
260
261static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi,
262 int channel, u8 *reqdata,
263 int reqlen)
264{
265 u16 data;
266 u8 data_type;
267
268 switch (reqlen) {
269 case 0:
270 data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
271 data = 0;
272 break;
273 case 1:
274 data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
275 data = reqdata[0];
276 break;
277 case 2:
278 data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
279 data = (reqdata[1] << 8) | reqdata[0];
280 break;
281 default:
282 BUG();
283 }
284
285 return dsi_vc_send_short(intel_dsi, channel, data_type, data);
286}
287
288static int dsi_read_data_return(struct intel_dsi *intel_dsi,
289 u8 *buf, int buflen)
290{
291 struct drm_encoder *encoder = &intel_dsi->base.base;
292 struct drm_device *dev = encoder->dev;
293 struct drm_i915_private *dev_priv = dev->dev_private;
294 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
295 enum pipe pipe = intel_crtc->pipe;
296 int i, len = 0;
297 u32 data_reg, val;
298
299 if (intel_dsi->hs) {
300 data_reg = MIPI_HS_GEN_DATA(pipe);
301 } else {
302 data_reg = MIPI_LP_GEN_DATA(pipe);
303 }
304
305 while (len < buflen) {
306 val = I915_READ(data_reg);
307 for (i = 0; i < 4 && len < buflen; i++, len++)
308 buf[len] = val >> 8 * i;
309 }
310
311 return len;
312}
313
314int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
315 u8 *buf, int buflen)
316{
317 struct drm_encoder *encoder = &intel_dsi->base.base;
318 struct drm_device *dev = encoder->dev;
319 struct drm_i915_private *dev_priv = dev->dev_private;
320 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
321 enum pipe pipe = intel_crtc->pipe;
322 u32 mask;
323 int ret;
324
325 /*
326 * XXX: should issue multiple read requests and reads if request is
327 * longer than MIPI_MAX_RETURN_PKT_SIZE
328 */
329
330 I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
331
332 ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd);
333 if (ret)
334 return ret;
335
336 mask = GEN_READ_DATA_AVAIL;
337 if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
338 DRM_ERROR("Timeout waiting for read data.\n");
339
340 ret = dsi_read_data_return(intel_dsi, buf, buflen);
341 if (ret < 0)
342 return ret;
343
344 if (ret != buflen)
345 return -EIO;
346
347 return 0;
348}
349
350int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
351 u8 *reqdata, int reqlen, u8 *buf, int buflen)
352{
353 struct drm_encoder *encoder = &intel_dsi->base.base;
354 struct drm_device *dev = encoder->dev;
355 struct drm_i915_private *dev_priv = dev->dev_private;
356 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
357 enum pipe pipe = intel_crtc->pipe;
358 u32 mask;
359 int ret;
360
361 /*
362 * XXX: should issue multiple read requests and reads if request is
363 * longer than MIPI_MAX_RETURN_PKT_SIZE
364 */
365
366 I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
367
368 ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata,
369 reqlen);
370 if (ret)
371 return ret;
372
373 mask = GEN_READ_DATA_AVAIL;
374 if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
375 DRM_ERROR("Timeout waiting for read data.\n");
376
377 ret = dsi_read_data_return(intel_dsi, buf, buflen);
378 if (ret < 0)
379 return ret;
380
381 if (ret != buflen)
382 return -EIO;
383
384 return 0;
385}
386
387/*
388 * send a video mode command
389 *
390 * XXX: commands with data in MIPI_DPI_DATA?
391 */
392int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd)
393{
394 struct drm_encoder *encoder = &intel_dsi->base.base;
395 struct drm_device *dev = encoder->dev;
396 struct drm_i915_private *dev_priv = dev->dev_private;
397 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
398 enum pipe pipe = intel_crtc->pipe;
399 u32 mask;
400
401 /* XXX: pipe, hs */
402 if (intel_dsi->hs)
403 cmd &= ~DPI_LP_MODE;
404 else
405 cmd |= DPI_LP_MODE;
406
407 /* DPI virtual channel?! */
408
409 mask = DPI_FIFO_EMPTY;
410 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
411 DRM_ERROR("Timeout waiting for DPI FIFO empty.\n");
412
413 /* clear bit */
414 I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
415
416 /* XXX: old code skips write if control unchanged */
417 if (cmd == I915_READ(MIPI_DPI_CONTROL(pipe)))
418 DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
419
420 I915_WRITE(MIPI_DPI_CONTROL(pipe), cmd);
421
422 mask = SPL_PKT_SENT_INTERRUPT;
423 if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 100))
424 DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
425
426 return 0;
427}
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
new file mode 100644
index 000000000000..54c8a234a2e0
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h
@@ -0,0 +1,109 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Jani Nikula <jani.nikula@intel.com>
24 */
25
26#ifndef _INTEL_DSI_DSI_H
27#define _INTEL_DSI_DSI_H
28
29#include <drm/drmP.h>
30#include <drm/drm_crtc.h>
31#include <video/mipi_display.h>
32#include "i915_drv.h"
33#include "intel_drv.h"
34#include "intel_dsi.h"
35
36void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
37
38int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
39 const u8 *data, int len);
40
41int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
42 const u8 *data, int len);
43
44int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
45 u8 *buf, int buflen);
46
47int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
48 u8 *reqdata, int reqlen, u8 *buf, int buflen);
49
50int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd);
51
52/* XXX: questionable write helpers */
53static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
54 int channel, u8 dcs_cmd)
55{
56 return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1);
57}
58
59static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
60 int channel, u8 dcs_cmd, u8 param)
61{
62 u8 buf[2] = { dcs_cmd, param };
63 return dsi_vc_dcs_write(intel_dsi, channel, buf, 2);
64}
65
66static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
67 int channel)
68{
69 return dsi_vc_generic_write(intel_dsi, channel, NULL, 0);
70}
71
72static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
73 int channel, u8 param)
74{
75 return dsi_vc_generic_write(intel_dsi, channel, &param, 1);
76}
77
78static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
79 int channel, u8 param1, u8 param2)
80{
81 u8 buf[2] = { param1, param2 };
82 return dsi_vc_generic_write(intel_dsi, channel, buf, 2);
83}
84
85/* XXX: questionable read helpers */
86static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
87 int channel, u8 *buf, int buflen)
88{
89 return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen);
90}
91
92static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
93 int channel, u8 param, u8 *buf,
94 int buflen)
95{
96 return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen);
97}
98
99static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
100 int channel, u8 param1, u8 param2,
101 u8 *buf, int buflen)
102{
103 u8 req[2] = { param1, param2 };
104
105 return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen);
106}
107
108
109#endif /* _INTEL_DSI_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
new file mode 100644
index 000000000000..44279b2ade88
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -0,0 +1,317 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Shobhit Kumar <shobhit.kumar@intel.com>
25 * Yogesh Mohan Marimuthu <yogesh.mohan.marimuthu@intel.com>
26 */
27
28#include <linux/kernel.h>
29#include "intel_drv.h"
30#include "i915_drv.h"
31#include "intel_dsi.h"
32
33#define DSI_HSS_PACKET_SIZE 4
34#define DSI_HSE_PACKET_SIZE 4
35#define DSI_HSA_PACKET_EXTRA_SIZE 6
36#define DSI_HBP_PACKET_EXTRA_SIZE 6
37#define DSI_HACTIVE_PACKET_EXTRA_SIZE 6
38#define DSI_HFP_PACKET_EXTRA_SIZE 6
39#define DSI_EOTP_PACKET_SIZE 4
40
41struct dsi_mnp {
42 u32 dsi_pll_ctrl;
43 u32 dsi_pll_div;
44};
45
46static const u32 lfsr_converts[] = {
47 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
48 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
49 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
50 71, 35 /* 91 - 92 */
51};
52
53static u32 dsi_rr_formula(const struct drm_display_mode *mode,
54 int pixel_format, int video_mode_format,
55 int lane_count, bool eotp)
56{
57 u32 bpp;
58 u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
59 u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
60 u32 bytes_per_line, bytes_per_frame;
61 u32 num_frames;
62 u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
63 u32 dsi_bit_clock_hz;
64 u32 dsi_clk;
65
66 switch (pixel_format) {
67 default:
68 case VID_MODE_FORMAT_RGB888:
69 case VID_MODE_FORMAT_RGB666_LOOSE:
70 bpp = 24;
71 break;
72 case VID_MODE_FORMAT_RGB666:
73 bpp = 18;
74 break;
75 case VID_MODE_FORMAT_RGB565:
76 bpp = 16;
77 break;
78 }
79
80 hactive = mode->hdisplay;
81 vactive = mode->vdisplay;
82 hfp = mode->hsync_start - mode->hdisplay;
83 hsync = mode->hsync_end - mode->hsync_start;
84 hbp = mode->htotal - mode->hsync_end;
85
86 vfp = mode->vsync_start - mode->vdisplay;
87 vsync = mode->vsync_end - mode->vsync_start;
88 vbp = mode->vtotal - mode->vsync_end;
89
90 hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
91 hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
92 hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
93 hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);
94
95 bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
96 DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
97 hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
98 hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
99 hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;
100
101 /*
102 * XXX: Need to accurately calculate LP to HS transition timeout and add
103 * it to bytes_per_line/bytes_per_frame.
104 */
105
106 if (eotp && video_mode_format == VIDEO_MODE_BURST)
107 bytes_per_line += DSI_EOTP_PACKET_SIZE;
108
109 bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
110 vactive * bytes_per_line + vfp * bytes_per_line;
111
112 if (eotp &&
113 (video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
114 video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
115 bytes_per_frame += DSI_EOTP_PACKET_SIZE;
116
117 num_frames = drm_mode_vrefresh(mode);
118 bytes_per_x_frames = num_frames * bytes_per_frame;
119
120 bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;
121
122 /* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
123 dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
124 dsi_clk = dsi_bit_clock_hz / (1000 * 1000);
125
126 if (eotp && video_mode_format == VIDEO_MODE_BURST)
127 dsi_clk *= 2;
128
129 return dsi_clk;
130}
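
As a rough sanity check on dsi_rr_formula(), the burst-mode DSI clock is on the order of pixel clock times bpp divided by lane count, before the per-line and per-frame packet overheads accounted for above; a sketch with a hypothetical panel:

#include <stdio.h>

int main(void)
{
        /* hypothetical 1280x800@60 panel: ~71 MHz pixel clock, RGB888, 4 lanes */
        unsigned int pixclk_khz = 71000, bpp = 24, lanes = 4;
        unsigned int dsi_clk_mhz = pixclk_khz * bpp / lanes / 1000;

        /* ~426 MHz per lane before the per-packet overheads are added */
        printf("~%u MHz\n", dsi_clk_mhz);
        return 0;
}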
131
132#ifdef MNP_FROM_TABLE
133
134struct dsi_clock_table {
135 u32 freq;
136 u8 m;
137 u8 p;
138};
139
140static const struct dsi_clock_table dsi_clk_tbl[] = {
141 {300, 72, 6}, {313, 75, 6}, {323, 78, 6}, {333, 80, 6},
142 {343, 82, 6}, {353, 85, 6}, {363, 87, 6}, {373, 90, 6},
143 {383, 92, 6}, {390, 78, 5}, {393, 79, 5}, {400, 80, 5},
144 {401, 80, 5}, {402, 80, 5}, {403, 81, 5}, {404, 81, 5},
145 {405, 81, 5}, {406, 81, 5}, {407, 81, 5}, {408, 82, 5},
146 {409, 82, 5}, {410, 82, 5}, {411, 82, 5}, {412, 82, 5},
147 {413, 83, 5}, {414, 83, 5}, {415, 83, 5}, {416, 83, 5},
148 {417, 83, 5}, {418, 84, 5}, {419, 84, 5}, {420, 84, 5},
149 {430, 86, 5}, {440, 88, 5}, {450, 90, 5}, {460, 92, 5},
150 {470, 75, 4}, {480, 77, 4}, {490, 78, 4}, {500, 80, 4},
151 {510, 82, 4}, {520, 83, 4}, {530, 85, 4}, {540, 86, 4},
152 {550, 88, 4}, {560, 90, 4}, {570, 91, 4}, {580, 70, 3},
153 {590, 71, 3}, {600, 72, 3}, {610, 73, 3}, {620, 74, 3},
154 {630, 76, 3}, {640, 77, 3}, {650, 78, 3}, {660, 79, 3},
155 {670, 80, 3}, {680, 82, 3}, {690, 83, 3}, {700, 84, 3},
156 {710, 85, 3}, {720, 86, 3}, {730, 88, 3}, {740, 89, 3},
157 {750, 90, 3}, {760, 91, 3}, {770, 92, 3}, {780, 62, 2},
158 {790, 63, 2}, {800, 64, 2}, {880, 70, 2}, {900, 72, 2},
159	{1000, 80, 2}, /* dsi clock frequency in MHz */
160};
161
162static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
163{
164 unsigned int i;
165 u8 m;
166 u8 n;
167 u8 p;
168 u32 m_seed;
169
170 if (dsi_clk < 300 || dsi_clk > 1000)
171 return -ECHRNG;
172
173	for (i = 0; i < ARRAY_SIZE(dsi_clk_tbl) - 1; i++) {
174 if (dsi_clk_tbl[i].freq > dsi_clk)
175 break;
176 }
177
178 m = dsi_clk_tbl[i].m;
179 p = dsi_clk_tbl[i].p;
180 m_seed = lfsr_converts[m - 62];
181 n = 1;
182 dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + p - 2);
183 dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
184 m_seed << DSI_PLL_M1_DIV_SHIFT;
185
186 return 0;
187}
188
189#else
190
191static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
192{
193 u32 m, n, p;
194 u32 ref_clk;
195 u32 error;
196 u32 tmp_error;
197 u32 target_dsi_clk;
198 u32 calc_dsi_clk;
199 u32 calc_m;
200 u32 calc_p;
201 u32 m_seed;
202
203 if (dsi_clk < 300 || dsi_clk > 1150) {
204 DRM_ERROR("DSI CLK Out of Range\n");
205 return -ECHRNG;
206 }
207
208 ref_clk = 25000;
209 target_dsi_clk = dsi_clk * 1000;
210 error = 0xFFFFFFFF;
211 calc_m = 0;
212 calc_p = 0;
213
214 for (m = 62; m <= 92; m++) {
215 for (p = 2; p <= 6; p++) {
216
217 calc_dsi_clk = (m * ref_clk) / p;
218 if (calc_dsi_clk >= target_dsi_clk) {
219 tmp_error = calc_dsi_clk - target_dsi_clk;
220 if (tmp_error < error) {
221 error = tmp_error;
222 calc_m = m;
223 calc_p = p;
224 }
225 }
226 }
227 }
228
229 m_seed = lfsr_converts[calc_m - 62];
230 n = 1;
231 dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
232 dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
233 m_seed << DSI_PLL_M1_DIV_SHIFT;
234
235 return 0;
236}
237
238#endif
239
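Either variant boils down to choosing M in 62..92 and P in 2..6 so that M * refclk / P lands on or just above the target, with N fixed at 1 and M programmed via its LFSR seed from lfsr_converts[]. A standalone sketch of the same search, assuming the 25 MHz reference clock used above:

#include <stdio.h>

int main(void)
{
	unsigned target_khz = 600 * 1000;	/* assumed 600 MHz target */
	unsigned best_m = 0, best_p = 0, err = ~0u;

	for (unsigned m = 62; m <= 92; m++)
		for (unsigned p = 2; p <= 6; p++) {
			unsigned clk = m * 25000 / p;	/* kHz, 25 MHz refclk */
			if (clk >= target_khz && clk - target_khz < err) {
				err = clk - target_khz;
				best_m = m;
				best_p = p;
			}
		}
	/* picks m=72, p=3: 72 * 25 / 3 = 600 MHz exactly */
	printf("m=%u p=%u err=%u kHz\n", best_m, best_p, err);
	return 0;
}
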
240/*
 241 * XXX: The muxing and gating are hard coded for now. Need to add support for
242 * sharing PLLs with two DSI outputs.
243 */
244static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
245{
246 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
247 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
248 const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
249 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
250 int ret;
251 struct dsi_mnp dsi_mnp;
252 u32 dsi_clk;
253
254 dsi_clk = dsi_rr_formula(mode, intel_dsi->pixel_format,
255 intel_dsi->video_mode_format,
256 intel_dsi->lane_count, !intel_dsi->eot_disable);
257
258 ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
259 if (ret) {
260 DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
261 return;
262 }
263
264 dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
265
266 DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
267 dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
268
269 vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
270 vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, dsi_mnp.dsi_pll_div);
271 vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
272}
273
274void vlv_enable_dsi_pll(struct intel_encoder *encoder)
275{
276 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
277 u32 tmp;
278
279 DRM_DEBUG_KMS("\n");
280
281 mutex_lock(&dev_priv->dpio_lock);
282
283 vlv_configure_dsi_pll(encoder);
284
285 /* wait at least 0.5 us after ungating before enabling VCO */
286 usleep_range(1, 10);
287
288 tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
289 tmp |= DSI_PLL_VCO_EN;
290 vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
291
292 mutex_unlock(&dev_priv->dpio_lock);
293
294 if (wait_for(I915_READ(PIPECONF(PIPE_A)) & PIPECONF_DSI_PLL_LOCKED, 20)) {
295 DRM_ERROR("DSI PLL lock failed\n");
296 return;
297 }
298
299 DRM_DEBUG_KMS("DSI PLL locked\n");
300}
301
302void vlv_disable_dsi_pll(struct intel_encoder *encoder)
303{
304 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
305 u32 tmp;
306
307 DRM_DEBUG_KMS("\n");
308
309 mutex_lock(&dev_priv->dpio_lock);
310
311 tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
312 tmp &= ~DSI_PLL_VCO_EN;
313 tmp |= DSI_PLL_LDO_GATE;
314 vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
315
316 mutex_unlock(&dev_priv->dpio_lock);
317}
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 7fa7df546c1e..1b64145c669a 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -153,6 +153,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
153 flags |= DRM_MODE_FLAG_NVSYNC; 153 flags |= DRM_MODE_FLAG_NVSYNC;
154 154
155 pipe_config->adjusted_mode.flags |= flags; 155 pipe_config->adjusted_mode.flags |= flags;
156
157 pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
156} 158}
157 159
158static void intel_disable_dvo(struct intel_encoder *encoder) 160static void intel_disable_dvo(struct intel_encoder *encoder)
@@ -267,11 +269,6 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
267 drm_mode_set_crtcinfo(adjusted_mode, 0); 269 drm_mode_set_crtcinfo(adjusted_mode, 0);
268 } 270 }
269 271
270 if (intel_dvo->dev.dev_ops->mode_fixup)
271 return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev,
272 &pipe_config->requested_mode,
273 adjusted_mode);
274
275 return true; 272 return true;
276} 273}
277 274
@@ -370,7 +367,6 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
370 367
371static void intel_dvo_destroy(struct drm_connector *connector) 368static void intel_dvo_destroy(struct drm_connector *connector)
372{ 369{
373 drm_sysfs_connector_remove(connector);
374 drm_connector_cleanup(connector); 370 drm_connector_cleanup(connector);
375 kfree(connector); 371 kfree(connector);
376} 372}
@@ -451,11 +447,11 @@ void intel_dvo_init(struct drm_device *dev)
451 int i; 447 int i;
452 int encoder_type = DRM_MODE_ENCODER_NONE; 448 int encoder_type = DRM_MODE_ENCODER_NONE;
453 449
454 intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL); 450 intel_dvo = kzalloc(sizeof(*intel_dvo), GFP_KERNEL);
455 if (!intel_dvo) 451 if (!intel_dvo)
456 return; 452 return;
457 453
458 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 454 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
459 if (!intel_connector) { 455 if (!intel_connector) {
460 kfree(intel_dvo); 456 kfree(intel_dvo);
461 return; 457 return;
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index bc2100007b21..d883b77b1b78 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -184,6 +184,27 @@ out:
184 return ret; 184 return ret;
185} 185}
186 186
187/** Sets the color ramps on behalf of RandR */
188static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
189 u16 blue, int regno)
190{
191 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
192
193 intel_crtc->lut_r[regno] = red >> 8;
194 intel_crtc->lut_g[regno] = green >> 8;
195 intel_crtc->lut_b[regno] = blue >> 8;
196}
197
198static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
199 u16 *blue, int regno)
200{
201 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
202
203 *red = intel_crtc->lut_r[regno] << 8;
204 *green = intel_crtc->lut_g[regno] << 8;
205 *blue = intel_crtc->lut_b[regno] << 8;
206}
207
187static struct drm_fb_helper_funcs intel_fb_helper_funcs = { 208static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
188 .gamma_set = intel_crtc_fb_gamma_set, 209 .gamma_set = intel_crtc_fb_gamma_set,
189 .gamma_get = intel_crtc_fb_gamma_get, 210 .gamma_get = intel_crtc_fb_gamma_get,
@@ -216,7 +237,7 @@ int intel_fbdev_init(struct drm_device *dev)
216 struct drm_i915_private *dev_priv = dev->dev_private; 237 struct drm_i915_private *dev_priv = dev->dev_private;
217 int ret; 238 int ret;
218 239
219 ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); 240 ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
220 if (!ifbdev) 241 if (!ifbdev)
221 return -ENOMEM; 242 return -ENOMEM;
222 243
@@ -225,7 +246,7 @@ int intel_fbdev_init(struct drm_device *dev)
225 246
226 ret = drm_fb_helper_init(dev, &ifbdev->helper, 247 ret = drm_fb_helper_init(dev, &ifbdev->helper,
227 INTEL_INFO(dev)->num_pipes, 248 INTEL_INFO(dev)->num_pipes,
228 INTELFB_CONN_LIMIT); 249 4);
229 if (ret) { 250 if (ret) {
230 kfree(ifbdev); 251 kfree(ifbdev);
231 return ret; 252 return ret;
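
The gamma helpers added above keep only the high byte of each 16-bit RandR component, so a round trip through the LUT drops the low byte. A standalone sketch of the conversion (input value assumed):

#include <stdio.h>

int main(void)
{
	unsigned short red = 0xABCD;		/* 16-bit RandR component */
	unsigned char stored = red >> 8;	/* 0xAB kept in the LUT */
	unsigned short back = stored << 8;	/* 0xAB00: low byte lost */

	printf("in=0x%04X stored=0x%02X out=0x%04X\n",
	       (unsigned)red, (unsigned)stored, (unsigned)back);
	return 0;
}
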
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4148cc85bf7f..4f4d346db8f0 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -713,6 +713,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
713 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 713 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
714 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 714 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
715 u32 tmp, flags = 0; 715 u32 tmp, flags = 0;
716 int dotclock;
716 717
717 tmp = I915_READ(intel_hdmi->hdmi_reg); 718 tmp = I915_READ(intel_hdmi->hdmi_reg);
718 719
@@ -727,6 +728,16 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
727 flags |= DRM_MODE_FLAG_NVSYNC; 728 flags |= DRM_MODE_FLAG_NVSYNC;
728 729
729 pipe_config->adjusted_mode.flags |= flags; 730 pipe_config->adjusted_mode.flags |= flags;
731
732 if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
733 dotclock = pipe_config->port_clock * 2 / 3;
734 else
735 dotclock = pipe_config->port_clock;
736
737 if (HAS_PCH_SPLIT(dev_priv->dev))
738 ironlake_check_encoder_dotclock(pipe_config, dotclock);
739
740 pipe_config->adjusted_mode.crtc_clock = dotclock;
730} 741}
731 742
732static void intel_enable_hdmi(struct intel_encoder *encoder) 743static void intel_enable_hdmi(struct intel_encoder *encoder)
@@ -862,7 +873,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
862 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 873 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
863 struct drm_device *dev = encoder->base.dev; 874 struct drm_device *dev = encoder->base.dev;
864 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 875 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
865 int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2; 876 int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
866 int portclock_limit = hdmi_portclock_limit(intel_hdmi); 877 int portclock_limit = hdmi_portclock_limit(intel_hdmi);
867 int desired_bpp; 878 int desired_bpp;
868 879
@@ -904,7 +915,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
904 pipe_config->pipe_bpp = desired_bpp; 915 pipe_config->pipe_bpp = desired_bpp;
905 } 916 }
906 917
907 if (adjusted_mode->clock > portclock_limit) { 918 if (adjusted_mode->crtc_clock > portclock_limit) {
908 DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n"); 919 DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
909 return false; 920 return false;
910 } 921 }
@@ -1079,35 +1090,35 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
1079 1090
1080 /* Enable clock channels for this port */ 1091 /* Enable clock channels for this port */
1081 mutex_lock(&dev_priv->dpio_lock); 1092 mutex_lock(&dev_priv->dpio_lock);
1082 val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port)); 1093 val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
1083 val = 0; 1094 val = 0;
1084 if (pipe) 1095 if (pipe)
1085 val |= (1<<21); 1096 val |= (1<<21);
1086 else 1097 else
1087 val &= ~(1<<21); 1098 val &= ~(1<<21);
1088 val |= 0x001000c4; 1099 val |= 0x001000c4;
1089 vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val); 1100 vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
1090 1101
1091 /* HDMI 1.0V-2dB */ 1102 /* HDMI 1.0V-2dB */
1092 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0); 1103 vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0);
1093 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), 1104 vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port),
1094 0x2b245f5f); 1105 0x2b245f5f);
1095 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port), 1106 vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
1096 0x5578b83a); 1107 0x5578b83a);
1097 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 1108 vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port),
1098 0x0c782040); 1109 0x0c782040);
1099 vlv_dpio_write(dev_priv, DPIO_TX3_SWING_CTL4(port), 1110 vlv_dpio_write(dev_priv, pipe, DPIO_TX3_SWING_CTL4(port),
1100 0x2b247878); 1111 0x2b247878);
1101 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000); 1112 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
1102 vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), 1113 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
1103 0x00002000); 1114 0x00002000);
1104 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 1115 vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
1105 DPIO_TX_OCALINIT_EN); 1116 DPIO_TX_OCALINIT_EN);
1106 1117
1107 /* Program lane clock */ 1118 /* Program lane clock */
1108 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 1119 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port),
1109 0x00760018); 1120 0x00760018);
1110 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 1121 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port),
1111 0x00400888); 1122 0x00400888);
1112 mutex_unlock(&dev_priv->dpio_lock); 1123 mutex_unlock(&dev_priv->dpio_lock);
1113 1124
@@ -1121,30 +1132,33 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1121 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1132 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1122 struct drm_device *dev = encoder->base.dev; 1133 struct drm_device *dev = encoder->base.dev;
1123 struct drm_i915_private *dev_priv = dev->dev_private; 1134 struct drm_i915_private *dev_priv = dev->dev_private;
1135 struct intel_crtc *intel_crtc =
1136 to_intel_crtc(encoder->base.crtc);
1124 int port = vlv_dport_to_channel(dport); 1137 int port = vlv_dport_to_channel(dport);
1138 int pipe = intel_crtc->pipe;
1125 1139
1126 if (!IS_VALLEYVIEW(dev)) 1140 if (!IS_VALLEYVIEW(dev))
1127 return; 1141 return;
1128 1142
1129 /* Program Tx lane resets to default */ 1143 /* Program Tx lane resets to default */
1130 mutex_lock(&dev_priv->dpio_lock); 1144 mutex_lock(&dev_priv->dpio_lock);
1131 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 1145 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
1132 DPIO_PCS_TX_LANE2_RESET | 1146 DPIO_PCS_TX_LANE2_RESET |
1133 DPIO_PCS_TX_LANE1_RESET); 1147 DPIO_PCS_TX_LANE1_RESET);
1134 vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 1148 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
1135 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | 1149 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1136 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | 1150 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1137 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | 1151 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1138 DPIO_PCS_CLK_SOFT_RESET); 1152 DPIO_PCS_CLK_SOFT_RESET);
1139 1153
1140 /* Fix up inter-pair skew failure */ 1154 /* Fix up inter-pair skew failure */
1141 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00); 1155 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
1142 vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500); 1156 vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
1143 vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000); 1157 vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
1144 1158
1145 vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), 1159 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
1146 0x00002000); 1160 0x00002000);
1147 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 1161 vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
1148 DPIO_TX_OCALINIT_EN); 1162 DPIO_TX_OCALINIT_EN);
1149 mutex_unlock(&dev_priv->dpio_lock); 1163 mutex_unlock(&dev_priv->dpio_lock);
1150} 1164}
@@ -1153,18 +1167,20 @@ static void intel_hdmi_post_disable(struct intel_encoder *encoder)
1153{ 1167{
1154 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1168 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1155 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 1169 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1170 struct intel_crtc *intel_crtc =
1171 to_intel_crtc(encoder->base.crtc);
1156 int port = vlv_dport_to_channel(dport); 1172 int port = vlv_dport_to_channel(dport);
1173 int pipe = intel_crtc->pipe;
1157 1174
1158 /* Reset lanes to avoid HDMI flicker (VLV w/a) */ 1175 /* Reset lanes to avoid HDMI flicker (VLV w/a) */
1159 mutex_lock(&dev_priv->dpio_lock); 1176 mutex_lock(&dev_priv->dpio_lock);
1160 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 0x00000000); 1177 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), 0x00000000);
1161 vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 0x00e00060); 1178 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), 0x00e00060);
1162 mutex_unlock(&dev_priv->dpio_lock); 1179 mutex_unlock(&dev_priv->dpio_lock);
1163} 1180}
1164 1181
1165static void intel_hdmi_destroy(struct drm_connector *connector) 1182static void intel_hdmi_destroy(struct drm_connector *connector)
1166{ 1183{
1167 drm_sysfs_connector_remove(connector);
1168 drm_connector_cleanup(connector); 1184 drm_connector_cleanup(connector);
1169 kfree(connector); 1185 kfree(connector);
1170} 1186}
@@ -1211,6 +1227,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1211 1227
1212 connector->interlace_allowed = 1; 1228 connector->interlace_allowed = 1;
1213 connector->doublescan_allowed = 0; 1229 connector->doublescan_allowed = 0;
1230 connector->stereo_allowed = 1;
1214 1231
1215 switch (port) { 1232 switch (port) {
1216 case PORT_B: 1233 case PORT_B:
@@ -1275,11 +1292,11 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
1275 struct intel_encoder *intel_encoder; 1292 struct intel_encoder *intel_encoder;
1276 struct intel_connector *intel_connector; 1293 struct intel_connector *intel_connector;
1277 1294
1278 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); 1295 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
1279 if (!intel_dig_port) 1296 if (!intel_dig_port)
1280 return; 1297 return;
1281 1298
1282 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 1299 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
1283 if (!intel_connector) { 1300 if (!intel_connector) {
1284 kfree(intel_dig_port); 1301 kfree(intel_dig_port);
1285 return; 1302 return;
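
The 12bpc handling above uses the fact that deep color carries 1.5x the bits of 8bpc, so pixel clock and port clock are related by a 2:3 ratio in both directions. A standalone sketch with an assumed link clock:

#include <stdio.h>

int main(void)
{
	int port_clock = 222750;	/* assumed 12bpc link clock, kHz */

	int dotclock = port_clock * 2 / 3;	/* 148500 kHz pixel clock */
	int clock_12bpc = dotclock * 3 / 2;	/* back to 222750 kHz */

	printf("dotclock=%d kHz, 12bpc link=%d kHz\n", dotclock, clock_12bpc);
	return 0;
}
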
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d1c1e0f7f262..2ca17b14b6c1 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -34,6 +34,11 @@
34#include <drm/i915_drm.h> 34#include <drm/i915_drm.h>
35#include "i915_drv.h" 35#include "i915_drv.h"
36 36
37enum disp_clk {
38 CDCLK,
39 CZCLK
40};
41
37struct gmbus_port { 42struct gmbus_port {
38 const char *name; 43 const char *name;
39 int reg; 44 int reg;
@@ -58,10 +63,69 @@ to_intel_gmbus(struct i2c_adapter *i2c)
58 return container_of(i2c, struct intel_gmbus, adapter); 63 return container_of(i2c, struct intel_gmbus, adapter);
59} 64}
60 65
66static int get_disp_clk_div(struct drm_i915_private *dev_priv,
67 enum disp_clk clk)
68{
69 u32 reg_val;
70 int clk_ratio;
71
72 reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
73
74 if (clk == CDCLK)
75 clk_ratio =
76 ((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
77 else
78 clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
79
80 return clk_ratio;
81}
82
83static void gmbus_set_freq(struct drm_i915_private *dev_priv)
84{
85 int vco_freq[] = { 800, 1600, 2000, 2400 };
86 int gmbus_freq = 0, cdclk_div, hpll_freq;
87
88 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
89
90 /* Skip setting the gmbus freq if BIOS has already programmed it */
91 if (I915_READ(GMBUSFREQ_VLV) != 0xA0)
92 return;
93
94 /* Obtain SKU information */
95 mutex_lock(&dev_priv->dpio_lock);
96 hpll_freq =
97 vlv_cck_read(dev_priv, CCK_FUSE_REG) & CCK_FUSE_HPLL_FREQ_MASK;
98 mutex_unlock(&dev_priv->dpio_lock);
99
100 /* Get the CDCLK divide ratio */
101 cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
102
103 /*
104 * Program the gmbus_freq based on the cdclk frequency.
105 * BSpec erroneously claims we should aim for 4MHz, but
106 * in fact 1MHz is the correct frequency.
107 */
108 if (cdclk_div)
109 gmbus_freq = (vco_freq[hpll_freq] << 1) / cdclk_div;
110
111 if (WARN_ON(gmbus_freq == 0))
112 return;
113
114 I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
115}
116
61void 117void
62intel_i2c_reset(struct drm_device *dev) 118intel_i2c_reset(struct drm_device *dev)
63{ 119{
64 struct drm_i915_private *dev_priv = dev->dev_private; 120 struct drm_i915_private *dev_priv = dev->dev_private;
121
122 /*
 123	 * In a BIOS-less system, program the correct gmbus frequency
 124	 * before reading the EDID.
125 */
126 if (IS_VALLEYVIEW(dev))
127 gmbus_set_freq(dev_priv);
128
65 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); 129 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
66 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0); 130 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
67} 131}
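
How gmbus_set_freq() above lands near the 0xA0 BIOS default is easiest to see with numbers. A standalone sketch, with the fuse reading and CDCLK divide ratio assumed:

#include <stdio.h>

int main(void)
{
	int vco_freq[] = { 800, 1600, 2000, 2400 };	/* MHz, per HPLL fuse */
	int hpll_freq = 0;	/* assumed fuse value: 800 MHz VCO */
	int cdclk_div = 10;	/* assumed CDCLK divide ratio */

	int gmbus_freq = (vco_freq[hpll_freq] << 1) / cdclk_div;
	/* 800 * 2 / 10 = 160 = 0xA0, the stock value checked above */
	printf("GMBUSFREQ = %d (0x%X)\n", gmbus_freq, gmbus_freq);
	return 0;
}
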
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 831a5c021c4b..ae0c843dd263 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -92,6 +92,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
92 struct drm_device *dev = encoder->base.dev; 92 struct drm_device *dev = encoder->base.dev;
93 struct drm_i915_private *dev_priv = dev->dev_private; 93 struct drm_i915_private *dev_priv = dev->dev_private;
94 u32 lvds_reg, tmp, flags = 0; 94 u32 lvds_reg, tmp, flags = 0;
95 int dotclock;
95 96
96 if (HAS_PCH_SPLIT(dev)) 97 if (HAS_PCH_SPLIT(dev))
97 lvds_reg = PCH_LVDS; 98 lvds_reg = PCH_LVDS;
@@ -116,6 +117,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
116 117
117 pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE; 118 pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
118 } 119 }
120
121 dotclock = pipe_config->port_clock;
122
123 if (HAS_PCH_SPLIT(dev_priv->dev))
124 ironlake_check_encoder_dotclock(pipe_config, dotclock);
125
126 pipe_config->adjusted_mode.crtc_clock = dotclock;
119} 127}
120 128
121/* The LVDS pin pair needs to be on before the DPLLs are enabled. 129/* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -466,7 +474,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
466 474
467 intel_panel_fini(&lvds_connector->base.panel); 475 intel_panel_fini(&lvds_connector->base.panel);
468 476
469 drm_sysfs_connector_remove(connector);
470 drm_connector_cleanup(connector); 477 drm_connector_cleanup(connector);
471 kfree(connector); 478 kfree(connector);
472} 479}
@@ -786,7 +793,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
786 return true; 793 return true;
787 794
788 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { 795 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
789 struct child_device_config *child = dev_priv->vbt.child_dev + i; 796 union child_device_config *uchild = dev_priv->vbt.child_dev + i;
797 struct old_child_dev_config *child = &uchild->old;
790 798
791 /* If the device type is not LFP, continue. 799 /* If the device type is not LFP, continue.
792 * We have to check both the new identifiers as well as the 800 * We have to check both the new identifiers as well as the
@@ -940,11 +948,11 @@ void intel_lvds_init(struct drm_device *dev)
940 } 948 }
941 } 949 }
942 950
943 lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL); 951 lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
944 if (!lvds_encoder) 952 if (!lvds_encoder)
945 return; 953 return;
946 954
947 lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL); 955 lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL);
948 if (!lvds_connector) { 956 if (!lvds_connector) {
949 kfree(lvds_encoder); 957 kfree(lvds_encoder);
950 return; 958 return;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 119771ff46ab..2acf5cae20e4 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -36,8 +36,11 @@
36#include "i915_drv.h" 36#include "i915_drv.h"
37#include "intel_drv.h" 37#include "intel_drv.h"
38 38
39#define PCI_ASLE 0xe4 39#define PCI_ASLE 0xe4
40#define PCI_ASLS 0xfc 40#define PCI_ASLS 0xfc
41#define PCI_SWSCI 0xe8
42#define PCI_SWSCI_SCISEL (1 << 15)
43#define PCI_SWSCI_GSSCIE (1 << 0)
41 44
42#define OPREGION_HEADER_OFFSET 0 45#define OPREGION_HEADER_OFFSET 0
43#define OPREGION_ACPI_OFFSET 0x100 46#define OPREGION_ACPI_OFFSET 0x100
@@ -107,25 +110,38 @@ struct opregion_asle {
107 u32 epfm; /* enabled panel fitting modes */ 110 u32 epfm; /* enabled panel fitting modes */
108 u8 plut[74]; /* panel LUT and identifier */ 111 u8 plut[74]; /* panel LUT and identifier */
109 u32 pfmb; /* PWM freq and min brightness */ 112 u32 pfmb; /* PWM freq and min brightness */
110 u8 rsvd[102]; 113 u32 cddv; /* color correction default values */
114 u32 pcft; /* power conservation features */
115 u32 srot; /* supported rotation angles */
116 u32 iuer; /* IUER events */
117 u8 rsvd[86];
111} __attribute__((packed)); 118} __attribute__((packed));
112 119
113/* Driver readiness indicator */ 120/* Driver readiness indicator */
114#define ASLE_ARDY_READY (1 << 0) 121#define ASLE_ARDY_READY (1 << 0)
115#define ASLE_ARDY_NOT_READY (0 << 0) 122#define ASLE_ARDY_NOT_READY (0 << 0)
116 123
117/* ASLE irq request bits */ 124/* ASLE Interrupt Command (ASLC) bits */
118#define ASLE_SET_ALS_ILLUM (1 << 0) 125#define ASLC_SET_ALS_ILLUM (1 << 0)
119#define ASLE_SET_BACKLIGHT (1 << 1) 126#define ASLC_SET_BACKLIGHT (1 << 1)
120#define ASLE_SET_PFIT (1 << 2) 127#define ASLC_SET_PFIT (1 << 2)
121#define ASLE_SET_PWM_FREQ (1 << 3) 128#define ASLC_SET_PWM_FREQ (1 << 3)
122#define ASLE_REQ_MSK 0xf 129#define ASLC_SUPPORTED_ROTATION_ANGLES (1 << 4)
123 130#define ASLC_BUTTON_ARRAY (1 << 5)
124/* response bits of ASLE irq request */ 131#define ASLC_CONVERTIBLE_INDICATOR (1 << 6)
125#define ASLE_ALS_ILLUM_FAILED (1<<10) 132#define ASLC_DOCKING_INDICATOR (1 << 7)
126#define ASLE_BACKLIGHT_FAILED (1<<12) 133#define ASLC_ISCT_STATE_CHANGE (1 << 8)
127#define ASLE_PFIT_FAILED (1<<14) 134#define ASLC_REQ_MSK 0x1ff
128#define ASLE_PWM_FREQ_FAILED (1<<16) 135/* response bits */
136#define ASLC_ALS_ILLUM_FAILED (1 << 10)
137#define ASLC_BACKLIGHT_FAILED (1 << 12)
138#define ASLC_PFIT_FAILED (1 << 14)
139#define ASLC_PWM_FREQ_FAILED (1 << 16)
140#define ASLC_ROTATION_ANGLES_FAILED (1 << 18)
141#define ASLC_BUTTON_ARRAY_FAILED (1 << 20)
142#define ASLC_CONVERTIBLE_FAILED (1 << 22)
143#define ASLC_DOCKING_FAILED (1 << 24)
144#define ASLC_ISCT_STATE_FAILED (1 << 26)
129 145
130/* Technology enabled indicator */ 146/* Technology enabled indicator */
131#define ASLE_TCHE_ALS_EN (1 << 0) 147#define ASLE_TCHE_ALS_EN (1 << 0)
@@ -151,6 +167,60 @@ struct opregion_asle {
151 167
152#define ASLE_CBLV_VALID (1<<31) 168#define ASLE_CBLV_VALID (1<<31)
153 169
170/* IUER */
171#define ASLE_IUER_DOCKING (1 << 7)
172#define ASLE_IUER_CONVERTIBLE (1 << 6)
173#define ASLE_IUER_ROTATION_LOCK_BTN (1 << 4)
174#define ASLE_IUER_VOLUME_DOWN_BTN (1 << 3)
175#define ASLE_IUER_VOLUME_UP_BTN (1 << 2)
176#define ASLE_IUER_WINDOWS_BTN (1 << 1)
177#define ASLE_IUER_POWER_BTN (1 << 0)
178
179/* Software System Control Interrupt (SWSCI) */
180#define SWSCI_SCIC_INDICATOR (1 << 0)
181#define SWSCI_SCIC_MAIN_FUNCTION_SHIFT 1
182#define SWSCI_SCIC_MAIN_FUNCTION_MASK (0xf << 1)
183#define SWSCI_SCIC_SUB_FUNCTION_SHIFT 8
184#define SWSCI_SCIC_SUB_FUNCTION_MASK (0xff << 8)
185#define SWSCI_SCIC_EXIT_PARAMETER_SHIFT 8
186#define SWSCI_SCIC_EXIT_PARAMETER_MASK (0xff << 8)
187#define SWSCI_SCIC_EXIT_STATUS_SHIFT 5
188#define SWSCI_SCIC_EXIT_STATUS_MASK (7 << 5)
189#define SWSCI_SCIC_EXIT_STATUS_SUCCESS 1
190
191#define SWSCI_FUNCTION_CODE(main, sub) \
192 ((main) << SWSCI_SCIC_MAIN_FUNCTION_SHIFT | \
193 (sub) << SWSCI_SCIC_SUB_FUNCTION_SHIFT)
194
195/* SWSCI: Get BIOS Data (GBDA) */
196#define SWSCI_GBDA 4
197#define SWSCI_GBDA_SUPPORTED_CALLS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 0)
198#define SWSCI_GBDA_REQUESTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 1)
199#define SWSCI_GBDA_BOOT_DISPLAY_PREF SWSCI_FUNCTION_CODE(SWSCI_GBDA, 4)
200#define SWSCI_GBDA_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 5)
201#define SWSCI_GBDA_TV_STANDARD SWSCI_FUNCTION_CODE(SWSCI_GBDA, 6)
202#define SWSCI_GBDA_INTERNAL_GRAPHICS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 7)
203#define SWSCI_GBDA_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_GBDA, 10)
204
205/* SWSCI: System BIOS Callbacks (SBCB) */
206#define SWSCI_SBCB 6
207#define SWSCI_SBCB_SUPPORTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 0)
208#define SWSCI_SBCB_INIT_COMPLETION SWSCI_FUNCTION_CODE(SWSCI_SBCB, 1)
209#define SWSCI_SBCB_PRE_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 3)
210#define SWSCI_SBCB_POST_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 4)
211#define SWSCI_SBCB_DISPLAY_SWITCH SWSCI_FUNCTION_CODE(SWSCI_SBCB, 5)
212#define SWSCI_SBCB_SET_TV_FORMAT SWSCI_FUNCTION_CODE(SWSCI_SBCB, 6)
213#define SWSCI_SBCB_ADAPTER_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 7)
214#define SWSCI_SBCB_DISPLAY_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 8)
215#define SWSCI_SBCB_SET_BOOT_DISPLAY SWSCI_FUNCTION_CODE(SWSCI_SBCB, 9)
216#define SWSCI_SBCB_SET_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 10)
217#define SWSCI_SBCB_SET_INTERNAL_GFX SWSCI_FUNCTION_CODE(SWSCI_SBCB, 11)
218#define SWSCI_SBCB_POST_HIRES_TO_DOS_FS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 16)
219#define SWSCI_SBCB_SUSPEND_RESUME SWSCI_FUNCTION_CODE(SWSCI_SBCB, 17)
220#define SWSCI_SBCB_SET_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 18)
221#define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
222#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
223
154#define ACPI_OTHER_OUTPUT (0<<8) 224#define ACPI_OTHER_OUTPUT (0<<8)
155#define ACPI_VGA_OUTPUT (1<<8) 225#define ACPI_VGA_OUTPUT (1<<8)
156#define ACPI_TV_OUTPUT (2<<8) 226#define ACPI_TV_OUTPUT (2<<8)
@@ -158,6 +228,169 @@ struct opregion_asle {
158#define ACPI_LVDS_OUTPUT (4<<8) 228#define ACPI_LVDS_OUTPUT (4<<8)
159 229
160#ifdef CONFIG_ACPI 230#ifdef CONFIG_ACPI
231static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
232{
233 struct drm_i915_private *dev_priv = dev->dev_private;
234 struct opregion_swsci __iomem *swsci = dev_priv->opregion.swsci;
235 u32 main_function, sub_function, scic;
236 u16 pci_swsci;
237 u32 dslp;
238
239 if (!swsci)
240 return -ENODEV;
241
242 main_function = (function & SWSCI_SCIC_MAIN_FUNCTION_MASK) >>
243 SWSCI_SCIC_MAIN_FUNCTION_SHIFT;
244 sub_function = (function & SWSCI_SCIC_SUB_FUNCTION_MASK) >>
245 SWSCI_SCIC_SUB_FUNCTION_SHIFT;
246
247 /* Check if we can call the function. See swsci_setup for details. */
248 if (main_function == SWSCI_SBCB) {
249 if ((dev_priv->opregion.swsci_sbcb_sub_functions &
250 (1 << sub_function)) == 0)
251 return -EINVAL;
252 } else if (main_function == SWSCI_GBDA) {
253 if ((dev_priv->opregion.swsci_gbda_sub_functions &
254 (1 << sub_function)) == 0)
255 return -EINVAL;
256 }
257
258 /* Driver sleep timeout in ms. */
259 dslp = ioread32(&swsci->dslp);
260 if (!dslp) {
261 dslp = 2;
262 } else if (dslp > 500) {
263 /* Hey bios, trust must be earned. */
 264		WARN_ONCE(1, "excessive driver sleep timeout (DSLP) %u\n", dslp);
265 dslp = 500;
266 }
267
268 /* The spec tells us to do this, but we are the only user... */
269 scic = ioread32(&swsci->scic);
270 if (scic & SWSCI_SCIC_INDICATOR) {
271 DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
272 return -EBUSY;
273 }
274
275 scic = function | SWSCI_SCIC_INDICATOR;
276
277 iowrite32(parm, &swsci->parm);
278 iowrite32(scic, &swsci->scic);
279
280 /* Ensure SCI event is selected and event trigger is cleared. */
281 pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
282 if (!(pci_swsci & PCI_SWSCI_SCISEL) || (pci_swsci & PCI_SWSCI_GSSCIE)) {
283 pci_swsci |= PCI_SWSCI_SCISEL;
284 pci_swsci &= ~PCI_SWSCI_GSSCIE;
285 pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
286 }
287
288 /* Use event trigger to tell bios to check the mail. */
289 pci_swsci |= PCI_SWSCI_GSSCIE;
290 pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
291
292 /* Poll for the result. */
293#define C (((scic = ioread32(&swsci->scic)) & SWSCI_SCIC_INDICATOR) == 0)
294 if (wait_for(C, dslp)) {
295 DRM_DEBUG_DRIVER("SWSCI request timed out\n");
296 return -ETIMEDOUT;
297 }
298
299 scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >>
300 SWSCI_SCIC_EXIT_STATUS_SHIFT;
301
302 /* Note: scic == 0 is an error! */
303 if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
304 DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic);
305 return -EIO;
306 }
307
308 if (parm_out)
309 *parm_out = ioread32(&swsci->parm);
310
311 return 0;
312
313#undef C
314}
315
316#define DISPLAY_TYPE_CRT 0
317#define DISPLAY_TYPE_TV 1
318#define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL 2
319#define DISPLAY_TYPE_INTERNAL_FLAT_PANEL 3
320
321int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
322 bool enable)
323{
324 struct drm_device *dev = intel_encoder->base.dev;
325 u32 parm = 0;
326 u32 type = 0;
327 u32 port;
328
329 /* don't care about old stuff for now */
330 if (!HAS_DDI(dev))
331 return 0;
332
333 port = intel_ddi_get_encoder_port(intel_encoder);
334 if (port == PORT_E) {
335 port = 0;
336 } else {
337 parm |= 1 << port;
338 port++;
339 }
340
341 if (!enable)
342 parm |= 4 << 8;
343
344 switch (intel_encoder->type) {
345 case INTEL_OUTPUT_ANALOG:
346 type = DISPLAY_TYPE_CRT;
347 break;
348 case INTEL_OUTPUT_UNKNOWN:
349 case INTEL_OUTPUT_DISPLAYPORT:
350 case INTEL_OUTPUT_HDMI:
351 type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
352 break;
353 case INTEL_OUTPUT_EDP:
354 type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
355 break;
356 default:
357 WARN_ONCE(1, "unsupported intel_encoder type %d\n",
358 intel_encoder->type);
359 return -EINVAL;
360 }
361
362 parm |= type << (16 + port * 3);
363
364 return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
365}
366
367static const struct {
368 pci_power_t pci_power_state;
369 u32 parm;
370} power_state_map[] = {
371 { PCI_D0, 0x00 },
372 { PCI_D1, 0x01 },
373 { PCI_D2, 0x02 },
374 { PCI_D3hot, 0x04 },
375 { PCI_D3cold, 0x04 },
376};
377
378int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
379{
380 int i;
381
382 if (!HAS_DDI(dev))
383 return 0;
384
385 for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
386 if (state == power_state_map[i].pci_power_state)
387 return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE,
388 power_state_map[i].parm, NULL);
389 }
390
391 return -EINVAL;
392}
393
161static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) 394static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
162{ 395{
163 struct drm_i915_private *dev_priv = dev->dev_private; 396 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -166,11 +399,11 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
166 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); 399 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
167 400
168 if (!(bclp & ASLE_BCLP_VALID)) 401 if (!(bclp & ASLE_BCLP_VALID))
169 return ASLE_BACKLIGHT_FAILED; 402 return ASLC_BACKLIGHT_FAILED;
170 403
171 bclp &= ASLE_BCLP_MSK; 404 bclp &= ASLE_BCLP_MSK;
172 if (bclp > 255) 405 if (bclp > 255)
173 return ASLE_BACKLIGHT_FAILED; 406 return ASLC_BACKLIGHT_FAILED;
174 407
175 intel_panel_set_backlight(dev, bclp, 255); 408 intel_panel_set_backlight(dev, bclp, 255);
176 iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); 409 iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
@@ -183,13 +416,13 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
183 /* alsi is the current ALS reading in lux. 0 indicates below sensor 416 /* alsi is the current ALS reading in lux. 0 indicates below sensor
184 range, 0xffff indicates above sensor range. 1-0xfffe are valid */ 417 range, 0xffff indicates above sensor range. 1-0xfffe are valid */
185 DRM_DEBUG_DRIVER("Illum is not supported\n"); 418 DRM_DEBUG_DRIVER("Illum is not supported\n");
186 return ASLE_ALS_ILLUM_FAILED; 419 return ASLC_ALS_ILLUM_FAILED;
187} 420}
188 421
189static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) 422static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
190{ 423{
191 DRM_DEBUG_DRIVER("PWM freq is not supported\n"); 424 DRM_DEBUG_DRIVER("PWM freq is not supported\n");
192 return ASLE_PWM_FREQ_FAILED; 425 return ASLC_PWM_FREQ_FAILED;
193} 426}
194 427
195static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) 428static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
@@ -197,39 +430,106 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
197 /* Panel fitting is currently controlled by the X code, so this is a 430 /* Panel fitting is currently controlled by the X code, so this is a
198 noop until modesetting support works fully */ 431 noop until modesetting support works fully */
199 DRM_DEBUG_DRIVER("Pfit is not supported\n"); 432 DRM_DEBUG_DRIVER("Pfit is not supported\n");
200 return ASLE_PFIT_FAILED; 433 return ASLC_PFIT_FAILED;
434}
435
436static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot)
437{
438 DRM_DEBUG_DRIVER("SROT is not supported\n");
439 return ASLC_ROTATION_ANGLES_FAILED;
440}
441
442static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
443{
444 if (!iuer)
445 DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
446 if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
447 DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n");
448 if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
449 DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n");
450 if (iuer & ASLE_IUER_VOLUME_UP_BTN)
451 DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n");
452 if (iuer & ASLE_IUER_WINDOWS_BTN)
453 DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n");
454 if (iuer & ASLE_IUER_POWER_BTN)
455 DRM_DEBUG_DRIVER("Button array event is not supported (power)\n");
456
457 return ASLC_BUTTON_ARRAY_FAILED;
458}
459
460static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
461{
462 if (iuer & ASLE_IUER_CONVERTIBLE)
463 DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
464 else
465 DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n");
466
467 return ASLC_CONVERTIBLE_FAILED;
468}
469
470static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
471{
472 if (iuer & ASLE_IUER_DOCKING)
473 DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
474 else
475 DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n");
476
477 return ASLC_DOCKING_FAILED;
478}
479
480static u32 asle_isct_state(struct drm_device *dev)
481{
482 DRM_DEBUG_DRIVER("ISCT is not supported\n");
483 return ASLC_ISCT_STATE_FAILED;
201} 484}
202 485
203void intel_opregion_asle_intr(struct drm_device *dev) 486void intel_opregion_asle_intr(struct drm_device *dev)
204{ 487{
205 struct drm_i915_private *dev_priv = dev->dev_private; 488 struct drm_i915_private *dev_priv = dev->dev_private;
206 struct opregion_asle __iomem *asle = dev_priv->opregion.asle; 489 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
207 u32 asle_stat = 0; 490 u32 aslc_stat = 0;
208 u32 asle_req; 491 u32 aslc_req;
209 492
210 if (!asle) 493 if (!asle)
211 return; 494 return;
212 495
213 asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK; 496 aslc_req = ioread32(&asle->aslc);
214 497
215 if (!asle_req) { 498 if (!(aslc_req & ASLC_REQ_MSK)) {
216 DRM_DEBUG_DRIVER("non asle set request??\n"); 499 DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
500 aslc_req);
217 return; 501 return;
218 } 502 }
219 503
220 if (asle_req & ASLE_SET_ALS_ILLUM) 504 if (aslc_req & ASLC_SET_ALS_ILLUM)
221 asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi)); 505 aslc_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
506
507 if (aslc_req & ASLC_SET_BACKLIGHT)
508 aslc_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
509
510 if (aslc_req & ASLC_SET_PFIT)
511 aslc_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
512
513 if (aslc_req & ASLC_SET_PWM_FREQ)
514 aslc_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
222 515
223 if (asle_req & ASLE_SET_BACKLIGHT) 516 if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
224 asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp)); 517 aslc_stat |= asle_set_supported_rotation_angles(dev,
518 ioread32(&asle->srot));
225 519
226 if (asle_req & ASLE_SET_PFIT) 520 if (aslc_req & ASLC_BUTTON_ARRAY)
227 asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit)); 521 aslc_stat |= asle_set_button_array(dev, ioread32(&asle->iuer));
228 522
229 if (asle_req & ASLE_SET_PWM_FREQ) 523 if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
230 asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb)); 524 aslc_stat |= asle_set_convertible(dev, ioread32(&asle->iuer));
231 525
232 iowrite32(asle_stat, &asle->aslc); 526 if (aslc_req & ASLC_DOCKING_INDICATOR)
527 aslc_stat |= asle_set_docking(dev, ioread32(&asle->iuer));
528
529 if (aslc_req & ASLC_ISCT_STATE_CHANGE)
530 aslc_stat |= asle_isct_state(dev);
531
532 iowrite32(aslc_stat, &asle->aslc);
233} 533}
234 534
235#define ACPI_EV_DISPLAY_SWITCH (1<<0) 535#define ACPI_EV_DISPLAY_SWITCH (1<<0)
@@ -446,8 +746,68 @@ void intel_opregion_fini(struct drm_device *dev)
446 opregion->swsci = NULL; 746 opregion->swsci = NULL;
447 opregion->asle = NULL; 747 opregion->asle = NULL;
448 opregion->vbt = NULL; 748 opregion->vbt = NULL;
749 opregion->lid_state = NULL;
750}
751
752static void swsci_setup(struct drm_device *dev)
753{
754 struct drm_i915_private *dev_priv = dev->dev_private;
755 struct intel_opregion *opregion = &dev_priv->opregion;
756 bool requested_callbacks = false;
757 u32 tmp;
758
759 /* Sub-function code 0 is okay, let's allow them. */
760 opregion->swsci_gbda_sub_functions = 1;
761 opregion->swsci_sbcb_sub_functions = 1;
762
763 /* We use GBDA to ask for supported GBDA calls. */
764 if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
765 /* make the bits match the sub-function codes */
766 tmp <<= 1;
767 opregion->swsci_gbda_sub_functions |= tmp;
768 }
769
770 /*
771 * We also use GBDA to ask for _requested_ SBCB callbacks. The driver
772 * must not call interfaces that are not specifically requested by the
773 * bios.
774 */
775 if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
776 /* here, the bits already match sub-function codes */
777 opregion->swsci_sbcb_sub_functions |= tmp;
778 requested_callbacks = true;
779 }
780
781 /*
782 * But we use SBCB to ask for _supported_ SBCB calls. This does not mean
783 * the callback is _requested_. But we still can't call interfaces that
784 * are not requested.
785 */
786 if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
787 /* make the bits match the sub-function codes */
788 u32 low = tmp & 0x7ff;
789 u32 high = tmp & ~0xfff; /* bit 11 is reserved */
790 tmp = (high << 4) | (low << 1) | 1;
791
792 /* best guess what to do with supported wrt requested */
793 if (requested_callbacks) {
794 u32 req = opregion->swsci_sbcb_sub_functions;
795 if ((req & tmp) != req)
796 DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
797 /* XXX: for now, trust the requested callbacks */
798 /* opregion->swsci_sbcb_sub_functions &= tmp; */
799 } else {
800 opregion->swsci_sbcb_sub_functions |= tmp;
801 }
802 }
803
804 DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
805 opregion->swsci_gbda_sub_functions,
806 opregion->swsci_sbcb_sub_functions);
449} 807}
450#endif 808#else /* CONFIG_ACPI */
809static inline void swsci_setup(struct drm_device *dev) {}
810#endif /* CONFIG_ACPI */
451 811
452int intel_opregion_setup(struct drm_device *dev) 812int intel_opregion_setup(struct drm_device *dev)
453{ 813{
@@ -490,6 +850,7 @@ int intel_opregion_setup(struct drm_device *dev)
490 if (mboxes & MBOX_SWSCI) { 850 if (mboxes & MBOX_SWSCI) {
491 DRM_DEBUG_DRIVER("SWSCI supported\n"); 851 DRM_DEBUG_DRIVER("SWSCI supported\n");
492 opregion->swsci = base + OPREGION_SWSCI_OFFSET; 852 opregion->swsci = base + OPREGION_SWSCI_OFFSET;
853 swsci_setup(dev);
493 } 854 }
494 if (mboxes & MBOX_ASLE) { 855 if (mboxes & MBOX_ASLE) {
495 DRM_DEBUG_DRIVER("ASLE supported\n"); 856 DRM_DEBUG_DRIVER("ASLE supported\n");
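
The SWSCI_FUNCTION_CODE() packing introduced above keeps bit 0 free for the SCIC in-progress indicator that swsci() polls on. A standalone sketch of the encoding, using the shift values from the macros above:

#include <stdio.h>

#define MAIN_SHIFT	1	/* SWSCI_SCIC_MAIN_FUNCTION_SHIFT */
#define SUB_SHIFT	8	/* SWSCI_SCIC_SUB_FUNCTION_SHIFT */
#define FUNCTION_CODE(main, sub) \
	(((main) << MAIN_SHIFT) | ((sub) << SUB_SHIFT))

int main(void)
{
	/* SBCB (6), sub-function 8: "display power state" */
	unsigned code = FUNCTION_CODE(6, 8);
	/* (6 << 1) | (8 << 8) = 0x80C; bit 0 stays clear */
	printf("SWSCI_SBCB_DISPLAY_POWER_STATE = 0x%03X\n", code);
	return 0;
}
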
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index ddfd0aefe0c0..a98a990fbab3 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -821,14 +821,11 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
821static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, 821static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
822 struct intel_crtc *crtc) 822 struct intel_crtc *crtc)
823{ 823{
824 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
825
826 if (!crtc->active) 824 if (!crtc->active)
827 return -EINVAL; 825 return -EINVAL;
828 826
829 /* can't use the overlay with double wide pipe */ 827 /* can't use the overlay with double wide pipe */
830 if (INTEL_INFO(overlay->dev)->gen < 4 && 828 if (crtc->config.double_wide)
831 (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
832 return -EINVAL; 829 return -EINVAL;
833 830
834 return 0; 831 return 0;
@@ -1056,7 +1053,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1056 return ret; 1053 return ret;
1057 } 1054 }
1058 1055
1059 params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL); 1056 params = kmalloc(sizeof(*params), GFP_KERNEL);
1060 if (!params) 1057 if (!params)
1061 return -ENOMEM; 1058 return -ENOMEM;
1062 1059
@@ -1323,7 +1320,7 @@ void intel_setup_overlay(struct drm_device *dev)
1323 if (!HAS_OVERLAY(dev)) 1320 if (!HAS_OVERLAY(dev))
1324 return; 1321 return;
1325 1322
1326 overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL); 1323 overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
1327 if (!overlay) 1324 if (!overlay)
1328 return; 1325 return;
1329 1326
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 293564a2896a..c81020923ee4 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -50,23 +50,22 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
50 struct intel_crtc_config *pipe_config, 50 struct intel_crtc_config *pipe_config,
51 int fitting_mode) 51 int fitting_mode)
52{ 52{
53 struct drm_display_mode *mode, *adjusted_mode; 53 struct drm_display_mode *adjusted_mode;
54 int x, y, width, height; 54 int x, y, width, height;
55 55
56 mode = &pipe_config->requested_mode;
57 adjusted_mode = &pipe_config->adjusted_mode; 56 adjusted_mode = &pipe_config->adjusted_mode;
58 57
59 x = y = width = height = 0; 58 x = y = width = height = 0;
60 59
61 /* Native modes don't need fitting */ 60 /* Native modes don't need fitting */
62 if (adjusted_mode->hdisplay == mode->hdisplay && 61 if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
63 adjusted_mode->vdisplay == mode->vdisplay) 62 adjusted_mode->vdisplay == pipe_config->pipe_src_h)
64 goto done; 63 goto done;
65 64
66 switch (fitting_mode) { 65 switch (fitting_mode) {
67 case DRM_MODE_SCALE_CENTER: 66 case DRM_MODE_SCALE_CENTER:
68 width = mode->hdisplay; 67 width = pipe_config->pipe_src_w;
69 height = mode->vdisplay; 68 height = pipe_config->pipe_src_h;
70 x = (adjusted_mode->hdisplay - width + 1)/2; 69 x = (adjusted_mode->hdisplay - width + 1)/2;
71 y = (adjusted_mode->vdisplay - height + 1)/2; 70 y = (adjusted_mode->vdisplay - height + 1)/2;
72 break; 71 break;
@@ -74,17 +73,19 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
74 case DRM_MODE_SCALE_ASPECT: 73 case DRM_MODE_SCALE_ASPECT:
75 /* Scale but preserve the aspect ratio */ 74 /* Scale but preserve the aspect ratio */
76 { 75 {
77 u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; 76 u32 scaled_width = adjusted_mode->hdisplay
78 u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; 77 * pipe_config->pipe_src_h;
78 u32 scaled_height = pipe_config->pipe_src_w
79 * adjusted_mode->vdisplay;
79 if (scaled_width > scaled_height) { /* pillar */ 80 if (scaled_width > scaled_height) { /* pillar */
80 width = scaled_height / mode->vdisplay; 81 width = scaled_height / pipe_config->pipe_src_h;
81 if (width & 1) 82 if (width & 1)
82 width++; 83 width++;
83 x = (adjusted_mode->hdisplay - width + 1) / 2; 84 x = (adjusted_mode->hdisplay - width + 1) / 2;
84 y = 0; 85 y = 0;
85 height = adjusted_mode->vdisplay; 86 height = adjusted_mode->vdisplay;
86 } else if (scaled_width < scaled_height) { /* letter */ 87 } else if (scaled_width < scaled_height) { /* letter */
87 height = scaled_width / mode->hdisplay; 88 height = scaled_width / pipe_config->pipe_src_w;
88 if (height & 1) 89 if (height & 1)
89 height++; 90 height++;
90 y = (adjusted_mode->vdisplay - height + 1) / 2; 91 y = (adjusted_mode->vdisplay - height + 1) / 2;
@@ -171,20 +172,96 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
171 return (FACTOR * ratio + FACTOR/2) / FACTOR; 172 return (FACTOR * ratio + FACTOR/2) / FACTOR;
172} 173}
173 174
175static void i965_scale_aspect(struct intel_crtc_config *pipe_config,
176 u32 *pfit_control)
177{
178 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
179 u32 scaled_width = adjusted_mode->hdisplay *
180 pipe_config->pipe_src_h;
181 u32 scaled_height = pipe_config->pipe_src_w *
182 adjusted_mode->vdisplay;
183
184 /* 965+ is easy, it does everything in hw */
185 if (scaled_width > scaled_height)
186 *pfit_control |= PFIT_ENABLE |
187 PFIT_SCALING_PILLAR;
188 else if (scaled_width < scaled_height)
189 *pfit_control |= PFIT_ENABLE |
190 PFIT_SCALING_LETTER;
191 else if (adjusted_mode->hdisplay != pipe_config->pipe_src_w)
192 *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
193}
194
195static void i9xx_scale_aspect(struct intel_crtc_config *pipe_config,
196 u32 *pfit_control, u32 *pfit_pgm_ratios,
197 u32 *border)
198{
199 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
200 u32 scaled_width = adjusted_mode->hdisplay *
201 pipe_config->pipe_src_h;
202 u32 scaled_height = pipe_config->pipe_src_w *
203 adjusted_mode->vdisplay;
204 u32 bits;
205
206 /*
207 * For earlier chips we have to calculate the scaling
208 * ratio by hand and program it into the
209 * PFIT_PGM_RATIO register
210 */
211 if (scaled_width > scaled_height) { /* pillar */
212 centre_horizontally(adjusted_mode,
213 scaled_height /
214 pipe_config->pipe_src_h);
215
216 *border = LVDS_BORDER_ENABLE;
217 if (pipe_config->pipe_src_h != adjusted_mode->vdisplay) {
218 bits = panel_fitter_scaling(pipe_config->pipe_src_h,
219 adjusted_mode->vdisplay);
220
221 *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
222 bits << PFIT_VERT_SCALE_SHIFT);
223 *pfit_control |= (PFIT_ENABLE |
224 VERT_INTERP_BILINEAR |
225 HORIZ_INTERP_BILINEAR);
226 }
227 } else if (scaled_width < scaled_height) { /* letter */
228 centre_vertically(adjusted_mode,
229 scaled_width /
230 pipe_config->pipe_src_w);
231
232 *border = LVDS_BORDER_ENABLE;
233 if (pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
234 bits = panel_fitter_scaling(pipe_config->pipe_src_w,
235 adjusted_mode->hdisplay);
236
237 *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
238 bits << PFIT_VERT_SCALE_SHIFT);
239 *pfit_control |= (PFIT_ENABLE |
240 VERT_INTERP_BILINEAR |
241 HORIZ_INTERP_BILINEAR);
242 }
243 } else {
 244		/* Aspects match, let hw scale both directions */
245 *pfit_control |= (PFIT_ENABLE |
246 VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
247 VERT_INTERP_BILINEAR |
248 HORIZ_INTERP_BILINEAR);
249 }
250}
251
174void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, 252void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
175 struct intel_crtc_config *pipe_config, 253 struct intel_crtc_config *pipe_config,
176 int fitting_mode) 254 int fitting_mode)
177{ 255{
178 struct drm_device *dev = intel_crtc->base.dev; 256 struct drm_device *dev = intel_crtc->base.dev;
179 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; 257 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
180 struct drm_display_mode *mode, *adjusted_mode; 258 struct drm_display_mode *adjusted_mode;
181 259
182 mode = &pipe_config->requested_mode;
183 adjusted_mode = &pipe_config->adjusted_mode; 260 adjusted_mode = &pipe_config->adjusted_mode;
184 261
185 /* Native modes don't need fitting */ 262 /* Native modes don't need fitting */
186 if (adjusted_mode->hdisplay == mode->hdisplay && 263 if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
187 adjusted_mode->vdisplay == mode->vdisplay) 264 adjusted_mode->vdisplay == pipe_config->pipe_src_h)
188 goto out; 265 goto out;
189 266
190 switch (fitting_mode) { 267 switch (fitting_mode) {
@@ -193,81 +270,25 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
 		 * For centered modes, we have to calculate border widths &
 		 * heights and modify the values programmed into the CRTC.
 		 */
-		centre_horizontally(adjusted_mode, mode->hdisplay);
-		centre_vertically(adjusted_mode, mode->vdisplay);
+		centre_horizontally(adjusted_mode, pipe_config->pipe_src_w);
+		centre_vertically(adjusted_mode, pipe_config->pipe_src_h);
 		border = LVDS_BORDER_ENABLE;
 		break;
 	case DRM_MODE_SCALE_ASPECT:
 		/* Scale but preserve the aspect ratio */
-		if (INTEL_INFO(dev)->gen >= 4) {
-			u32 scaled_width = adjusted_mode->hdisplay *
-				mode->vdisplay;
-			u32 scaled_height = mode->hdisplay *
-				adjusted_mode->vdisplay;
-
-			/* 965+ is easy, it does everything in hw */
-			if (scaled_width > scaled_height)
-				pfit_control |= PFIT_ENABLE |
-					PFIT_SCALING_PILLAR;
-			else if (scaled_width < scaled_height)
-				pfit_control |= PFIT_ENABLE |
-					PFIT_SCALING_LETTER;
-			else if (adjusted_mode->hdisplay != mode->hdisplay)
-				pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
-		} else {
-			u32 scaled_width = adjusted_mode->hdisplay *
-				mode->vdisplay;
-			u32 scaled_height = mode->hdisplay *
-				adjusted_mode->vdisplay;
-			/*
-			 * For earlier chips we have to calculate the scaling
-			 * ratio by hand and program it into the
-			 * PFIT_PGM_RATIO register
-			 */
-			if (scaled_width > scaled_height) { /* pillar */
-				centre_horizontally(adjusted_mode,
-						    scaled_height /
-						    mode->vdisplay);
-
-				border = LVDS_BORDER_ENABLE;
-				if (mode->vdisplay != adjusted_mode->vdisplay) {
-					u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
-					pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
-							    bits << PFIT_VERT_SCALE_SHIFT);
-					pfit_control |= (PFIT_ENABLE |
-							 VERT_INTERP_BILINEAR |
-							 HORIZ_INTERP_BILINEAR);
-				}
-			} else if (scaled_width < scaled_height) { /* letter */
-				centre_vertically(adjusted_mode,
-						  scaled_width /
-						  mode->hdisplay);
-
-				border = LVDS_BORDER_ENABLE;
-				if (mode->hdisplay != adjusted_mode->hdisplay) {
-					u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
-					pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
-							    bits << PFIT_VERT_SCALE_SHIFT);
-					pfit_control |= (PFIT_ENABLE |
-							 VERT_INTERP_BILINEAR |
-							 HORIZ_INTERP_BILINEAR);
-				}
-			} else {
-				/* Aspects match, Let hw scale both directions */
-				pfit_control |= (PFIT_ENABLE |
-						 VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
-						 VERT_INTERP_BILINEAR |
-						 HORIZ_INTERP_BILINEAR);
-			}
-		}
+		if (INTEL_INFO(dev)->gen >= 4)
+			i965_scale_aspect(pipe_config, &pfit_control);
+		else
+			i9xx_scale_aspect(pipe_config, &pfit_control,
+					  &pfit_pgm_ratios, &border);
 		break;
 	case DRM_MODE_SCALE_FULLSCREEN:
 		/*
 		 * Full scaling, even if it changes the aspect ratio.
 		 * Fortunately this is all done for us in hw.
 		 */
-		if (mode->vdisplay != adjusted_mode->vdisplay ||
-		    mode->hdisplay != adjusted_mode->hdisplay) {
+		if (pipe_config->pipe_src_h != adjusted_mode->vdisplay ||
+		    pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
 			pfit_control |= PFIT_ENABLE;
 			if (INTEL_INFO(dev)->gen >= 4)
 				pfit_control |= PFIT_SCALING_AUTO;
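
The DRM_MODE_SCALE_ASPECT path, both in the old open-coded branch and presumably inside the new i965_scale_aspect()/i9xx_scale_aspect() helpers, chooses between pillarboxing and letterboxing by comparing two cross products, which avoids division entirely. A standalone sketch of that decision; the names here are illustrative, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    enum fit { FIT_PILLAR, FIT_LETTER, FIT_AUTO };

    /*
     * Fit a src_w x src_h source into an hdisp x vdisp panel while
     * preserving aspect ratio.  src_w/src_h < hdisp/vdisp exactly when
     * src_w * vdisp < hdisp * src_h, so the cross products decide.
     */
    static enum fit scale_aspect(uint32_t src_w, uint32_t src_h,
                                 uint32_t hdisp, uint32_t vdisp)
    {
            uint64_t scaled_width = (uint64_t)hdisp * src_h;
            uint64_t scaled_height = (uint64_t)src_w * vdisp;

            if (scaled_width > scaled_height)
                    return FIT_PILLAR;      /* panel wider: bars left/right */
            if (scaled_width < scaled_height)
                    return FIT_LETTER;      /* panel taller: bars top/bottom */
            return FIT_AUTO;                /* aspects match: scale both */
    }

    int main(void)
    {
            /* A 4:3 source on a 16:9 panel needs pillarboxing. */
            printf("%d\n", scale_aspect(1024, 768, 1920, 1080) == FIT_PILLAR);
            return 0;
    }
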
@@ -308,7 +329,7 @@ static int is_backlight_combination_mode(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (INTEL_INFO(dev)->gen >= 4)
+	if (IS_GEN4(dev))
 		return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
 
 	if (IS_GEN2(dev))
@@ -351,6 +372,9 @@ static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
 			I915_WRITE(BLC_PWM_CTL2,
 				   dev_priv->regfile.saveBLC_PWM_CTL2);
 		}
+
+		if (IS_VALLEYVIEW(dev) && !val)
+			val = 0x0f42ffff;
 	}
 
 	return val;
@@ -441,7 +465,8 @@ static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
 	I915_WRITE(BLC_PWM_CPU_CTL, val | level);
 }
 
-static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level)
+static void intel_panel_actually_set_backlight(struct drm_device *dev,
+					       u32 level)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 tmp;
@@ -607,10 +632,24 @@ set_level:
 	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 }
 
+/* FIXME: use VBT vals to init PWM_CTL and PWM_CTL2 correctly */
+static void intel_panel_init_backlight_regs(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_VALLEYVIEW(dev)) {
+		u32 cur_val = I915_READ(BLC_PWM_CTL) &
+			BACKLIGHT_DUTY_CYCLE_MASK;
+		I915_WRITE(BLC_PWM_CTL, (0xf42 << 16) | cur_val);
+	}
+}
+
 static void intel_panel_init_backlight(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	intel_panel_init_backlight_regs(dev);
+
 	dev_priv->backlight.level = intel_panel_get_backlight(dev);
 	dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
 }
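
intel_panel_init_backlight_regs above keeps whatever duty cycle the firmware programmed (the low half of BLC_PWM_CTL, per BACKLIGHT_DUTY_CYCLE_MASK) and replaces only the PWM frequency field with the 0xf42 default. A minimal model of that read-modify-write on a plain variable, assuming the 16/16 field split implied by the mask and shift in the hunk:

    #include <stdint.h>
    #include <assert.h>

    #define DUTY_CYCLE_MASK 0xffffu     /* low 16 bits: duty cycle */
    #define FREQ_SHIFT      16          /* high 16 bits: PWM frequency */
    #define DEFAULT_FREQ    0xf42u      /* default used in the patch above */

    /* Replace only the frequency field, preserving the firmware's duty cycle. */
    static uint32_t init_pwm_ctl(uint32_t reg)
    {
            uint32_t cur_duty = reg & DUTY_CYCLE_MASK;

            return (DEFAULT_FREQ << FREQ_SHIFT) | cur_duty;
    }

    int main(void)
    {
            assert(init_pwm_ctl(0x12345678) == ((0xf42u << 16) | 0x5678));
            return 0;
    }
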
@@ -637,7 +676,7 @@ intel_panel_detect(struct drm_device *dev)
 	}
 }
 
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
 static int intel_panel_update_status(struct backlight_device *bd)
 {
 	struct drm_device *dev = bl_get_data(bd);
@@ -683,7 +722,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
 	}
 	dev_priv->backlight.device =
 		backlight_device_register("intel_backlight",
-					  &connector->kdev, dev,
+					  connector->kdev, dev,
 					  &intel_panel_bl_ops, &props);
 
 	if (IS_ERR(dev_priv->backlight.device)) {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index dd176b7296c1..008ec0bb017f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -43,14 +43,6 @@
  * i915.i915_enable_fbc parameter
  */
 
-static bool intel_crtc_active(struct drm_crtc *crtc)
-{
-	/* Be paranoid as we can arrive here with only partial
-	 * state retrieved from the hardware during setup.
-	 */
-	return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
-}
-
 static void i8xx_disable_fbc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -378,7 +370,7 @@ static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
378 370
379 intel_cancel_fbc_work(dev_priv); 371 intel_cancel_fbc_work(dev_priv);
380 372
381 work = kzalloc(sizeof *work, GFP_KERNEL); 373 work = kzalloc(sizeof(*work), GFP_KERNEL);
382 if (work == NULL) { 374 if (work == NULL) {
383 DRM_ERROR("Failed to allocate FBC work structure\n"); 375 DRM_ERROR("Failed to allocate FBC work structure\n");
384 dev_priv->display.enable_fbc(crtc, interval); 376 dev_priv->display.enable_fbc(crtc, interval);
@@ -458,7 +450,8 @@ void intel_update_fbc(struct drm_device *dev)
 	struct drm_framebuffer *fb;
 	struct intel_framebuffer *intel_fb;
 	struct drm_i915_gem_object *obj;
-	unsigned int max_hdisplay, max_vdisplay;
+	const struct drm_display_mode *adjusted_mode;
+	unsigned int max_width, max_height;
 
 	if (!I915_HAS_FBC(dev)) {
 		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
@@ -502,6 +495,7 @@ void intel_update_fbc(struct drm_device *dev)
 	fb = crtc->fb;
 	intel_fb = to_intel_framebuffer(fb);
 	obj = intel_fb->obj;
+	adjusted_mode = &intel_crtc->config.adjusted_mode;
 
 	if (i915_enable_fbc < 0 &&
 	    INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
@@ -514,8 +508,8 @@ void intel_update_fbc(struct drm_device *dev)
 		DRM_DEBUG_KMS("fbc disabled per module param\n");
 		goto out_disable;
 	}
-	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
-	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
+	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
 		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
 			DRM_DEBUG_KMS("mode incompatible with compression, "
 				      "disabling\n");
@@ -523,14 +517,14 @@ void intel_update_fbc(struct drm_device *dev)
 	}
 
 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
-		max_hdisplay = 4096;
-		max_vdisplay = 2048;
+		max_width = 4096;
+		max_height = 2048;
 	} else {
-		max_hdisplay = 2048;
-		max_vdisplay = 1536;
+		max_width = 2048;
+		max_height = 1536;
 	}
-	if ((crtc->mode.hdisplay > max_hdisplay) ||
-	    (crtc->mode.vdisplay > max_vdisplay)) {
+	if (intel_crtc->config.pipe_src_w > max_width ||
+	    intel_crtc->config.pipe_src_h > max_height) {
 		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
 			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
 		goto out_disable;
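
The size gate above now compares the pipe source size (what the planes scan out, before the panel fitter) against per-generation maxima. A sketch of the table-plus-check, with the limits copied from the hunk and the predicate name invented here:

    #include <stdbool.h>
    #include <stdio.h>

    struct fbc_limits { unsigned int max_w, max_h; };

    /* G4X and gen5+ can compress larger pipes than older parts. */
    static struct fbc_limits fbc_limits(bool g4x_or_gen5_plus)
    {
            if (g4x_or_gen5_plus)
                    return (struct fbc_limits){ 4096, 2048 };
            return (struct fbc_limits){ 2048, 1536 };
    }

    static bool fbc_size_ok(bool newer, unsigned int w, unsigned int h)
    {
            struct fbc_limits lim = fbc_limits(newer);

            return w <= lim.max_w && h <= lim.max_h;
    }

    int main(void)
    {
            printf("%d %d\n", fbc_size_ok(true, 3840, 2160),  /* too tall */
                              fbc_size_ok(true, 3840, 2048));
            return 0;
    }
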
@@ -1087,8 +1081,9 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
 	return enabled;
 }
 
-static void pineview_update_wm(struct drm_device *dev)
+static void pineview_update_wm(struct drm_crtc *unused_crtc)
 {
+	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	const struct cxsr_latency *latency;
@@ -1105,8 +1100,12 @@ static void pineview_update_wm(struct drm_device *dev)
 
 	crtc = single_enabled_crtc(dev);
 	if (crtc) {
-		int clock = crtc->mode.clock;
+		const struct drm_display_mode *adjusted_mode;
 		int pixel_size = crtc->fb->bits_per_pixel / 8;
+		int clock;
+
+		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+		clock = adjusted_mode->crtc_clock;
 
 		/* Display SR */
 		wm = intel_calculate_wm(clock, &pineview_display_wm,
@@ -1166,6 +1165,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
 			    int *cursor_wm)
 {
 	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
 	int htotal, hdisplay, clock, pixel_size;
 	int line_time_us, line_count;
 	int entries, tlb_miss;
@@ -1177,9 +1177,10 @@ static bool g4x_compute_wm0(struct drm_device *dev,
 		return false;
 	}
 
-	htotal = crtc->mode.htotal;
-	hdisplay = crtc->mode.hdisplay;
-	clock = crtc->mode.clock;
+	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+	clock = adjusted_mode->crtc_clock;
+	htotal = adjusted_mode->htotal;
+	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
 	/* Use the small buffer method to calculate plane watermark */
@@ -1250,6 +1251,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
 			    int *display_wm, int *cursor_wm)
 {
 	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
 	int hdisplay, htotal, pixel_size, clock;
 	unsigned long line_time_us;
 	int line_count, line_size;
@@ -1262,9 +1264,10 @@ static bool g4x_compute_srwm(struct drm_device *dev,
 	}
 
 	crtc = intel_get_crtc_for_plane(dev, plane);
-	hdisplay = crtc->mode.hdisplay;
-	htotal = crtc->mode.htotal;
-	clock = crtc->mode.clock;
+	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+	clock = adjusted_mode->crtc_clock;
+	htotal = adjusted_mode->htotal;
+	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
 	line_time_us = (htotal * 1000) / clock;
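
Both g4x watermark helpers now take their timings from the adjusted (hardware) mode rather than the user-requested one. The core arithmetic — line time from htotal and the dot clock, then how many lines fit into a given memory latency — can be modeled standalone; the formula mirrors the two lines above and in g4x_compute_srwm, the function name is invented:

    #include <stdio.h>

    /*
     * How many scanout lines elapse while memory sleeps for latency_ns?
     * clock is the adjusted mode's dot clock in kHz, so
     * (htotal * 1000) / clock comes out in microseconds per line.
     */
    static int sr_line_count(int clock_khz, int htotal, int latency_ns)
    {
            int line_time_us = (htotal * 1000) / clock_khz;

            /* latency_ns / line_time_us is lines x1000; round up one line. */
            return (latency_ns / line_time_us + 1000) / 1000;
    }

    int main(void)
    {
            /* ~148.5 MHz dot clock, htotal 2200 (1080p-ish), 12us latency. */
            printf("lines: %d\n", sr_line_count(148500, 2200, 12000));
            return 0;
    }
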
@@ -1303,7 +1306,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
 	if (!intel_crtc_active(crtc))
 		return false;
 
-	clock = crtc->mode.clock;	/* VESA DOT Clock */
+	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
 	pixel_size = crtc->fb->bits_per_pixel / 8;	/* BPP */
 
 	entries = (clock / 1000) * pixel_size;
@@ -1365,8 +1368,9 @@ static void vlv_update_drain_latency(struct drm_device *dev)
 
 #define single_plane_enabled(mask) is_power_of_2(mask)
 
-static void valleyview_update_wm(struct drm_device *dev)
+static void valleyview_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	static const int sr_latency_ns = 12000;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1424,8 +1428,9 @@ static void valleyview_update_wm(struct drm_device *dev)
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void g4x_update_wm(struct drm_device *dev)
+static void g4x_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	static const int sr_latency_ns = 12000;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1476,8 +1481,9 @@ static void g4x_update_wm(struct drm_device *dev)
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void i965_update_wm(struct drm_device *dev)
+static void i965_update_wm(struct drm_crtc *unused_crtc)
 {
+	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	int srwm = 1;
@@ -1488,9 +1494,11 @@ static void i965_update_wm(struct drm_device *dev)
 	if (crtc) {
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 12000;
-		int clock = crtc->mode.clock;
-		int htotal = crtc->mode.htotal;
-		int hdisplay = crtc->mode.hdisplay;
+		const struct drm_display_mode *adjusted_mode =
+			&to_intel_crtc(crtc)->config.adjusted_mode;
+		int clock = adjusted_mode->crtc_clock;
+		int htotal = adjusted_mode->htotal;
+		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 		int pixel_size = crtc->fb->bits_per_pixel / 8;
 		unsigned long line_time_us;
 		int entries;
@@ -1541,8 +1549,9 @@ static void i965_update_wm(struct drm_device *dev)
 	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void i9xx_update_wm(struct drm_device *dev)
+static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 {
+	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const struct intel_watermark_params *wm_info;
 	uint32_t fwater_lo;
@@ -1562,11 +1571,13 @@ static void i9xx_update_wm(struct drm_device *dev)
 	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
 	crtc = intel_get_crtc_for_plane(dev, 0);
 	if (intel_crtc_active(crtc)) {
+		const struct drm_display_mode *adjusted_mode;
 		int cpp = crtc->fb->bits_per_pixel / 8;
 		if (IS_GEN2(dev))
 			cpp = 4;
 
-		planea_wm = intel_calculate_wm(crtc->mode.clock,
+		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
 					       wm_info, fifo_size, cpp,
 					       latency_ns);
 		enabled = crtc;
@@ -1576,11 +1587,13 @@ static void i9xx_update_wm(struct drm_device *dev)
 	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
 	crtc = intel_get_crtc_for_plane(dev, 1);
 	if (intel_crtc_active(crtc)) {
+		const struct drm_display_mode *adjusted_mode;
 		int cpp = crtc->fb->bits_per_pixel / 8;
 		if (IS_GEN2(dev))
 			cpp = 4;
 
-		planeb_wm = intel_calculate_wm(crtc->mode.clock,
+		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
 					       wm_info, fifo_size, cpp,
 					       latency_ns);
 		if (enabled == NULL)
@@ -1607,9 +1620,11 @@ static void i9xx_update_wm(struct drm_device *dev)
 	if (HAS_FW_BLC(dev) && enabled) {
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 6000;
-		int clock = enabled->mode.clock;
-		int htotal = enabled->mode.htotal;
-		int hdisplay = enabled->mode.hdisplay;
+		const struct drm_display_mode *adjusted_mode =
+			&to_intel_crtc(enabled)->config.adjusted_mode;
+		int clock = adjusted_mode->crtc_clock;
+		int htotal = adjusted_mode->htotal;
+		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 		int pixel_size = enabled->fb->bits_per_pixel / 8;
 		unsigned long line_time_us;
 		int entries;
1660 1675
1661static void i830_update_wm(struct drm_device *dev) 1676static void i830_update_wm(struct drm_crtc *unused_crtc)
1662{ 1677{
1678 struct drm_device *dev = unused_crtc->dev;
1663 struct drm_i915_private *dev_priv = dev->dev_private; 1679 struct drm_i915_private *dev_priv = dev->dev_private;
1664 struct drm_crtc *crtc; 1680 struct drm_crtc *crtc;
1681 const struct drm_display_mode *adjusted_mode;
1665 uint32_t fwater_lo; 1682 uint32_t fwater_lo;
1666 int planea_wm; 1683 int planea_wm;
1667 1684
@@ -1669,7 +1686,9 @@ static void i830_update_wm(struct drm_device *dev)
 	if (crtc == NULL)
 		return;
 
-	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
+				       &i830_wm_info,
 				       dev_priv->display.get_fifo_size(dev, 0),
 				       4, latency_ns);
 	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -1741,6 +1760,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
 				  int *fbc_wm, int *display_wm, int *cursor_wm)
 {
 	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
 	unsigned long line_time_us;
 	int hdisplay, htotal, pixel_size, clock;
 	int line_count, line_size;
@@ -1753,9 +1773,10 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
 	}
 
 	crtc = intel_get_crtc_for_plane(dev, plane);
-	hdisplay = crtc->mode.hdisplay;
-	htotal = crtc->mode.htotal;
-	clock = crtc->mode.clock;
+	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+	clock = adjusted_mode->crtc_clock;
+	htotal = adjusted_mode->htotal;
+	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
 	line_time_us = (htotal * 1000) / clock;
@@ -1785,8 +1806,9 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
 				   display, cursor);
 }
 
-static void ironlake_update_wm(struct drm_device *dev)
+static void ironlake_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int fbc_wm, plane_wm, cursor_wm;
 	unsigned int enabled;
@@ -1868,8 +1890,9 @@ static void ironlake_update_wm(struct drm_device *dev)
 	 */
 }
 
-static void sandybridge_update_wm(struct drm_device *dev)
+static void sandybridge_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int latency = dev_priv->wm.pri_latency[0] * 100;	/* In unit 0.1us */
 	u32 val;
@@ -1970,8 +1993,9 @@ static void sandybridge_update_wm(struct drm_device *dev)
 			   cursor_wm);
 }
 
-static void ivybridge_update_wm(struct drm_device *dev)
+static void ivybridge_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int latency = dev_priv->wm.pri_latency[0] * 100;	/* In unit 0.1us */
 	u32 val;
@@ -2098,7 +2122,7 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pixel_rate;
 
-	pixel_rate = intel_crtc->config.adjusted_mode.clock;
+	pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
 
 	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
 	 * adjust the pixel_rate here. */
@@ -2107,8 +2131,8 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
 		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
 		uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
 
-		pipe_w = intel_crtc->config.requested_mode.hdisplay;
-		pipe_h = intel_crtc->config.requested_mode.vdisplay;
+		pipe_w = intel_crtc->config.pipe_src_w;
+		pipe_h = intel_crtc->config.pipe_src_h;
 		pfit_w = (pfit_size >> 16) & 0xFFFF;
 		pfit_h = pfit_size & 0xFFFF;
 		if (pipe_w < pfit_w)
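
The panel-fitter window that ilk_pipe_pixel_rate compares against the pipe source size is packed into one register-format word, width in the upper 16 bits and height in the lower 16. A sketch of the unpack, mirroring the two mask lines above (function name invented):

    #include <stdint.h>
    #include <assert.h>

    static void unpack_pfit_size(uint32_t pfit_size,
                                 uint32_t *pfit_w, uint32_t *pfit_h)
    {
            *pfit_w = (pfit_size >> 16) & 0xffff;   /* width: high half */
            *pfit_h = pfit_size & 0xffff;           /* height: low half */
    }

    int main(void)
    {
            uint32_t w, h;

            unpack_pfit_size((1920u << 16) | 1080u, &w, &h);
            assert(w == 1920 && h == 1080);
            return 0;
    }
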
@@ -2196,7 +2220,7 @@ struct intel_wm_config {
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t mem_value,
 				   bool is_lp)
 {
@@ -2225,7 +2249,7 @@ static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t mem_value)
 {
 	uint32_t method1, method2;
@@ -2248,7 +2272,7 @@ static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t mem_value)
 {
 	if (!params->active || !params->cur.enabled)
@@ -2262,7 +2286,7 @@ static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
 }
 
 /* Only for WM_LP. */
-static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t pri_val)
 {
 	if (!params->active || !params->pri.enabled)
@@ -2413,7 +2437,7 @@ static bool ilk_check_wm(int level,
 
 static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
 				 int level,
-				 struct hsw_pipe_wm_parameters *p,
+				 const struct hsw_pipe_wm_parameters *p,
 				 struct intel_wm_level *result)
 {
 	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2435,8 +2459,8 @@ static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
 }
 
 static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
-			      int level, struct hsw_wm_maximums *max,
-			      struct hsw_pipe_wm_parameters *params,
+			      int level, const struct hsw_wm_maximums *max,
+			      const struct hsw_pipe_wm_parameters *params,
 			      struct intel_wm_level *result)
 {
 	enum pipe pipe;
@@ -2454,33 +2478,31 @@ static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
 	return ilk_check_wm(level, max, result);
 }
 
-static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
-				    enum pipe pipe,
-				    struct hsw_pipe_wm_parameters *params)
+
+static uint32_t hsw_compute_wm_pipe(struct drm_device *dev,
+				    const struct hsw_pipe_wm_parameters *params)
 {
-	uint32_t pri_val, cur_val, spr_val;
-	/* WM0 latency values stored in 0.1us units */
-	uint16_t pri_latency = dev_priv->wm.pri_latency[0];
-	uint16_t spr_latency = dev_priv->wm.spr_latency[0];
-	uint16_t cur_latency = dev_priv->wm.cur_latency[0];
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_wm_config config = {
+		.num_pipes_active = 1,
+		.sprites_enabled = params->spr.enabled,
+		.sprites_scaled = params->spr.scaled,
+	};
+	struct hsw_wm_maximums max;
+	struct intel_wm_level res;
+
+	if (!params->active)
+		return 0;
+
+	ilk_wm_max(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
 
-	pri_val = ilk_compute_pri_wm(params, pri_latency, false);
-	spr_val = ilk_compute_spr_wm(params, spr_latency);
-	cur_val = ilk_compute_cur_wm(params, cur_latency);
+	ilk_compute_wm_level(dev_priv, 0, params, &res);
 
-	WARN(pri_val > 127,
-	     "Primary WM error, mode not supported for pipe %c\n",
-	     pipe_name(pipe));
-	WARN(spr_val > 127,
-	     "Sprite WM error, mode not supported for pipe %c\n",
-	     pipe_name(pipe));
-	WARN(cur_val > 63,
-	     "Cursor WM error, mode not supported for pipe %c\n",
-	     pipe_name(pipe));
+	ilk_check_wm(0, &max, &res);
 
-	return (pri_val << WM0_PIPE_PLANE_SHIFT) |
-	       (spr_val << WM0_PIPE_SPRITE_SHIFT) |
-	       cur_val;
+	return (res.pri_val << WM0_PIPE_PLANE_SHIFT) |
+	       (res.spr_val << WM0_PIPE_SPRITE_SHIFT) |
+	       res.cur_val;
 }
 
 static uint32_t
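
Whichever way the level-0 values are computed, the function ends by packing three watermarks into one WM0 register word. The shifts below are stand-ins for WM0_PIPE_PLANE_SHIFT and WM0_PIPE_SPRITE_SHIFT (not taken from this diff); the packing pattern is the point:

    #include <stdint.h>
    #include <assert.h>

    #define PLANE_SHIFT  16     /* hypothetical stand-in for WM0_PIPE_PLANE_SHIFT */
    #define SPRITE_SHIFT 8      /* hypothetical stand-in for WM0_PIPE_SPRITE_SHIFT */

    /* Primary, sprite and cursor watermarks share one register. */
    static uint32_t pack_wm0(uint32_t pri, uint32_t spr, uint32_t cur)
    {
            return (pri << PLANE_SHIFT) | (spr << SPRITE_SHIFT) | cur;
    }

    int main(void)
    {
            assert(pack_wm0(0x7f, 0x7f, 0x3f) == 0x7f7f3f);
            return 0;
    }
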
@@ -2554,19 +2576,22 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
 		wm[3] *= 2;
 }
 
-static void intel_print_wm_latency(struct drm_device *dev,
-				   const char *name,
-				   const uint16_t wm[5])
+static int ilk_wm_max_level(const struct drm_device *dev)
 {
-	int level, max_level;
-
 	/* how many WM levels are we expecting */
 	if (IS_HASWELL(dev))
-		max_level = 4;
+		return 4;
 	else if (INTEL_INFO(dev)->gen >= 6)
-		max_level = 3;
+		return 3;
 	else
-		max_level = 2;
+		return 2;
+}
+
+static void intel_print_wm_latency(struct drm_device *dev,
+				   const char *name,
+				   const uint16_t wm[5])
+{
+	int level, max_level = ilk_wm_max_level(dev);
 
 	for (level = 0; level <= max_level; level++) {
 		unsigned int latency = wm[level];
@@ -2633,8 +2658,7 @@ static void hsw_compute_wm_parameters(struct drm_device *dev,
 		p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
 		p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
 		p->cur.bytes_per_pixel = 4;
-		p->pri.horiz_pixels =
-			intel_crtc->config.requested_mode.hdisplay;
+		p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
 		p->cur.horiz_pixels = 64;
 		/* TODO: for now, assume primary and cursor planes are always enabled. */
 		p->pri.enabled = true;
@@ -2664,8 +2688,8 @@ static void hsw_compute_wm_parameters(struct drm_device *dev,
 }
 
 static void hsw_compute_wm_results(struct drm_device *dev,
-				   struct hsw_pipe_wm_parameters *params,
-				   struct hsw_wm_maximums *lp_maximums,
+				   const struct hsw_pipe_wm_parameters *params,
+				   const struct hsw_wm_maximums *lp_maximums,
 				   struct hsw_wm_values *results)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2709,7 +2733,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
 	}
 
 	for_each_pipe(pipe)
-		results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, pipe,
+		results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev,
 							     &params[pipe]);
 
 	for_each_pipe(pipe) {
@@ -2841,8 +2865,9 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
 	I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
 }
 
-static void haswell_update_wm(struct drm_device *dev)
+static void haswell_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
 	struct hsw_pipe_wm_parameters params[3];
@@ -2879,7 +2904,7 @@ static void haswell_update_sprite_wm(struct drm_plane *plane,
 	intel_plane->wm.horiz_pixels = sprite_width;
 	intel_plane->wm.bytes_per_pixel = pixel_size;
 
-	haswell_update_wm(plane->dev);
+	haswell_update_wm(crtc);
 }
 
 static bool
2885static bool 2910static bool
@@ -2898,7 +2923,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
 		return false;
 	}
 
-	clock = crtc->mode.clock;
+	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
 
 	/* Use the small buffer method to calculate the sprite watermark */
 	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
@@ -2933,7 +2958,7 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
 	}
 
 	crtc = intel_get_crtc_for_plane(dev, plane);
-	clock = crtc->mode.clock;
+	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
 	if (!clock) {
 		*sprite_wm = 0;
 		return false;
@@ -3076,12 +3101,12 @@ static void sandybridge_update_sprite_wm(struct drm_plane *plane,
  * We don't use the sprite, so we can ignore that.  And on Crestline we have
  * to set the non-SR watermarks to 8.
  */
-void intel_update_watermarks(struct drm_device *dev)
+void intel_update_watermarks(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 
 	if (dev_priv->display.update_wm)
-		dev_priv->display.update_wm(dev);
+		dev_priv->display.update_wm(crtc);
 }
 
 void intel_update_sprite_watermarks(struct drm_plane *plane,
@@ -3287,6 +3312,98 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
 	return limits;
 }
 
+static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+{
+	int new_power;
+
+	new_power = dev_priv->rps.power;
+	switch (dev_priv->rps.power) {
+	case LOW_POWER:
+		if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
+			new_power = BETWEEN;
+		break;
+
+	case BETWEEN:
+		if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
+			new_power = LOW_POWER;
+		else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
+			new_power = HIGH_POWER;
+		break;
+
+	case HIGH_POWER:
+		if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
+			new_power = BETWEEN;
+		break;
+	}
+	/* Max/min bins are special */
+	if (val == dev_priv->rps.min_delay)
+		new_power = LOW_POWER;
+	if (val == dev_priv->rps.max_delay)
+		new_power = HIGH_POWER;
+	if (new_power == dev_priv->rps.power)
+		return;
+
+	/* Note the units here are not exactly 1us, but 1280ns. */
+	switch (new_power) {
+	case LOW_POWER:
+		/* Upclock if more than 95% busy over 16ms */
+		I915_WRITE(GEN6_RP_UP_EI, 12500);
+		I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
+
+		/* Downclock if less than 85% busy over 32ms */
+		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
+
+		I915_WRITE(GEN6_RP_CONTROL,
+			   GEN6_RP_MEDIA_TURBO |
+			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+			   GEN6_RP_MEDIA_IS_GFX |
+			   GEN6_RP_ENABLE |
+			   GEN6_RP_UP_BUSY_AVG |
+			   GEN6_RP_DOWN_IDLE_AVG);
+		break;
+
+	case BETWEEN:
+		/* Upclock if more than 90% busy over 13ms */
+		I915_WRITE(GEN6_RP_UP_EI, 10250);
+		I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
+
+		/* Downclock if less than 75% busy over 32ms */
+		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
+
+		I915_WRITE(GEN6_RP_CONTROL,
+			   GEN6_RP_MEDIA_TURBO |
+			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+			   GEN6_RP_MEDIA_IS_GFX |
+			   GEN6_RP_ENABLE |
+			   GEN6_RP_UP_BUSY_AVG |
+			   GEN6_RP_DOWN_IDLE_AVG);
+		break;
+
+	case HIGH_POWER:
+		/* Upclock if more than 85% busy over 10ms */
+		I915_WRITE(GEN6_RP_UP_EI, 8000);
+		I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
+
+		/* Downclock if less than 60% busy over 32ms */
+		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
+
+		I915_WRITE(GEN6_RP_CONTROL,
+			   GEN6_RP_MEDIA_TURBO |
+			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+			   GEN6_RP_MEDIA_IS_GFX |
+			   GEN6_RP_ENABLE |
+			   GEN6_RP_UP_BUSY_AVG |
+			   GEN6_RP_DOWN_IDLE_AVG);
+		break;
+	}
+
+	dev_priv->rps.power = new_power;
+	dev_priv->rps.last_adj = 0;
+}
+
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
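
gen6_set_rps_thresholds is a three-state hysteresis machine: a requested frequency only moves the controller between the LOW/BETWEEN/HIGH bands when it crosses band-specific thresholds in the right direction, and the min/max bins are pinned to their bands. A compilable model of just the transition rules, with made-up frequency values in the test:

    #include <stdio.h>

    enum rps_power { LOW_POWER, BETWEEN, HIGH_POWER };

    struct rps { int cur, min, max, rpe, rp0, rp1; enum rps_power power; };

    /*
     * Mirror of the transition rules above: climb a band only when the
     * request is both above the band threshold and above the current
     * frequency, and symmetrically on the way down.
     */
    static enum rps_power next_power(const struct rps *r, int val)
    {
            enum rps_power p = r->power;

            switch (r->power) {
            case LOW_POWER:
                    if (val > r->rpe + 1 && val > r->cur)
                            p = BETWEEN;
                    break;
            case BETWEEN:
                    if (val <= r->rpe && val < r->cur)
                            p = LOW_POWER;
                    else if (val >= r->rp0 && val > r->cur)
                            p = HIGH_POWER;
                    break;
            case HIGH_POWER:
                    if (val < (r->rp1 + r->rp0) / 2 && val < r->cur)
                            p = BETWEEN;
                    break;
            }
            /* Max/min bins are special. */
            if (val == r->min)
                    p = LOW_POWER;
            if (val == r->max)
                    p = HIGH_POWER;
            return p;
    }

    int main(void)
    {
            struct rps r = { .cur = 6, .min = 3, .max = 20,
                             .rpe = 10, .rp0 = 16, .rp1 = 12,
                             .power = LOW_POWER };
            printf("%d\n", next_power(&r, 14) == BETWEEN);
            return 0;
    }
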
@@ -3299,6 +3416,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 	if (val == dev_priv->rps.cur_delay)
 		return;
 
+	gen6_set_rps_thresholds(dev_priv, val);
+
 	if (IS_HASWELL(dev))
 		I915_WRITE(GEN6_RPNSWREQ,
 			   HSW_FREQUENCY(val));
@@ -3320,6 +3439,28 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 	trace_intel_gpu_freq_change(val * 50);
 }
 
+void gen6_rps_idle(struct drm_i915_private *dev_priv)
+{
+	mutex_lock(&dev_priv->rps.hw_lock);
+	if (dev_priv->info->is_valleyview)
+		valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+	else
+		gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+	dev_priv->rps.last_adj = 0;
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+void gen6_rps_boost(struct drm_i915_private *dev_priv)
+{
+	mutex_lock(&dev_priv->rps.hw_lock);
+	if (dev_priv->info->is_valleyview)
+		valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+	else
+		gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+	dev_priv->rps.last_adj = 0;
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
 /*
  * Wait until the previous freq change has completed,
  * or the timeout elapsed, and then update our notion
@@ -3501,7 +3642,10 @@ static void gen6_enable_rps(struct drm_device *dev)
 
 	/* In units of 50MHz */
 	dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
-	dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+	dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
+	dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff;
+	dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff;
+	dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
 	dev_priv->rps.cur_delay = 0;
 
 	/* disable the counters and set deterministic thresholds */
@@ -3549,38 +3693,9 @@ static void gen6_enable_rps(struct drm_device *dev)
 		   GEN6_RC_CTL_EI_MODE(1) |
 		   GEN6_RC_CTL_HW_ENABLE);
 
-	if (IS_HASWELL(dev)) {
-		I915_WRITE(GEN6_RPNSWREQ,
-			   HSW_FREQUENCY(10));
-		I915_WRITE(GEN6_RC_VIDEO_FREQ,
-			   HSW_FREQUENCY(12));
-	} else {
-		I915_WRITE(GEN6_RPNSWREQ,
-			   GEN6_FREQUENCY(10) |
-			   GEN6_OFFSET(0) |
-			   GEN6_AGGRESSIVE_TURBO);
-		I915_WRITE(GEN6_RC_VIDEO_FREQ,
-			   GEN6_FREQUENCY(12));
-	}
-
-	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
-	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-		   dev_priv->rps.max_delay << 24 |
-		   dev_priv->rps.min_delay << 16);
-
-	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
-	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
-	I915_WRITE(GEN6_RP_UP_EI, 66000);
-	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
+	/* Power down if completely idle for over 50ms */
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-	I915_WRITE(GEN6_RP_CONTROL,
-		   GEN6_RP_MEDIA_TURBO |
-		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
-		   GEN6_RP_MEDIA_IS_GFX |
-		   GEN6_RP_ENABLE |
-		   GEN6_RP_UP_BUSY_AVG |
-		   (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
 
 	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
 	if (!ret) {
@@ -3596,7 +3711,8 @@ static void gen6_enable_rps(struct drm_device *dev)
 			DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
 	}
 
-	gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
+	dev_priv->rps.power = HIGH_POWER; /* force a reset */
+	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
 
 	gen6_enable_rps_interrupts(dev);
 
@@ -3638,9 +3754,9 @@ void gen6_update_ring_freq(struct drm_device *dev)
 	/* Convert from kHz to MHz */
 	max_ia_freq /= 1000;
 
-	min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK);
-	/* convert DDR frequency from units of 133.3MHz to bandwidth */
-	min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3;
+	min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK) & 0xf;
+	/* convert DDR frequency from units of 266.6MHz to bandwidth */
+	min_ring_freq = mult_frac(min_ring_freq, 8, 3);
 
 	/*
 	 * For each potential GPU frequency, load a ring frequency we'd like
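
The ring-frequency math switches from open-coded (x * n + k) / d expressions (which also carried a small rounding bias) to mult_frac(x, n, d), which evaluates x * n / d without ever forming the full x * n intermediate: it splits x by the divisor first, so the largest product involved is bounded by (d - 1) * n. A userspace model, assuming the standard kernel definition of the macro:

    #include <assert.h>

    /*
     * Same shape as the kernel's mult_frac():
     * (x / d) * n + ((x % d) * n) / d == (x * n) / d for non-negative
     * integers, but without the overflow-prone x * n product.
     */
    #define mult_frac(x, n, d) ((x) / (d) * (n) + (x) % (d) * (n) / (d))

    int main(void)
    {
            /* DDR field in 266.6 MHz units -> ring bandwidth units (8/3). */
            assert(mult_frac(6u, 8u, 3u) == 16u);
            /* HSW ring frequency: 5/4 of the GPU frequency. */
            assert(mult_frac(8u, 5u, 4u) == 10u);
            return 0;
    }
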
@@ -3653,7 +3769,7 @@ void gen6_update_ring_freq(struct drm_device *dev)
 		unsigned int ia_freq = 0, ring_freq = 0;
 
 		if (IS_HASWELL(dev)) {
-			ring_freq = (gpu_freq * 5 + 3) / 4;
+			ring_freq = mult_frac(gpu_freq, 5, 4);
 			ring_freq = max(min_ring_freq, ring_freq);
 			/* leave ia_freq as the default, chosen by cpufreq */
 		} else {
@@ -3709,24 +3825,6 @@ int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
 	return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
 }
 
-static void vlv_rps_timer_work(struct work_struct *work)
-{
-	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-						    rps.vlv_work.work);
-
-	/*
-	 * Timer fired, we must be idle.  Drop to min voltage state.
-	 * Note: we use RPe here since it should match the
-	 * Vmin we were shooting for.  That should give us better
-	 * perf when we come back out of RC6 than if we used the
-	 * min freq available.
-	 */
-	mutex_lock(&dev_priv->rps.hw_lock);
-	if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
-		valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
-	mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
 static void valleyview_setup_pctx(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3773,13 +3871,14 @@ static void valleyview_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
-	u32 gtfifodbg, val;
+	u32 gtfifodbg, val, rc6_mode = 0;
 	int i;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
 	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
-		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+				 gtfifodbg);
 		I915_WRITE(GTFIFODBG, gtfifodbg);
 	}
 
@@ -3812,9 +3911,13 @@ static void valleyview_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
 
 	/* allows RC6 residency counter to work */
-	I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
-	I915_WRITE(GEN6_RC_CONTROL,
-		   GEN7_RC_CTL_TO_MODE);
+	I915_WRITE(VLV_COUNTER_CONTROL,
+		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+				      VLV_MEDIA_RC6_COUNT_EN |
+				      VLV_RENDER_RC6_COUNT_EN));
+	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+		rc6_mode = GEN7_RC_CTL_TO_MODE;
+	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
 	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 	switch ((val >> 6) & 3) {
@@ -3864,8 +3967,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
 					   dev_priv->rps.rpe_delay),
 			 dev_priv->rps.rpe_delay);
 
-	INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
-
 	valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
 
 	gen6_enable_rps_interrupts(dev);
@@ -4605,8 +4706,6 @@ void intel_disable_gt_powersave(struct drm_device *dev)
 	} else if (INTEL_INFO(dev)->gen >= 6) {
 		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
 		cancel_work_sync(&dev_priv->rps.work);
-		if (IS_VALLEYVIEW(dev))
-			cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
 		mutex_lock(&dev_priv->rps.hw_lock);
 		if (IS_VALLEYVIEW(dev))
 			valleyview_disable_rps(dev);
@@ -5267,6 +5366,7 @@ bool intel_display_power_enabled(struct drm_device *dev,
 	case POWER_DOMAIN_PIPE_A:
 	case POWER_DOMAIN_TRANSCODER_EDP:
 		return true;
+	case POWER_DOMAIN_VGA:
 	case POWER_DOMAIN_PIPE_B:
 	case POWER_DOMAIN_PIPE_C:
 	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
@@ -5323,12 +5423,87 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 		for_each_pipe(p)
 			if (p != PIPE_A)
-				dev->last_vblank[p] = 0;
+				dev->vblank[p].last = 0;
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 		}
 	}
 }
 
+static void __intel_power_well_get(struct i915_power_well *power_well)
+{
+	if (!power_well->count++)
+		__intel_set_power_well(power_well->device, true);
+}
+
+static void __intel_power_well_put(struct i915_power_well *power_well)
+{
+	WARN_ON(!power_well->count);
+	if (!--power_well->count)
+		__intel_set_power_well(power_well->device, false);
+}
+
+void intel_display_power_get(struct drm_device *dev,
+			     enum intel_display_power_domain domain)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_well *power_well = &dev_priv->power_well;
+
+	if (!HAS_POWER_WELL(dev))
+		return;
+
+	switch (domain) {
+	case POWER_DOMAIN_PIPE_A:
+	case POWER_DOMAIN_TRANSCODER_EDP:
+		return;
+	case POWER_DOMAIN_VGA:
+	case POWER_DOMAIN_PIPE_B:
+	case POWER_DOMAIN_PIPE_C:
+	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
+	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
+	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
+	case POWER_DOMAIN_TRANSCODER_A:
+	case POWER_DOMAIN_TRANSCODER_B:
+	case POWER_DOMAIN_TRANSCODER_C:
+		spin_lock_irq(&power_well->lock);
+		__intel_power_well_get(power_well);
+		spin_unlock_irq(&power_well->lock);
+		return;
+	default:
+		BUG();
+	}
+}
+
+void intel_display_power_put(struct drm_device *dev,
+			     enum intel_display_power_domain domain)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_well *power_well = &dev_priv->power_well;
+
+	if (!HAS_POWER_WELL(dev))
+		return;
+
+	switch (domain) {
+	case POWER_DOMAIN_PIPE_A:
+	case POWER_DOMAIN_TRANSCODER_EDP:
+		return;
+	case POWER_DOMAIN_VGA:
+	case POWER_DOMAIN_PIPE_B:
+	case POWER_DOMAIN_PIPE_C:
+	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
+	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
+	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
+	case POWER_DOMAIN_TRANSCODER_A:
+	case POWER_DOMAIN_TRANSCODER_B:
+	case POWER_DOMAIN_TRANSCODER_C:
+		spin_lock_irq(&power_well->lock);
+		__intel_power_well_put(power_well);
+		spin_unlock_irq(&power_well->lock);
+		return;
+	default:
+		BUG();
+	}
+}
+
 static struct i915_power_well *hsw_pwr;
 
 /* Display audio driver power well request */
@@ -5338,9 +5513,7 @@ void i915_request_power_well(void)
 		return;
 
 	spin_lock_irq(&hsw_pwr->lock);
-	if (!hsw_pwr->count++ &&
-			!hsw_pwr->i915_request)
-		__intel_set_power_well(hsw_pwr->device, true);
+	__intel_power_well_get(hsw_pwr);
 	spin_unlock_irq(&hsw_pwr->lock);
 }
 EXPORT_SYMBOL_GPL(i915_request_power_well);
@@ -5352,10 +5525,7 @@ void i915_release_power_well(void)
 		return;
 
 	spin_lock_irq(&hsw_pwr->lock);
-	WARN_ON(!hsw_pwr->count);
-	if (!--hsw_pwr->count &&
-		       !hsw_pwr->i915_request)
-		__intel_set_power_well(hsw_pwr->device, false);
+	__intel_power_well_put(hsw_pwr);
 	spin_unlock_irq(&hsw_pwr->lock);
 }
 EXPORT_SYMBOL_GPL(i915_release_power_well);
@@ -5390,15 +5560,37 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
 		return;
 
 	spin_lock_irq(&power_well->lock);
+
+	/*
+	 * This function will only ever contribute one
+	 * to the power well reference count. i915_request
+	 * is what tracks whether we have or have not
+	 * added the one to the reference count.
+	 */
+	if (power_well->i915_request == enable)
+		goto out;
+
 	power_well->i915_request = enable;
 
-	/* only reject "disable" power well request */
-	if (power_well->count && !enable) {
-		spin_unlock_irq(&power_well->lock);
+	if (enable)
+		__intel_power_well_get(power_well);
+	else
+		__intel_power_well_put(power_well);
+
+out:
+	spin_unlock_irq(&power_well->lock);
+}
+
+static void intel_resume_power_well(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_well *power_well = &dev_priv->power_well;
+
+	if (!HAS_POWER_WELL(dev))
 		return;
-	}
 
-	__intel_set_power_well(dev, enable);
+	spin_lock_irq(&power_well->lock);
+	__intel_set_power_well(dev, power_well->count > 0);
 	spin_unlock_irq(&power_well->lock);
 }
 
@@ -5417,6 +5609,7 @@ void intel_init_power_well(struct drm_device *dev)
 
 	/* For now, we need the power well to be always enabled. */
 	intel_set_power_well(dev, true);
+	intel_resume_power_well(dev);
 
 	/* We're taking over the BIOS, so clear any requests made by it since
 	 * the driver is in charge now. */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 460ee1026fca..b67104aaade5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -41,6 +41,16 @@ static inline int ring_space(struct intel_ring_buffer *ring)
 	return space;
 }
 
+void __intel_ring_advance(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	ring->tail &= ring->size - 1;
+	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
+		return;
+	ring->write_tail(ring, ring->tail);
+}
+
 static int
 gen2_render_ring_flush(struct intel_ring_buffer *ring,
 		       u32 invalidate_domains,
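
Two details of __intel_ring_advance worth noting: masking the tail with size - 1 is only a correct modulo when the ring size is a power of two, and the stop_rings test lets debugfs freeze a ring by skipping the hardware tail write while the software tail still advances. A tiny model of the wrap, with a hypothetical 16-byte ring:

    #include <assert.h>
    #include <stdint.h>

    #define RING_SIZE 16u   /* must be a power of two for the mask trick */

    /* Cheap modulo: (tail + bytes) % RING_SIZE via a bitmask. */
    static uint32_t advance(uint32_t tail, uint32_t bytes)
    {
            return (tail + bytes) & (RING_SIZE - 1);
    }

    int main(void)
    {
            assert(advance(12, 8) == 4);    /* wraps past the end */
            assert(advance(4, 8) == 12);
            return 0;
    }
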
@@ -559,8 +569,8 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 	if (INTEL_INFO(dev)->gen >= 6)
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-	if (HAS_L3_GPU_CACHE(dev))
-		I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+	if (HAS_L3_DPF(dev))
+		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
 
 	return ret;
 }
@@ -593,7 +603,7 @@ update_mboxes(struct intel_ring_buffer *ring,
 #define MBOX_UPDATE_DWORDS 4
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 	intel_ring_emit(ring, mmio_offset);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, MI_NOOP);
 }
 
@@ -629,9 +639,9 @@ gen6_add_request(struct intel_ring_buffer *ring)
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 
 	return 0;
 }
@@ -723,7 +733,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
 	scratch_addr += 128; /* write to separate cachelines */
@@ -742,9 +752,9 @@ pc_render_add_request(struct intel_ring_buffer *ring)
742 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | 752 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
743 PIPE_CONTROL_NOTIFY); 753 PIPE_CONTROL_NOTIFY);
744 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 754 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
745 intel_ring_emit(ring, ring->outstanding_lazy_request); 755 intel_ring_emit(ring, ring->outstanding_lazy_seqno);
746 intel_ring_emit(ring, 0); 756 intel_ring_emit(ring, 0);
747 intel_ring_advance(ring); 757 __intel_ring_advance(ring);
748 758
749 return 0; 759 return 0;
750} 760}
@@ -963,9 +973,9 @@ i9xx_add_request(struct intel_ring_buffer *ring)
963 973
964 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 974 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
965 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 975 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
966 intel_ring_emit(ring, ring->outstanding_lazy_request); 976 intel_ring_emit(ring, ring->outstanding_lazy_seqno);
967 intel_ring_emit(ring, MI_USER_INTERRUPT); 977 intel_ring_emit(ring, MI_USER_INTERRUPT);
968 intel_ring_advance(ring); 978 __intel_ring_advance(ring);
969 979
970 return 0; 980 return 0;
971} 981}
@@ -987,10 +997,10 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
987 997
988 spin_lock_irqsave(&dev_priv->irq_lock, flags); 998 spin_lock_irqsave(&dev_priv->irq_lock, flags);
989 if (ring->irq_refcount++ == 0) { 999 if (ring->irq_refcount++ == 0) {
990 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) 1000 if (HAS_L3_DPF(dev) && ring->id == RCS)
991 I915_WRITE_IMR(ring, 1001 I915_WRITE_IMR(ring,
992 ~(ring->irq_enable_mask | 1002 ~(ring->irq_enable_mask |
993 GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); 1003 GT_PARITY_ERROR(dev)));
994 else 1004 else
995 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1005 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
996 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); 1006 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1009,9 +1019,8 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
1009 1019
1010 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1020 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1011 if (--ring->irq_refcount == 0) { 1021 if (--ring->irq_refcount == 0) {
1012 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) 1022 if (HAS_L3_DPF(dev) && ring->id == RCS)
1013 I915_WRITE_IMR(ring, 1023 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
1014 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1015 else 1024 else
1016 I915_WRITE_IMR(ring, ~0); 1025 I915_WRITE_IMR(ring, ~0);
1017 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); 1026 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1414,6 +1423,9 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
1414 if (ret != -ENOSPC) 1423 if (ret != -ENOSPC)
1415 return ret; 1424 return ret;
1416 1425
1426 /* force the tail write in case we have been skipping them */
1427 __intel_ring_advance(ring);
1428
1417 trace_i915_ring_wait_begin(ring); 1429 trace_i915_ring_wait_begin(ring);
1418 /* With GEM the hangcheck timer should kick us out of the loop, 1430 /* With GEM the hangcheck timer should kick us out of the loop,
1419 * leaving it early runs the risk of corrupting GEM state (due 1431 * leaving it early runs the risk of corrupting GEM state (due
@@ -1475,7 +1487,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
1475 int ret; 1487 int ret;
1476 1488
1477 /* We need to add any requests required to flush the objects and ring */ 1489 /* We need to add any requests required to flush the objects and ring */
1478 if (ring->outstanding_lazy_request) { 1490 if (ring->outstanding_lazy_seqno) {
1479 ret = i915_add_request(ring, NULL); 1491 ret = i915_add_request(ring, NULL);
1480 if (ret) 1492 if (ret)
1481 return ret; 1493 return ret;
@@ -1495,10 +1507,20 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
1495static int 1507static int
1496intel_ring_alloc_seqno(struct intel_ring_buffer *ring) 1508intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
1497{ 1509{
1498 if (ring->outstanding_lazy_request) 1510 if (ring->outstanding_lazy_seqno)
1499 return 0; 1511 return 0;
1500 1512
1501 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request); 1513 if (ring->preallocated_lazy_request == NULL) {
1514 struct drm_i915_gem_request *request;
1515
1516 request = kmalloc(sizeof(*request), GFP_KERNEL);
1517 if (request == NULL)
1518 return -ENOMEM;
1519
1520 ring->preallocated_lazy_request = request;
1521 }
1522
1523 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
1502} 1524}
1503 1525
1504static int __intel_ring_begin(struct intel_ring_buffer *ring, 1526static int __intel_ring_begin(struct intel_ring_buffer *ring,
@@ -1545,7 +1567,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1545{ 1567{
1546 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1568 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1547 1569
1548 BUG_ON(ring->outstanding_lazy_request); 1570 BUG_ON(ring->outstanding_lazy_seqno);
1549 1571
1550 if (INTEL_INFO(ring->dev)->gen >= 6) { 1572 if (INTEL_INFO(ring->dev)->gen >= 6) {
1551 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); 1573 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
@@ -1558,17 +1580,6 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1558 ring->hangcheck.seqno = seqno; 1580 ring->hangcheck.seqno = seqno;
1559} 1581}
1560 1582
1561void intel_ring_advance(struct intel_ring_buffer *ring)
1562{
1563 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1564
1565 ring->tail &= ring->size - 1;
1566 if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
1567 return;
1568 ring->write_tail(ring, ring->tail);
1569}
1570
1571
1572static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, 1583static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1573 u32 value) 1584 u32 value)
1574{ 1585{
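
The hunks above split ring advancement in two: intel_ring_advance() becomes a cheap inline that only wraps the software tail, while __intel_ring_advance() additionally publishes the tail to the hardware and skips that write while the ring is stopped via gpu_error.stop_rings, with ring_wait_for_space() forcing a deferred write before it blocks. Below is a minimal userspace sketch of that wrap-then-conditionally-publish pattern; the struct, field names and values are stand-ins, not the driver's.

/* Standalone sketch (not kernel code) of the split advance pattern:
 * wrapping the software tail is separated from publishing it to the
 * "hardware", so a stopped ring can keep queueing without executing. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct ring {
	uint32_t tail;      /* software write pointer, in bytes */
	uint32_t size;      /* power of two */
	uint32_t hw_tail;   /* last tail value published to hardware */
	bool     stopped;   /* set while capturing GPU error state */
};

/* cheap: callers may batch several emits before publishing */
static inline void ring_advance(struct ring *r)
{
	r->tail &= r->size - 1;              /* wrap within the buffer */
}

/* expensive: actually kicks the hardware, unless the ring is stopped */
static void __ring_advance(struct ring *r)
{
	ring_advance(r);
	if (r->stopped)
		return;                      /* skip tail writes while stopped */
	r->hw_tail = r->tail;                /* models ring->write_tail() */
}

int main(void)
{
	struct ring r = { .tail = 4092, .size = 4096 };

	r.tail += 8;                         /* emit two dwords past the end */
	__ring_advance(&r);
	printf("tail=%u hw_tail=%u\n", r.tail, r.hw_tail);  /* both wrap to 4 */

	r.stopped = true;
	r.tail += 4;
	__ring_advance(&r);                  /* wraps, but hw_tail stays at 4 */
	printf("tail=%u hw_tail=%u\n", r.tail, r.hw_tail);
	return 0;
}
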
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 68b1ca974d59..71a73f4fe252 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -34,6 +34,7 @@ struct intel_hw_status_page {
34#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) 34#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
35 35
36enum intel_ring_hangcheck_action { 36enum intel_ring_hangcheck_action {
37 HANGCHECK_IDLE = 0,
37 HANGCHECK_WAIT, 38 HANGCHECK_WAIT,
38 HANGCHECK_ACTIVE, 39 HANGCHECK_ACTIVE,
39 HANGCHECK_KICK, 40 HANGCHECK_KICK,
@@ -140,7 +141,8 @@ struct intel_ring_buffer {
140 /** 141 /**
141 * Do we have some not yet emitted requests outstanding? 142 * Do we have some not yet emitted requests outstanding?
142 */ 143 */
143 u32 outstanding_lazy_request; 144 struct drm_i915_gem_request *preallocated_lazy_request;
145 u32 outstanding_lazy_seqno;
144 bool gpu_caches_dirty; 146 bool gpu_caches_dirty;
145 bool fbc_dirty; 147 bool fbc_dirty;
146 148
@@ -237,7 +239,12 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
237 iowrite32(data, ring->virtual_start + ring->tail); 239 iowrite32(data, ring->virtual_start + ring->tail);
238 ring->tail += 4; 240 ring->tail += 4;
239} 241}
240void intel_ring_advance(struct intel_ring_buffer *ring); 242static inline void intel_ring_advance(struct intel_ring_buffer *ring)
243{
244 ring->tail &= ring->size - 1;
245}
246void __intel_ring_advance(struct intel_ring_buffer *ring);
247
241int __must_check intel_ring_idle(struct intel_ring_buffer *ring); 248int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
242void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno); 249void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
243int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); 250int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
@@ -258,8 +265,8 @@ static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
258 265
259static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring) 266static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
260{ 267{
261 BUG_ON(ring->outstanding_lazy_request == 0); 268 BUG_ON(ring->outstanding_lazy_seqno == 0);
262 return ring->outstanding_lazy_request; 269 return ring->outstanding_lazy_seqno;
263} 270}
264 271
265static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) 272static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
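
The header change replaces the bare outstanding_lazy_request seqno with a seqno plus a preallocated request struct, so the request object is allocated before any commands are emitted and the later commit step can no longer fail with -ENOMEM mid-ring. A plain-C sketch of that reserve-then-commit idea follows; the fields are simplified stand-ins, not the driver's struct drm_i915_gem_request.

/* Sketch: grab the request object up front so the commit step after
 * commands are emitted cannot fail with ENOMEM. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct request { unsigned seqno; };

struct ring {
	struct request *prealloc;   /* reserved before any emission */
	unsigned lazy_seqno;        /* 0 = no request outstanding */
	unsigned next_seqno;
};

static int ring_alloc_seqno(struct ring *r)
{
	if (r->lazy_seqno)
		return 0;                   /* already reserved */
	if (!r->prealloc) {
		r->prealloc = malloc(sizeof(*r->prealloc));
		if (!r->prealloc)
			return -ENOMEM;     /* fail before touching the ring */
	}
	r->lazy_seqno = ++r->next_seqno;
	return 0;
}

static void ring_add_request(struct ring *r)
{
	/* cannot fail: memory was reserved in ring_alloc_seqno() */
	struct request *req = r->prealloc;

	req->seqno = r->lazy_seqno;
	r->prealloc = NULL;
	r->lazy_seqno = 0;
	printf("committed request seqno=%u\n", req->seqno);
	free(req);
}

int main(void)
{
	struct ring r = { 0 };

	if (ring_alloc_seqno(&r) == 0)
		ring_add_request(&r);
	return 0;
}
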
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 49482fd5b76c..a583e8f718a7 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -539,7 +539,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
539 goto log_fail; 539 goto log_fail;
540 540
541 while ((status == SDVO_CMD_STATUS_PENDING || 541 while ((status == SDVO_CMD_STATUS_PENDING ||
542 status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) { 542 status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
543 if (retry < 10) 543 if (retry < 10)
544 msleep(15); 544 msleep(15);
545 else 545 else
@@ -1068,7 +1068,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
1068 1068
1069static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config) 1069static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
1070{ 1070{
1071 unsigned dotclock = pipe_config->adjusted_mode.clock; 1071 unsigned dotclock = pipe_config->port_clock;
1072 struct dpll *clock = &pipe_config->dpll; 1072 struct dpll *clock = &pipe_config->dpll;
1073 1073
1074 /* SDVO TV has fixed PLL values depend on its clock range, 1074 /* SDVO TV has fixed PLL values depend on its clock range,
@@ -1133,7 +1133,6 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1133 */ 1133 */
1134 pipe_config->pixel_multiplier = 1134 pipe_config->pixel_multiplier =
1135 intel_sdvo_get_pixel_multiplier(adjusted_mode); 1135 intel_sdvo_get_pixel_multiplier(adjusted_mode);
1136 adjusted_mode->clock *= pipe_config->pixel_multiplier;
1137 1136
1138 if (intel_sdvo->color_range_auto) { 1137 if (intel_sdvo->color_range_auto) {
1139 /* See CEA-861-E - 5.1 Default Encoding Parameters */ 1138 /* See CEA-861-E - 5.1 Default Encoding Parameters */
@@ -1217,11 +1216,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
1217 !intel_sdvo_set_tv_format(intel_sdvo)) 1216 !intel_sdvo_set_tv_format(intel_sdvo))
1218 return; 1217 return;
1219 1218
1220 /* We have tried to get input timing in mode_fixup, and filled into
1221 * adjusted_mode.
1222 */
1223 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); 1219 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1224 input_dtd.part1.clock /= crtc->config.pixel_multiplier;
1225 1220
1226 if (intel_sdvo->is_tv || intel_sdvo->is_lvds) 1221 if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
1227 input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags; 1222 input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
@@ -1330,6 +1325,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
1330 struct intel_sdvo *intel_sdvo = to_sdvo(encoder); 1325 struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
1331 struct intel_sdvo_dtd dtd; 1326 struct intel_sdvo_dtd dtd;
1332 int encoder_pixel_multiplier = 0; 1327 int encoder_pixel_multiplier = 0;
1328 int dotclock;
1333 u32 flags = 0, sdvox; 1329 u32 flags = 0, sdvox;
1334 u8 val; 1330 u8 val;
1335 bool ret; 1331 bool ret;
@@ -1368,6 +1364,13 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
1368 >> SDVO_PORT_MULTIPLY_SHIFT) + 1; 1364 >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
1369 } 1365 }
1370 1366
1367 dotclock = pipe_config->port_clock / pipe_config->pixel_multiplier;
1368
1369 if (HAS_PCH_SPLIT(dev))
1370 ironlake_check_encoder_dotclock(pipe_config, dotclock);
1371
1372 pipe_config->adjusted_mode.crtc_clock = dotclock;
1373
1371 /* Cross check the port pixel multiplier with the sdvo encoder state. */ 1374 /* Cross check the port pixel multiplier with the sdvo encoder state. */
1372 if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, 1375 if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
1373 &val, 1)) { 1376 &val, 1)) {
@@ -1770,6 +1773,9 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1770{ 1773{
1771 struct edid *edid; 1774 struct edid *edid;
1772 1775
1776 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1777 connector->base.id, drm_get_connector_name(connector));
1778
1773 /* set the bus switch and get the modes */ 1779 /* set the bus switch and get the modes */
1774 edid = intel_sdvo_get_edid(connector); 1780 edid = intel_sdvo_get_edid(connector);
1775 1781
@@ -1865,6 +1871,9 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1865 uint32_t reply = 0, format_map = 0; 1871 uint32_t reply = 0, format_map = 0;
1866 int i; 1872 int i;
1867 1873
1874 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1875 connector->base.id, drm_get_connector_name(connector));
1876
1868 /* Read the list of supported input resolutions for the selected TV 1877 /* Read the list of supported input resolutions for the selected TV
1869 * format. 1878 * format.
1870 */ 1879 */
@@ -1899,6 +1908,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1899 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1908 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1900 struct drm_display_mode *newmode; 1909 struct drm_display_mode *newmode;
1901 1910
1911 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1912 connector->base.id, drm_get_connector_name(connector));
1913
1902 /* 1914 /*
1903 * Fetch modes from VBT. For SDVO prefer the VBT mode since some 1915 * Fetch modes from VBT. For SDVO prefer the VBT mode since some
1904 * SDVO->LVDS transcoders can't cope with the EDID mode. 1916 * SDVO->LVDS transcoders can't cope with the EDID mode.
@@ -1930,7 +1942,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1930 break; 1942 break;
1931 } 1943 }
1932 } 1944 }
1933
1934} 1945}
1935 1946
1936static int intel_sdvo_get_modes(struct drm_connector *connector) 1947static int intel_sdvo_get_modes(struct drm_connector *connector)
@@ -1998,7 +2009,6 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1998 intel_sdvo_connector->tv_format); 2009 intel_sdvo_connector->tv_format);
1999 2010
2000 intel_sdvo_destroy_enhance_property(connector); 2011 intel_sdvo_destroy_enhance_property(connector);
2001 drm_sysfs_connector_remove(connector);
2002 drm_connector_cleanup(connector); 2012 drm_connector_cleanup(connector);
2003 kfree(intel_sdvo_connector); 2013 kfree(intel_sdvo_connector);
2004} 2014}
@@ -2394,7 +2404,9 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2394 struct intel_connector *intel_connector; 2404 struct intel_connector *intel_connector;
2395 struct intel_sdvo_connector *intel_sdvo_connector; 2405 struct intel_sdvo_connector *intel_sdvo_connector;
2396 2406
2397 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); 2407 DRM_DEBUG_KMS("initialising DVI device %d\n", device);
2408
2409 intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
2398 if (!intel_sdvo_connector) 2410 if (!intel_sdvo_connector)
2399 return false; 2411 return false;
2400 2412
@@ -2442,7 +2454,9 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2442 struct intel_connector *intel_connector; 2454 struct intel_connector *intel_connector;
2443 struct intel_sdvo_connector *intel_sdvo_connector; 2455 struct intel_sdvo_connector *intel_sdvo_connector;
2444 2456
2445 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); 2457 DRM_DEBUG_KMS("initialising TV type %d\n", type);
2458
2459 intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
2446 if (!intel_sdvo_connector) 2460 if (!intel_sdvo_connector)
2447 return false; 2461 return false;
2448 2462
@@ -2467,6 +2481,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2467 return true; 2481 return true;
2468 2482
2469err: 2483err:
2484 drm_sysfs_connector_remove(connector);
2470 intel_sdvo_destroy(connector); 2485 intel_sdvo_destroy(connector);
2471 return false; 2486 return false;
2472} 2487}
@@ -2479,7 +2494,9 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
2479 struct intel_connector *intel_connector; 2494 struct intel_connector *intel_connector;
2480 struct intel_sdvo_connector *intel_sdvo_connector; 2495 struct intel_sdvo_connector *intel_sdvo_connector;
2481 2496
2482 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); 2497 DRM_DEBUG_KMS("initialising analog device %d\n", device);
2498
2499 intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
2483 if (!intel_sdvo_connector) 2500 if (!intel_sdvo_connector)
2484 return false; 2501 return false;
2485 2502
@@ -2510,7 +2527,9 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2510 struct intel_connector *intel_connector; 2527 struct intel_connector *intel_connector;
2511 struct intel_sdvo_connector *intel_sdvo_connector; 2528 struct intel_sdvo_connector *intel_sdvo_connector;
2512 2529
2513 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); 2530 DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
2531
2532 intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
2514 if (!intel_sdvo_connector) 2533 if (!intel_sdvo_connector)
2515 return false; 2534 return false;
2516 2535
@@ -2534,6 +2553,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2534 return true; 2553 return true;
2535 2554
2536err: 2555err:
2556 drm_sysfs_connector_remove(connector);
2537 intel_sdvo_destroy(connector); 2557 intel_sdvo_destroy(connector);
2538 return false; 2558 return false;
2539} 2559}
@@ -2605,8 +2625,10 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
2605 2625
2606 list_for_each_entry_safe(connector, tmp, 2626 list_for_each_entry_safe(connector, tmp,
2607 &dev->mode_config.connector_list, head) { 2627 &dev->mode_config.connector_list, head) {
2608 if (intel_attached_encoder(connector) == &intel_sdvo->base) 2628 if (intel_attached_encoder(connector) == &intel_sdvo->base) {
2629 drm_sysfs_connector_remove(connector);
2609 intel_sdvo_destroy(connector); 2630 intel_sdvo_destroy(connector);
2631 }
2610 } 2632 }
2611} 2633}
2612 2634
@@ -2876,7 +2898,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2876 struct intel_encoder *intel_encoder; 2898 struct intel_encoder *intel_encoder;
2877 struct intel_sdvo *intel_sdvo; 2899 struct intel_sdvo *intel_sdvo;
2878 int i; 2900 int i;
2879 intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); 2901 intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
2880 if (!intel_sdvo) 2902 if (!intel_sdvo)
2881 return false; 2903 return false;
2882 2904
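
The SDVO readout above now derives the pipe dotclock by dividing port_clock by the pixel multiplier, instead of scaling adjusted_mode.clock at compute time. The numbers below are hypothetical, just to show the arithmetic:

/* Sketch of the dotclock bookkeeping the SDVO state readout does. */
#include <stdio.h>

int main(void)
{
	int port_clock = 200000;    /* kHz on the wire (made-up value) */
	int pixel_multiplier = 4;   /* e.g. a low-dotclock TV mode */
	int dotclock = port_clock / pixel_multiplier;

	printf("crtc_clock = %d kHz\n", dotclock);  /* 50000 kHz */
	return 0;
}
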
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 9a0e6c5ea540..acd1cfe8b7dd 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -101,19 +101,83 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
101 return val; 101 return val;
102} 102}
103 103
104u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg) 104u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
105{ 105{
106 u32 val = 0; 106 u32 val = 0;
107 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
108 PUNIT_OPCODE_REG_READ, reg, &val);
109 return val;
110}
107 111
108 vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO, 112void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
109 DPIO_OPCODE_REG_READ, reg, &val); 113{
114 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
115 PUNIT_OPCODE_REG_WRITE, reg, &val);
116}
117
118u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
119{
120 u32 val = 0;
121 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
122 PUNIT_OPCODE_REG_READ, reg, &val);
123 return val;
124}
125
126void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
127{
128 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
129 PUNIT_OPCODE_REG_WRITE, reg, &val);
130}
131
132u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
133{
134 u32 val = 0;
135 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
136 PUNIT_OPCODE_REG_READ, reg, &val);
137 return val;
138}
139
140void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
141{
142 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
143 PUNIT_OPCODE_REG_WRITE, reg, &val);
144}
145
146u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
147{
148 u32 val = 0;
149 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
150 PUNIT_OPCODE_REG_READ, reg, &val);
151 return val;
152}
153
154void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
155{
156 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
157 PUNIT_OPCODE_REG_WRITE, reg, &val);
158}
110 159
160static u32 vlv_get_phy_port(enum pipe pipe)
161{
162 u32 port = IOSF_PORT_DPIO;
163
164	WARN_ON((pipe != PIPE_A) && (pipe != PIPE_B));
165
166 return port;
167}
168
169u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
170{
171 u32 val = 0;
172
173 vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
174 DPIO_OPCODE_REG_READ, reg, &val);
111 return val; 175 return val;
112} 176}
113 177
114void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val) 178void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
115{ 179{
116 vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO, 180 vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
117 DPIO_OPCODE_REG_WRITE, reg, &val); 181 DPIO_OPCODE_REG_WRITE, reg, &val);
118} 182}
119 183
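
The sideband change fans the single vlv_sideband_rw() primitive out into per-unit accessors (GPIO NC, CCK, CCU, GPS core), each fixing its devfn, IOSF port and opcode, and routes DPIO through a per-pipe port lookup. Here is a userspace sketch of that wrapper pattern; the port numbers and the fake readback are placeholders, not the real IOSF IDs or bus behaviour.

#include <stdio.h>
#include <stdint.h>

enum { OP_READ, OP_WRITE };

/* stand-in for vlv_sideband_rw(): pretend registers echo their address */
static void sideband_rw(int devfn, int port, int op, uint32_t reg, uint32_t *val)
{
	if (op == OP_READ)
		*val = ((uint32_t)port << 16) | reg;    /* fake readback */
	else
		printf("write devfn=%d port=0x%x reg=0x%x val=0x%x\n",
		       devfn, port, reg, *val);
}

#define DEFINE_SB_UNIT(name, devfn, port)                     \
static uint32_t name##_read(uint32_t reg)                     \
{                                                             \
	uint32_t val = 0;                                     \
	sideband_rw(devfn, port, OP_READ, reg, &val);         \
	return val;                                           \
}                                                             \
static void name##_write(uint32_t reg, uint32_t val)          \
{                                                             \
	sideband_rw(devfn, port, OP_WRITE, reg, &val);        \
}

DEFINE_SB_UNIT(gpio_nc, 2, 0x13)   /* placeholder port ids */
DEFINE_SB_UNIT(cck, 2, 0x14)

int main(void)
{
	printf("gpio_nc[0x20] = 0x%x\n", gpio_nc_read(0x20));
	printf("cck[0x08] = 0x%x\n", cck_read(0x08));
	gpio_nc_write(0x20, 1);
	cck_write(0x08, 2);
	return 0;
}
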
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index ad6ec4b39005..cae10bc746d0 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -288,7 +288,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
288 dev_priv->sprite_scaling_enabled |= 1 << pipe; 288 dev_priv->sprite_scaling_enabled |= 1 << pipe;
289 289
290 if (!scaling_was_enabled) { 290 if (!scaling_was_enabled) {
291 intel_update_watermarks(dev); 291 intel_update_watermarks(crtc);
292 intel_wait_for_vblank(dev, pipe); 292 intel_wait_for_vblank(dev, pipe);
293 } 293 }
294 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; 294 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
@@ -323,7 +323,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
323 323
324 /* potentially re-enable LP watermarks */ 324 /* potentially re-enable LP watermarks */
325 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) 325 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
326 intel_update_watermarks(dev); 326 intel_update_watermarks(crtc);
327} 327}
328 328
329static void 329static void
@@ -349,7 +349,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
349 349
350 /* potentially re-enable LP watermarks */ 350 /* potentially re-enable LP watermarks */
351 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) 351 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
352 intel_update_watermarks(dev); 352 intel_update_watermarks(crtc);
353} 353}
354 354
355static int 355static int
@@ -652,8 +652,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
652 .y2 = crtc_y + crtc_h, 652 .y2 = crtc_y + crtc_h,
653 }; 653 };
654 const struct drm_rect clip = { 654 const struct drm_rect clip = {
655 .x2 = crtc->mode.hdisplay, 655 .x2 = intel_crtc->config.pipe_src_w,
656 .y2 = crtc->mode.vdisplay, 656 .y2 = intel_crtc->config.pipe_src_h,
657 }; 657 };
658 658
659 intel_fb = to_intel_framebuffer(fb); 659 intel_fb = to_intel_framebuffer(fb);
@@ -1034,7 +1034,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1034 if (INTEL_INFO(dev)->gen < 5) 1034 if (INTEL_INFO(dev)->gen < 5)
1035 return -ENODEV; 1035 return -ENODEV;
1036 1036
1037 intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL); 1037 intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
1038 if (!intel_plane) 1038 if (!intel_plane)
1039 return -ENOMEM; 1039 return -ENOMEM;
1040 1040
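
The sprite update above now clips the destination rectangle against the pipe source size (pipe_src_w/h) rather than the display mode, so panel-fitter scaling no longer skews the clip. A standalone model of the rectangle clipping, with invented sizes:

#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

static void clip(struct rect *r, const struct rect *c)
{
	if (r->x1 < c->x1) r->x1 = c->x1;
	if (r->y1 < c->y1) r->y1 = c->y1;
	if (r->x2 > c->x2) r->x2 = c->x2;
	if (r->y2 > c->y2) r->y2 = c->y2;
}

int main(void)
{
	/* pipe source is 1920x1080 even if the panel fitter scales it up */
	const struct rect pipe_src = { 0, 0, 1920, 1080 };
	struct rect dst = { 1800, 1000, 2000, 1200 };   /* partly off-pipe */

	clip(&dst, &pipe_src);
	printf("clipped dst: %dx%d+%d+%d\n",
	       dst.x2 - dst.x1, dst.y2 - dst.y1, dst.x1, dst.y1);
	return 0;
}
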
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index dd6f84bf6c22..d61aec23a523 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -912,7 +912,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
912 if (!tv_mode) 912 if (!tv_mode)
913 return false; 913 return false;
914 914
915 pipe_config->adjusted_mode.clock = tv_mode->clock; 915 pipe_config->adjusted_mode.crtc_clock = tv_mode->clock;
916 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); 916 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
917 pipe_config->pipe_bpp = 8*3; 917 pipe_config->pipe_bpp = 8*3;
918 918
@@ -1044,7 +1044,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
1044 tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT; 1044 tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
1045 1045
1046 /* Enable two fixes for the chips that need them. */ 1046 /* Enable two fixes for the chips that need them. */
1047 if (dev->pci_device < 0x2772) 1047 if (dev->pdev->device < 0x2772)
1048 tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX; 1048 tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
1049 1049
1050 I915_WRITE(TV_H_CTL_1, hctl1); 1050 I915_WRITE(TV_H_CTL_1, hctl1);
@@ -1433,7 +1433,6 @@ intel_tv_get_modes(struct drm_connector *connector)
1433static void 1433static void
1434intel_tv_destroy(struct drm_connector *connector) 1434intel_tv_destroy(struct drm_connector *connector)
1435{ 1435{
1436 drm_sysfs_connector_remove(connector);
1437 drm_connector_cleanup(connector); 1436 drm_connector_cleanup(connector);
1438 kfree(connector); 1437 kfree(connector);
1439} 1438}
@@ -1518,7 +1517,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
1518static int tv_is_present_in_vbt(struct drm_device *dev) 1517static int tv_is_present_in_vbt(struct drm_device *dev)
1519{ 1518{
1520 struct drm_i915_private *dev_priv = dev->dev_private; 1519 struct drm_i915_private *dev_priv = dev->dev_private;
1521 struct child_device_config *p_child; 1520 union child_device_config *p_child;
1522 int i, ret; 1521 int i, ret;
1523 1522
1524 if (!dev_priv->vbt.child_dev_num) 1523 if (!dev_priv->vbt.child_dev_num)
@@ -1530,13 +1529,13 @@ static int tv_is_present_in_vbt(struct drm_device *dev)
1530 /* 1529 /*
1531 * If the device type is not TV, continue. 1530 * If the device type is not TV, continue.
1532 */ 1531 */
1533 if (p_child->device_type != DEVICE_TYPE_INT_TV && 1532 if (p_child->old.device_type != DEVICE_TYPE_INT_TV &&
1534 p_child->device_type != DEVICE_TYPE_TV) 1533 p_child->old.device_type != DEVICE_TYPE_TV)
1535 continue; 1534 continue;
1536 /* Only when the addin_offset is non-zero, it is regarded 1535 /* Only when the addin_offset is non-zero, it is regarded
1537 * as present. 1536 * as present.
1538 */ 1537 */
1539 if (p_child->addin_offset) { 1538 if (p_child->old.addin_offset) {
1540 ret = 1; 1539 ret = 1;
1541 break; 1540 break;
1542 } 1541 }
@@ -1590,12 +1589,12 @@ intel_tv_init(struct drm_device *dev)
1590 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) 1589 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
1591 return; 1590 return;
1592 1591
1593 intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL); 1592 intel_tv = kzalloc(sizeof(*intel_tv), GFP_KERNEL);
1594 if (!intel_tv) { 1593 if (!intel_tv) {
1595 return; 1594 return;
1596 } 1595 }
1597 1596
1598 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 1597 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
1599 if (!intel_connector) { 1598 if (!intel_connector) {
1600 kfree(intel_tv); 1599 kfree(intel_tv);
1601 return; 1600 return;
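
The VBT check switches from struct child_device_config to union child_device_config, reaching the legacy fields through the .old view so newer, larger layouts can share the same storage. A sketch of the union idea with invented field layouts (the real VBT structs differ):

#include <stdio.h>
#include <stdint.h>

struct old_child_dev {
	uint16_t device_type;
	uint16_t addin_offset;
};

struct new_child_dev {
	uint16_t device_type;
	uint8_t  extra[6];      /* newer revisions grew the struct */
};

union child_dev {
	struct old_child_dev old;
	struct new_child_dev newer;
};

int main(void)
{
	union child_dev c = { .old = { .device_type = 0x1234,
				       .addin_offset = 0x40 } };

	/* legacy consumers keep working through the .old view */
	if (c.old.addin_offset)
		printf("device 0x%04x present\n", c.old.device_type);
	return 0;
}
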
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8649f1c36b00..288a3a654f06 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -204,6 +204,18 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
204 gen6_gt_check_fifodbg(dev_priv); 204 gen6_gt_check_fifodbg(dev_priv);
205} 205}
206 206
207static void gen6_force_wake_work(struct work_struct *work)
208{
209 struct drm_i915_private *dev_priv =
210 container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
211 unsigned long irqflags;
212
213 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
214 if (--dev_priv->uncore.forcewake_count == 0)
215 dev_priv->uncore.funcs.force_wake_put(dev_priv);
216 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
217}
218
207void intel_uncore_early_sanitize(struct drm_device *dev) 219void intel_uncore_early_sanitize(struct drm_device *dev)
208{ 220{
209 struct drm_i915_private *dev_priv = dev->dev_private; 221 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -216,6 +228,9 @@ void intel_uncore_init(struct drm_device *dev)
216{ 228{
217 struct drm_i915_private *dev_priv = dev->dev_private; 229 struct drm_i915_private *dev_priv = dev->dev_private;
218 230
231 INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
232 gen6_force_wake_work);
233
219 if (IS_VALLEYVIEW(dev)) { 234 if (IS_VALLEYVIEW(dev)) {
220 dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get; 235 dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
221 dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put; 236 dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
@@ -261,6 +276,16 @@ void intel_uncore_init(struct drm_device *dev)
261 } 276 }
262} 277}
263 278
279void intel_uncore_fini(struct drm_device *dev)
280{
281 struct drm_i915_private *dev_priv = dev->dev_private;
282
283 flush_delayed_work(&dev_priv->uncore.force_wake_work);
284
285 /* Paranoia: make sure we have disabled everything before we exit. */
286 intel_uncore_sanitize(dev);
287}
288
264static void intel_uncore_forcewake_reset(struct drm_device *dev) 289static void intel_uncore_forcewake_reset(struct drm_device *dev)
265{ 290{
266 struct drm_i915_private *dev_priv = dev->dev_private; 291 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -276,10 +301,26 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev)
276 301
277void intel_uncore_sanitize(struct drm_device *dev) 302void intel_uncore_sanitize(struct drm_device *dev)
278{ 303{
304 struct drm_i915_private *dev_priv = dev->dev_private;
305 u32 reg_val;
306
279 intel_uncore_forcewake_reset(dev); 307 intel_uncore_forcewake_reset(dev);
280 308
281 /* BIOS often leaves RC6 enabled, but disable it for hw init */ 309 /* BIOS often leaves RC6 enabled, but disable it for hw init */
282 intel_disable_gt_powersave(dev); 310 intel_disable_gt_powersave(dev);
311
312	/* Turn off power gating; needed especially on BIOS-less systems */
313 if (IS_VALLEYVIEW(dev)) {
314
315 mutex_lock(&dev_priv->rps.hw_lock);
316 reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);
317
318 if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
319 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);
320
321 mutex_unlock(&dev_priv->rps.hw_lock);
322
323 }
283} 324}
284 325
285/* 326/*
@@ -306,8 +347,12 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
306 unsigned long irqflags; 347 unsigned long irqflags;
307 348
308 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 349 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
309 if (--dev_priv->uncore.forcewake_count == 0) 350 if (--dev_priv->uncore.forcewake_count == 0) {
310 dev_priv->uncore.funcs.force_wake_put(dev_priv); 351 dev_priv->uncore.forcewake_count++;
352 mod_delayed_work(dev_priv->wq,
353 &dev_priv->uncore.force_wake_work,
354 1);
355 }
311 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 356 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
312} 357}
313 358
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index cc3166dd445a..087db33f6cff 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -406,11 +406,6 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
406 dev_priv->mmio_base = pci_resource_start(dev->pdev, 1); 406 dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
407 dev_priv->mmio_size = pci_resource_len(dev->pdev, 1); 407 dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
408 408
409 dev->counters += 3;
410 dev->types[6] = _DRM_STAT_IRQ;
411 dev->types[7] = _DRM_STAT_PRIMARY;
412 dev->types[8] = _DRM_STAT_SECONDARY;
413
414 ret = drm_vblank_init(dev, 1); 409 ret = drm_vblank_init(dev, 1);
415 410
416 if (ret) { 411 if (ret) {
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index 598c281def0a..2b0ceb8dc11b 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -169,5 +169,5 @@ void mga_driver_irq_uninstall(struct drm_device *dev)
169 /* Disable *all* interrupts */ 169 /* Disable *all* interrupts */
170 MGA_WRITE(MGA_IEN, 0); 170 MGA_WRITE(MGA_IEN, 0);
171 171
172 dev->irq_enabled = 0; 172 dev->irq_enabled = false;
173} 173}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index fcce7b2f8011..f15ea3c4a90a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -99,7 +99,6 @@ static struct drm_driver driver = {
99 .minor = DRIVER_MINOR, 99 .minor = DRIVER_MINOR,
100 .patchlevel = DRIVER_PATCHLEVEL, 100 .patchlevel = DRIVER_PATCHLEVEL,
101 101
102 .gem_init_object = mgag200_gem_init_object,
103 .gem_free_object = mgag200_gem_free_object, 102 .gem_free_object = mgag200_gem_free_object,
104 .dumb_create = mgag200_dumb_create, 103 .dumb_create = mgag200_dumb_create,
105 .dumb_map_offset = mgag200_dumb_mmap_offset, 104 .dumb_map_offset = mgag200_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index baaae19332e2..cf11ee68a6d9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -260,7 +260,6 @@ int mgag200_driver_unload(struct drm_device *dev);
260int mgag200_gem_create(struct drm_device *dev, 260int mgag200_gem_create(struct drm_device *dev,
261 u32 size, bool iskernel, 261 u32 size, bool iskernel,
262 struct drm_gem_object **obj); 262 struct drm_gem_object **obj);
263int mgag200_gem_init_object(struct drm_gem_object *obj);
264int mgag200_dumb_create(struct drm_file *file, 263int mgag200_dumb_create(struct drm_file *file,
265 struct drm_device *dev, 264 struct drm_device *dev,
266 struct drm_mode_create_dumb *args); 265 struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 0f8b861b10b3..b1120cb1db6d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -310,12 +310,6 @@ int mgag200_dumb_create(struct drm_file *file,
310 return 0; 310 return 0;
311} 311}
312 312
313int mgag200_gem_init_object(struct drm_gem_object *obj)
314{
315 BUG();
316 return 0;
317}
318
319void mgag200_bo_unref(struct mgag200_bo **bo) 313void mgag200_bo_unref(struct mgag200_bo **bo)
320{ 314{
321 struct ttm_buffer_object *tbo; 315 struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 2e70462883e8..2a15b98b4d2b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -210,8 +210,8 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
210 sim_data.nvclk_khz = NVClk; 210 sim_data.nvclk_khz = NVClk;
211 sim_data.bpp = bpp; 211 sim_data.bpp = bpp;
212 sim_data.two_heads = nv_two_heads(dev); 212 sim_data.two_heads = nv_two_heads(dev);
213 if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ || 213 if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
214 (dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) { 214 (dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
215 uint32_t type; 215 uint32_t type;
216 216
217 pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type); 217 pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);
@@ -256,8 +256,8 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
256 256
257 if (nv_device(drm->device)->card_type < NV_20) 257 if (nv_device(drm->device)->card_type < NV_20)
258 nv04_update_arb(dev, vclk, bpp, burst, lwm); 258 nv04_update_arb(dev, vclk, bpp, burst, lwm);
259 else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ || 259 else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
260 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) { 260 (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
261 *burst = 128; 261 *burst = 128;
262 *lwm = 0x0480; 262 *lwm = 0x0480;
263 } else 263 } else
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index d4fbf11360fe..0e3270c3ffd2 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -326,8 +326,6 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
326 regp->MiscOutReg = 0x23; /* +hsync +vsync */ 326 regp->MiscOutReg = 0x23; /* +hsync +vsync */
327 } 327 }
328 328
329 regp->MiscOutReg |= (mode->clock_index & 0x03) << 2;
330
331 /* 329 /*
332 * Time Sequencer 330 * Time Sequencer
333 */ 331 */
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 93dd23ff0093..59d1c040b84f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -490,8 +490,8 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
490 /* BIOS scripts usually take care of the backlight, thanks 490 /* BIOS scripts usually take care of the backlight, thanks
491 * Apple for your consistency. 491 * Apple for your consistency.
492 */ 492 */
493 if (dev->pci_device == 0x0174 || dev->pci_device == 0x0179 || 493 if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 ||
494 dev->pci_device == 0x0189 || dev->pci_device == 0x0329) { 494 dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) {
495 if (mode == DRM_MODE_DPMS_ON) { 495 if (mode == DRM_MODE_DPMS_ON) {
496 nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31); 496 nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
497 nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1); 497 nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 9928187f0a7d..2cf65e0b517e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -127,7 +127,7 @@ static inline bool
127nv_two_heads(struct drm_device *dev) 127nv_two_heads(struct drm_device *dev)
128{ 128{
129 struct nouveau_drm *drm = nouveau_drm(dev); 129 struct nouveau_drm *drm = nouveau_drm(dev);
130 const int impl = dev->pci_device & 0x0ff0; 130 const int impl = dev->pdev->device & 0x0ff0;
131 131
132 if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 && 132 if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 &&
133 impl != 0x0150 && impl != 0x01a0 && impl != 0x0200) 133 impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
@@ -139,14 +139,14 @@ nv_two_heads(struct drm_device *dev)
139static inline bool 139static inline bool
140nv_gf4_disp_arch(struct drm_device *dev) 140nv_gf4_disp_arch(struct drm_device *dev)
141{ 141{
142 return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110; 142 return nv_two_heads(dev) && (dev->pdev->device & 0x0ff0) != 0x0110;
143} 143}
144 144
145static inline bool 145static inline bool
146nv_two_reg_pll(struct drm_device *dev) 146nv_two_reg_pll(struct drm_device *dev)
147{ 147{
148 struct nouveau_drm *drm = nouveau_drm(dev); 148 struct nouveau_drm *drm = nouveau_drm(dev);
149 const int impl = dev->pci_device & 0x0ff0; 149 const int impl = dev->pdev->device & 0x0ff0;
150 150
151 if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40) 151 if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40)
152 return true; 152 return true;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 973056b86207..f8dee834527f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -220,7 +220,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
220 int ret; 220 int ret;
221 221
222 if (plltype == PLL_MEMORY && 222 if (plltype == PLL_MEMORY &&
223 (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) { 223 (dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
224 uint32_t mpllP; 224 uint32_t mpllP;
225 225
226 pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); 226 pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
@@ -230,7 +230,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
230 return 400000 / mpllP; 230 return 400000 / mpllP;
231 } else 231 } else
232 if (plltype == PLL_MEMORY && 232 if (plltype == PLL_MEMORY &&
233 (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) { 233 (dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) {
234 uint32_t clock; 234 uint32_t clock;
235 235
236 pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); 236 pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
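
These dispnv04 hunks are part of a mechanical migration from the removed dev->pci_device cache to reading the ID off the underlying struct pci_dev. A tiny userspace model of the lookup, with stand-in types and omitting the card-type checks the real nv_two_heads() also makes:

#include <stdio.h>
#include <stdint.h>

struct pci_dev    { uint16_t vendor, device; };
struct drm_device { struct pci_dev *pdev; };    /* cached copy removed */

static int nv_two_heads(const struct drm_device *dev)
{
	const int impl = dev->pdev->device & 0x0ff0;  /* was dev->pci_device */

	return impl != 0x0100 && impl != 0x0150 &&
	       impl != 0x01a0 && impl != 0x0200;
}

int main(void)
{
	struct pci_dev pdev = { .vendor = 0x10de, .device = 0x0329 };
	struct drm_device dev = { .pdev = &pdev };

	printf("two heads: %s\n", nv_two_heads(&dev) ? "yes" : "no");
	return 0;
}
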
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 8f467e7bfd19..72055a35f845 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -130,7 +130,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
130 if (chan->ntfy) { 130 if (chan->ntfy) {
131 nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma); 131 nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
132 nouveau_bo_unpin(chan->ntfy); 132 nouveau_bo_unpin(chan->ntfy);
133 drm_gem_object_unreference_unlocked(chan->ntfy->gem); 133 drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
134 } 134 }
135 135
136 if (chan->heap.block_size) 136 if (chan->heap.block_size)
@@ -178,10 +178,10 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
178 getparam->value = device->chipset; 178 getparam->value = device->chipset;
179 break; 179 break;
180 case NOUVEAU_GETPARAM_PCI_VENDOR: 180 case NOUVEAU_GETPARAM_PCI_VENDOR:
181 getparam->value = dev->pci_vendor; 181 getparam->value = dev->pdev->vendor;
182 break; 182 break;
183 case NOUVEAU_GETPARAM_PCI_DEVICE: 183 case NOUVEAU_GETPARAM_PCI_DEVICE:
184 getparam->value = dev->pci_device; 184 getparam->value = dev->pdev->device;
185 break; 185 break;
186 case NOUVEAU_GETPARAM_BUS_TYPE: 186 case NOUVEAU_GETPARAM_BUS_TYPE:
187 if (drm_pci_device_is_agp(dev)) 187 if (drm_pci_device_is_agp(dev))
@@ -320,7 +320,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
320 goto done; 320 goto done;
321 } 321 }
322 322
323 ret = drm_gem_handle_create(file_priv, chan->ntfy->gem, 323 ret = drm_gem_handle_create(file_priv, &chan->ntfy->gem,
324 &init->notifier_handle); 324 &init->notifier_handle);
325 if (ret) 325 if (ret)
326 goto done; 326 goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 2ffad2176b7f..630f6e84fc01 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -82,7 +82,7 @@ nv40_backlight_init(struct drm_connector *connector)
82 memset(&props, 0, sizeof(struct backlight_properties)); 82 memset(&props, 0, sizeof(struct backlight_properties));
83 props.type = BACKLIGHT_RAW; 83 props.type = BACKLIGHT_RAW;
84 props.max_brightness = 31; 84 props.max_brightness = 31;
85 bd = backlight_device_register("nv_backlight", &connector->kdev, drm, 85 bd = backlight_device_register("nv_backlight", connector->kdev, drm,
86 &nv40_bl_ops, &props); 86 &nv40_bl_ops, &props);
87 if (IS_ERR(bd)) 87 if (IS_ERR(bd))
88 return PTR_ERR(bd); 88 return PTR_ERR(bd);
@@ -204,7 +204,7 @@ nv50_backlight_init(struct drm_connector *connector)
204 memset(&props, 0, sizeof(struct backlight_properties)); 204 memset(&props, 0, sizeof(struct backlight_properties));
205 props.type = BACKLIGHT_RAW; 205 props.type = BACKLIGHT_RAW;
206 props.max_brightness = 100; 206 props.max_brightness = 100;
207 bd = backlight_device_register("nv_backlight", &connector->kdev, 207 bd = backlight_device_register("nv_backlight", connector->kdev,
208 nv_encoder, ops, &props); 208 nv_encoder, ops, &props);
209 if (IS_ERR(bd)) 209 if (IS_ERR(bd))
210 return PTR_ERR(bd); 210 return PTR_ERR(bd);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 3e7287675ecf..4c3feaaa1037 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -127,8 +127,8 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_outp
127#ifdef __powerpc__ 127#ifdef __powerpc__
128 /* Powerbook specific quirks */ 128 /* Powerbook specific quirks */
129 if (script == LVDS_RESET && 129 if (script == LVDS_RESET &&
130 (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 || 130 (dev->pdev->device == 0x0179 || dev->pdev->device == 0x0189 ||
131 dev->pci_device == 0x0329)) 131 dev->pdev->device == 0x0329))
132 nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72); 132 nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
133#endif 133#endif
134 134
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 755c38d06271..4172854d4365 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -146,7 +146,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
146 struct drm_device *dev = drm->dev; 146 struct drm_device *dev = drm->dev;
147 struct nouveau_bo *nvbo = nouveau_bo(bo); 147 struct nouveau_bo *nvbo = nouveau_bo(bo);
148 148
149 if (unlikely(nvbo->gem)) 149 if (unlikely(nvbo->gem.filp))
150 DRM_ERROR("bo %p still attached to GEM object\n", bo); 150 DRM_ERROR("bo %p still attached to GEM object\n", bo);
151 WARN_ON(nvbo->pin_refcnt > 0); 151 WARN_ON(nvbo->pin_refcnt > 0);
152 nv10_bo_put_tile_region(dev, nvbo->tile, NULL); 152 nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
@@ -1267,7 +1267,7 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
1267{ 1267{
1268 struct nouveau_bo *nvbo = nouveau_bo(bo); 1268 struct nouveau_bo *nvbo = nouveau_bo(bo);
1269 1269
1270 return drm_vma_node_verify_access(&nvbo->gem->vma_node, filp); 1270 return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
1271} 1271}
1272 1272
1273static int 1273static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 653dbbbd4fa1..ff17c1f432fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -27,7 +27,10 @@ struct nouveau_bo {
27 u32 tile_flags; 27 u32 tile_flags;
28 struct nouveau_drm_tile *tile; 28 struct nouveau_drm_tile *tile;
29 29
30 struct drm_gem_object *gem; 30 /* Only valid if allocated via nouveau_gem_new() and iff you hold a
31 * gem reference to it! For debugging, use gem.filp != NULL to test
32 * whether it is valid. */
33 struct drm_gem_object gem;
31 34
32 /* protect by the ttm reservation lock */ 35 /* protect by the ttm reservation lock */
33 int pin_refcnt; 36 int pin_refcnt;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index c5b36f9e9a10..2136d0038252 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -215,8 +215,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
215 connector->doublescan_allowed = true; 215 connector->doublescan_allowed = true;
216 if (nv_device(drm->device)->card_type == NV_20 || 216 if (nv_device(drm->device)->card_type == NV_20 ||
217 (nv_device(drm->device)->card_type == NV_10 && 217 (nv_device(drm->device)->card_type == NV_10 &&
218 (dev->pci_device & 0x0ff0) != 0x0100 && 218 (dev->pdev->device & 0x0ff0) != 0x0100 &&
219 (dev->pci_device & 0x0ff0) != 0x0150)) 219 (dev->pdev->device & 0x0ff0) != 0x0150))
220 /* HW is broken */ 220 /* HW is broken */
221 connector->interlace_allowed = false; 221 connector->interlace_allowed = false;
222 else 222 else
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7848590f5568..bdd5cf71a24c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -50,7 +50,7 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
50 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); 50 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
51 51
52 if (fb->nvbo) 52 if (fb->nvbo)
53 drm_gem_object_unreference_unlocked(fb->nvbo->gem); 53 drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
54 54
55 drm_framebuffer_cleanup(drm_fb); 55 drm_framebuffer_cleanup(drm_fb);
56 kfree(fb); 56 kfree(fb);
@@ -63,7 +63,7 @@ nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
63{ 63{
64 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); 64 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
65 65
66 return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle); 66 return drm_gem_handle_create(file_priv, &fb->nvbo->gem, handle);
67} 67}
68 68
69static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = { 69static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
@@ -674,8 +674,8 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
674 if (ret) 674 if (ret)
675 return ret; 675 return ret;
676 676
677 ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle); 677 ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
678 drm_gem_object_unreference_unlocked(bo->gem); 678 drm_gem_object_unreference_unlocked(&bo->gem);
679 return ret; 679 return ret;
680} 680}
681 681
@@ -688,7 +688,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
688 688
689 gem = drm_gem_object_lookup(dev, file_priv, handle); 689 gem = drm_gem_object_lookup(dev, file_priv, handle);
690 if (gem) { 690 if (gem) {
691 struct nouveau_bo *bo = gem->driver_private; 691 struct nouveau_bo *bo = nouveau_gem_object(gem);
692 *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node); 692 *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
693 drm_gem_object_unreference_unlocked(gem); 693 drm_gem_object_unreference_unlocked(gem);
694 return 0; 694 return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index e893c5362402..428d818be775 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -834,7 +834,6 @@ driver = {
834 .gem_prime_vmap = nouveau_gem_prime_vmap, 834 .gem_prime_vmap = nouveau_gem_prime_vmap,
835 .gem_prime_vunmap = nouveau_gem_prime_vunmap, 835 .gem_prime_vunmap = nouveau_gem_prime_vunmap,
836 836
837 .gem_init_object = nouveau_gem_object_new,
838 .gem_free_object = nouveau_gem_object_del, 837 .gem_free_object = nouveau_gem_object_del,
839 .gem_open_object = nouveau_gem_object_open, 838 .gem_open_object = nouveau_gem_object_open,
840 .gem_close_object = nouveau_gem_object_close, 839 .gem_close_object = nouveau_gem_object_close,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index a86ecf65c164..c80b519b513a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -420,7 +420,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
420 nouveau_bo_unmap(nouveau_fb->nvbo); 420 nouveau_bo_unmap(nouveau_fb->nvbo);
421 nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma); 421 nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
422 nouveau_bo_unpin(nouveau_fb->nvbo); 422 nouveau_bo_unpin(nouveau_fb->nvbo);
423 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); 423 drm_gem_object_unreference_unlocked(&nouveau_fb->nvbo->gem);
424 nouveau_fb->nvbo = NULL; 424 nouveau_fb->nvbo = NULL;
425 } 425 }
426 drm_fb_helper_fini(&fbcon->helper); 426 drm_fb_helper_fini(&fbcon->helper);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index f32b71238c03..418a6177a653 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -34,29 +34,20 @@
34#include "nouveau_ttm.h" 34#include "nouveau_ttm.h"
35#include "nouveau_gem.h" 35#include "nouveau_gem.h"
36 36
37int
38nouveau_gem_object_new(struct drm_gem_object *gem)
39{
40 return 0;
41}
42
43void 37void
44nouveau_gem_object_del(struct drm_gem_object *gem) 38nouveau_gem_object_del(struct drm_gem_object *gem)
45{ 39{
46 struct nouveau_bo *nvbo = gem->driver_private; 40 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
47 struct ttm_buffer_object *bo = &nvbo->bo; 41 struct ttm_buffer_object *bo = &nvbo->bo;
48 42
49 if (!nvbo)
50 return;
51 nvbo->gem = NULL;
52
53 if (gem->import_attach) 43 if (gem->import_attach)
54 drm_prime_gem_destroy(gem, nvbo->bo.sg); 44 drm_prime_gem_destroy(gem, nvbo->bo.sg);
55 45
56 ttm_bo_unref(&bo);
57
58 drm_gem_object_release(gem); 46 drm_gem_object_release(gem);
59 kfree(gem); 47
48 /* reset filp so nouveau_bo_del_ttm() can test for it */
49 gem->filp = NULL;
50 ttm_bo_unref(&bo);
60} 51}
61 52
62int 53int
@@ -186,14 +177,15 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
186 if (nv_device(drm->device)->card_type >= NV_50) 177 if (nv_device(drm->device)->card_type >= NV_50)
187 nvbo->valid_domains &= domain; 178 nvbo->valid_domains &= domain;
188 179
189 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size); 180 /* Initialize the embedded gem-object. We return a single gem-reference
190 if (!nvbo->gem) { 181 * to the caller, instead of a normal nouveau_bo ttm reference. */
182 ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
183 if (ret) {
191 nouveau_bo_ref(NULL, pnvbo); 184 nouveau_bo_ref(NULL, pnvbo);
192 return -ENOMEM; 185 return -ENOMEM;
193 } 186 }
194 187
195 nvbo->bo.persistent_swap_storage = nvbo->gem->filp; 188 nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
196 nvbo->gem->driver_private = nvbo;
197 return 0; 189 return 0;
198} 190}
199 191
@@ -250,15 +242,15 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
250 if (ret) 242 if (ret)
251 return ret; 243 return ret;
252 244
253 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); 245 ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
254 if (ret == 0) { 246 if (ret == 0) {
255 ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info); 247 ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
256 if (ret) 248 if (ret)
257 drm_gem_handle_delete(file_priv, req->info.handle); 249 drm_gem_handle_delete(file_priv, req->info.handle);
258 } 250 }
259 251
260 /* drop reference from allocate - handle holds it now */ 252 /* drop reference from allocate - handle holds it now */
261 drm_gem_object_unreference_unlocked(nvbo->gem); 253 drm_gem_object_unreference_unlocked(&nvbo->gem);
262 return ret; 254 return ret;
263} 255}
264 256
@@ -266,7 +258,7 @@ static int
266nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains, 258nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
267 uint32_t write_domains, uint32_t valid_domains) 259 uint32_t write_domains, uint32_t valid_domains)
268{ 260{
269 struct nouveau_bo *nvbo = gem->driver_private; 261 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
270 struct ttm_buffer_object *bo = &nvbo->bo; 262 struct ttm_buffer_object *bo = &nvbo->bo;
271 uint32_t domains = valid_domains & nvbo->valid_domains & 263 uint32_t domains = valid_domains & nvbo->valid_domains &
272 (write_domains ? write_domains : read_domains); 264 (write_domains ? write_domains : read_domains);
@@ -327,7 +319,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
 		list_del(&nvbo->entry);
 		nvbo->reserved_by = NULL;
 		ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
-		drm_gem_object_unreference_unlocked(nvbo->gem);
+		drm_gem_object_unreference_unlocked(&nvbo->gem);
 	}
 }
 
@@ -376,7 +368,7 @@ retry:
 			validate_fini(op, NULL);
 			return -ENOENT;
 		}
-		nvbo = gem->driver_private;
+		nvbo = nouveau_gem_object(gem);
 		if (nvbo == res_bo) {
 			res_bo = NULL;
 			drm_gem_object_unreference_unlocked(gem);
@@ -478,7 +470,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
 			return ret;
 		}
 
-		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
+		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
 					     b->write_domains,
 					     b->valid_domains);
 		if (unlikely(ret)) {
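The nouveau_gem.c hunks above all follow from one change: the gem object is now embedded in nouveau_bo rather than allocated separately, so it is set up in place with drm_gem_object_init() and torn down before the final ttm reference drops. A minimal sketch of the lifetime pattern, with the hypothetical mydrv_bo standing in for nouveau_bo (the drm_gem_* and ttm_* calls are the real API of this era):

	/* Sketch only: mydrv_bo, mydrv_bo_new() and mydrv_bo_del() are
	 * hypothetical stand-ins for the nouveau functions patched above. */
	struct mydrv_bo {
		struct ttm_buffer_object bo;
		struct drm_gem_object gem;	/* embedded, not a pointer */
	};

	static int mydrv_bo_new(struct drm_device *dev, struct mydrv_bo *nvbo,
				unsigned long size)
	{
		/* Initialize the embedded object in place; the single gem
		 * reference handed to the caller replaces the old separate
		 * drm_gem_object_alloc() allocation. */
		return drm_gem_object_init(dev, &nvbo->gem, size);
	}

	static void mydrv_bo_del(struct drm_gem_object *gem)
	{
		struct mydrv_bo *nvbo = container_of(gem, struct mydrv_bo, gem);
		struct ttm_buffer_object *bo = &nvbo->bo;

		drm_gem_object_release(gem);	/* no kfree(gem): storage is embedded */
		gem->filp = NULL;		/* lets the ttm destructor test for it */
		ttm_bo_unref(&bo);		/* final unref frees the containing bo */
	}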
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 502e4290aa8f..7caca057bc38 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -12,14 +12,13 @@
 static inline struct nouveau_bo *
 nouveau_gem_object(struct drm_gem_object *gem)
 {
-	return gem ? gem->driver_private : NULL;
+	return gem ? container_of(gem, struct nouveau_bo, gem) : NULL;
 }
 
 /* nouveau_gem.c */
 extern int nouveau_gem_new(struct drm_device *, int size, int align,
 			   uint32_t domain, uint32_t tile_mode,
 			   uint32_t tile_flags, struct nouveau_bo **);
-extern int nouveau_gem_object_new(struct drm_gem_object *);
 extern void nouveau_gem_object_del(struct drm_gem_object *);
 extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
 extern void nouveau_gem_object_close(struct drm_gem_object *,
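With the object embedded, the accessor above is pure pointer arithmetic instead of a chase through driver_private, and it round-trips with the &nvbo->gem expressions used throughout nouveau_gem.c. A two-line illustration (nvbo is any valid nouveau_bo):

	struct drm_gem_object *gem = &nvbo->gem;	/* no allocation, no refcount change */
	WARN_ON(nouveau_gem_object(gem) != nvbo);	/* container_of() round-trip */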
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index e90468d5e5c0..51a2cb102b44 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -71,14 +71,16 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 		return ERR_PTR(ret);
 
 	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
-	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
-	if (!nvbo->gem) {
+
+	/* Initialize the embedded gem-object. We return a single gem-reference
+	 * to the caller, instead of a normal nouveau_bo ttm reference. */
+	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
+	if (ret) {
 		nouveau_bo_ref(NULL, &nvbo);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	nvbo->gem->driver_private = nvbo;
-	return nvbo->gem;
+	return &nvbo->gem;
 }
 
 int nouveau_gem_prime_pin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 2603d909f49c..e7fa3cd96743 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -620,7 +620,6 @@ static struct drm_driver omap_drm_driver = {
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export = omap_gem_prime_export,
 	.gem_prime_import = omap_gem_prime_import,
-	.gem_init_object = omap_gem_init_object,
 	.gem_free_object = omap_gem_free_object,
 	.gem_vm_ops = &omap_gem_vm_ops,
 	.dumb_create = omap_gem_dumb_create,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 30b95b736658..07847693cf49 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -220,7 +220,6 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 		union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
 void omap_gem_free_object(struct drm_gem_object *obj);
-int omap_gem_init_object(struct drm_gem_object *obj);
 void *omap_gem_vaddr(struct drm_gem_object *obj);
 int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 		uint32_t handle, uint64_t *offset);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 533f6ebec531..5aec3e81fe24 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1274,11 +1274,6 @@ unlock:
 	return ret;
 }
 
-int omap_gem_init_object(struct drm_gem_object *obj)
-{
-	return -EINVAL; /* unused */
-}
-
 /* don't call directly.. called from GEM core when it is time to actually
  * free the object..
  */
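omap_gem_init_object() existed only to satisfy the drm_driver.gem_init_object hook, which the GEM core no longer calls; the same dead hook is deleted from qxl, radeon, and udl below. With the hook gone, all object setup happens in the driver's own constructor before a handle is published. A rough sketch of such a constructor, with the hypothetical foo_gem_create() (the drm_gem_* calls are real):

	/* Rough sketch; foo_gem_create() is hypothetical. Without the
	 * gem_init_object hook, everything happens here up front. */
	static int foo_gem_create(struct drm_device *dev, struct drm_file *file,
				  size_t size, u32 *handle)
	{
		struct drm_gem_object *obj;
		int ret;

		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		if (!obj)
			return -ENOMEM;

		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

		ret = drm_gem_handle_create(file, obj, handle);
		/* drop the allocation reference; the handle now holds one */
		drm_gem_object_unreference_unlocked(obj);
		return ret;

	err_free:
		kfree(obj);
		return ret;
	}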
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 9263db117ff8..cb858600185f 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -261,7 +261,7 @@ int omap_drm_irq_install(struct drm_device *dev)
 		mutex_unlock(&dev->struct_mutex);
 		return -EBUSY;
 	}
-	dev->irq_enabled = 1;
+	dev->irq_enabled = true;
 	mutex_unlock(&dev->struct_mutex);
 
 	/* Before installing handler */
@@ -272,7 +272,7 @@ int omap_drm_irq_install(struct drm_device *dev)
 
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
-		dev->irq_enabled = 0;
+		dev->irq_enabled = false;
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
@@ -283,7 +283,7 @@ int omap_drm_irq_install(struct drm_device *dev)
 
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
-		dev->irq_enabled = 0;
+		dev->irq_enabled = false;
 		mutex_unlock(&dev->struct_mutex);
 		dispc_free_irq(dev);
 	}
@@ -294,11 +294,12 @@ int omap_drm_irq_uninstall(struct drm_device *dev)
 int omap_drm_irq_uninstall(struct drm_device *dev)
 {
 	unsigned long irqflags;
-	int irq_enabled, i;
+	bool irq_enabled;
+	int i;
 
 	mutex_lock(&dev->struct_mutex);
 	irq_enabled = dev->irq_enabled;
-	dev->irq_enabled = 0;
+	dev->irq_enabled = false;
 	mutex_unlock(&dev->struct_mutex);
 
 	/*
@@ -307,9 +308,9 @@ int omap_drm_irq_uninstall(struct drm_device *dev)
 	if (dev->num_crtcs) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 		for (i = 0; i < dev->num_crtcs; i++) {
-			DRM_WAKEUP(&dev->vbl_queue[i]);
-			dev->vblank_enabled[i] = 0;
-			dev->last_vblank[i] =
+			DRM_WAKEUP(&dev->vblank[i].queue);
+			dev->vblank[i].enabled = false;
+			dev->vblank[i].last =
 				dev->driver->get_vblank_counter(dev, i);
 		}
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
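The uninstall hunk also picks up the DRM core's consolidation of per-CRTC vblank state: the parallel vbl_queue[], vblank_enabled[] and last_vblank[] arrays on drm_device were folded into a single dev->vblank[] array of per-CRTC structs. In rough outline (field names taken from the accesses above; the upstream struct drm_vblank_crtc carries additional counters and a refcount):

	/* Rough outline only; see the full definition of drm_vblank_crtc
	 * upstream. The three fields shown match the accesses above. */
	struct drm_vblank_crtc {
		wait_queue_head_t queue;	/* was dev->vbl_queue[i] */
		u32 last;			/* was dev->last_vblank[i] */
		bool enabled;			/* was dev->vblank_enabled[i] */
	};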
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 514118ae72d4..fee8748bdca5 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -225,7 +225,6 @@ static struct drm_driver qxl_driver = {
 	.debugfs_init = qxl_debugfs_init,
 	.debugfs_cleanup = qxl_debugfs_takedown,
 #endif
-	.gem_init_object = qxl_gem_object_init,
 	.gem_free_object = qxl_gem_object_free,
 	.gem_open_object = qxl_gem_object_open,
 	.gem_close_object = qxl_gem_object_close,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index f7c9adde46a0..41d22ed26060 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -412,7 +412,6 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
 				      struct qxl_surface *surf,
 				      struct qxl_bo **qobj,
 				      uint32_t *handle);
-int qxl_gem_object_init(struct drm_gem_object *obj);
 void qxl_gem_object_free(struct drm_gem_object *gobj);
 int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
 void qxl_gem_object_close(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index 1648e4125af7..b96f0c9d89b2 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -28,12 +28,6 @@
28#include "qxl_drv.h" 28#include "qxl_drv.h"
29#include "qxl_object.h" 29#include "qxl_object.h"
30 30
31int qxl_gem_object_init(struct drm_gem_object *obj)
32{
33 /* we do nothings here */
34 return 0;
35}
36
37void qxl_gem_object_free(struct drm_gem_object *gobj) 31void qxl_gem_object_free(struct drm_gem_object *gobj)
38{ 32{
39 struct qxl_bo *qobj = gem_to_qxl_bo(gobj); 33 struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 00885417ffff..fb3ae07a1469 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -690,8 +690,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
 
 	/* set the lane count on the sink */
 	tmp = dp_info->dp_lane_count;
-	if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 &&
-	    dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
+	if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
 		tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
 	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
 
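The two-line DPCD test removed above is now provided by a helper in drm_dp_helper.h; it encapsulates exactly the same check, so the call site is behavior-neutral. The helper reads, modulo formatting:

	static inline bool
	drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
	{
		return dpcd[DP_DPCD_REV] >= 0x11 &&
			(dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
	}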
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 32923d2f6002..28e2dc48e015 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -213,7 +213,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
 	props.type = BACKLIGHT_RAW;
 	snprintf(bl_name, sizeof(bl_name),
 		 "radeon_bl%d", dev->primary->index);
-	bd = backlight_device_register(bl_name, &drm_connector->kdev,
+	bd = backlight_device_register(bl_name, drm_connector->kdev,
 				       pdata, &radeon_atom_backlight_ops, &props);
 	if (IS_ERR(bd)) {
 		DRM_ERROR("Backlight registration failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 061b227dae0c..c155d6f3fa68 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -499,7 +499,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
 	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
 	fp2_gen_cntl = 0;
 
-	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+	if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
 		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
 	}
 
@@ -536,7 +536,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
 					(RADEON_CRTC_SYNC_TRISTAT |
 					 RADEON_CRTC_DISPLAY_DIS)));
 
-	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+	if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
 		WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
 	}
 
@@ -554,7 +554,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
 		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
 	}
 	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
-	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+	if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
 		WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
 	}
 	return r;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cdd12dcd988b..22f685827b7e 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -100,7 +100,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
 void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
 irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
-int radeon_gem_object_init(struct drm_gem_object *obj);
 void radeon_gem_object_free(struct drm_gem_object *obj);
 int radeon_gem_object_open(struct drm_gem_object *obj,
 				struct drm_file *file_priv);
@@ -408,7 +407,6 @@ static struct drm_driver kms_driver = {
 	.irq_uninstall = radeon_driver_irq_uninstall_kms,
 	.irq_handler = radeon_driver_irq_handler_kms,
 	.ioctls = radeon_ioctls_kms,
-	.gem_init_object = radeon_gem_object_init,
 	.gem_free_object = radeon_gem_object_free,
 	.gem_open_object = radeon_gem_object_open,
 	.gem_close_object = radeon_gem_object_close,
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index dce99c8a5835..805c5e566b9a 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -29,13 +29,6 @@
 #include <drm/radeon_drm.h>
 #include "radeon.h"
 
-int radeon_gem_object_init(struct drm_gem_object *obj)
-{
-	BUG();
-
-	return 0;
-}
-
 void radeon_gem_object_free(struct drm_gem_object *gobj)
 {
 	struct radeon_bo *robj = gem_to_radeon_bo(gobj);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 61580ddc4eb2..d6b36766e8c9 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -191,7 +191,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 	switch (info->request) {
 	case RADEON_INFO_DEVICE_ID:
-		*value = dev->pci_device;
+		*value = dev->pdev->device;
 		break;
 	case RADEON_INFO_NUM_GB_PIPES:
 		*value = rdev->num_gb_pipes;
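RADEON_INFO_DEVICE_ID illustrates the wider cleanup in this series: drm_device no longer mirrors PCI identity in its own pci_device/pci_vendor fields, and callers read the canonical values from the backing pci_dev instead. For example:

	u16 device = dev->pdev->device;	/* was dev->pci_device */
	u16 vendor = dev->pdev->vendor;	/* was dev->pci_vendor */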
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 62cd512f5c8d..c89971d904c3 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -392,7 +392,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
 	props.type = BACKLIGHT_RAW;
 	snprintf(bl_name, sizeof(bl_name),
 		 "radeon_bl%d", dev->primary->index);
-	bd = backlight_device_register(bl_name, &drm_connector->kdev,
+	bd = backlight_device_register(bl_name, drm_connector->kdev,
 				       pdata, &radeon_backlight_ops, &props);
 	if (IS_ERR(bd)) {
 		DRM_ERROR("Backlight registration failed\n");
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 7650dc0d78ce..3ddd6cd98ac1 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -77,7 +77,6 @@ static struct drm_driver driver = {
 	.unload = udl_driver_unload,
 
 	/* gem hooks */
-	.gem_init_object = udl_gem_init_object,
 	.gem_free_object = udl_gem_free_object,
 	.gem_vm_ops = &udl_gem_vm_ops,
 
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 56aec9409fa3..1fbf7b357f16 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -115,7 +115,6 @@ int udl_dumb_create(struct drm_file *file_priv,
 int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
 		 uint32_t handle, uint64_t *offset);
 
-int udl_gem_init_object(struct drm_gem_object *obj);
 void udl_gem_free_object(struct drm_gem_object *gem_obj);
 struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
 					    size_t size);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8bf646183bac..24ffbe990736 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,13 +107,6 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 }
 
-int udl_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-
-	return 0;
-}
-
 static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
 {
 	struct page **pages;
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 7e3ad87c366c..927889105483 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -79,7 +79,7 @@ int via_final_context(struct drm_device *dev, int context)
 
 	/* Linux specific until context tracking code gets ported to BSD */
 	/* Last context, perform cleanup */
-	if (dev->ctx_count == 1 && dev->dev_private) {
+	if (list_is_singular(&dev->ctxlist) && dev->dev_private) {
 		DRM_DEBUG("Last Context\n");
 		drm_irq_uninstall(dev);
 		via_cleanup_futex(dev_priv);
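via kept a private ctx_count mirror of the DRM context list; the replacement asks the list itself whether exactly one context remains, which removes one more piece of state drivers had to keep in sync. list_is_singular() is the stock helper from include/linux/list.h:

	/* From include/linux/list.h: true when the list has exactly one entry. */
	static inline int list_is_singular(const struct list_head *head)
	{
		return !list_empty(head) && (head->next == head->prev);
	}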
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
index 8c61ceeaa12d..df7d90a3a4fa 100644
--- a/drivers/gpu/host1x/drm/drm.c
+++ b/drivers/gpu/host1x/drm/drm.c
@@ -264,7 +264,7 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 	 * core, so we need to set this manually in order to allow the
 	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
 	 */
-	drm->irq_enabled = 1;
+	drm->irq_enabled = true;
 
 	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
 	if (err < 0)