-rw-r--r-- drivers/gpu/drm/Kconfig | 69
-rw-r--r-- drivers/gpu/drm/Makefile | 3
-rw-r--r-- drivers/gpu/drm/ast/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.c | 1
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.h | 1
-rw-r--r-- drivers/gpu/drm/ast/ast_main.c | 6
-rw-r--r-- drivers/gpu/drm/cirrus/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_drv.c | 1
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_drv.h | 1
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_main.c | 6
-rw-r--r-- drivers/gpu/drm/drm_context.c | 2
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 102
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper.c | 18
-rw-r--r-- drivers/gpu/drm/drm_dp_helper.c | 16
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 72
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 277
-rw-r--r-- drivers/gpu/drm/drm_edid_load.c | 108
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 17
-rw-r--r-- drivers/gpu/drm/drm_fops.c | 68
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 29
-rw-r--r-- drivers/gpu/drm/drm_global.c | 2
-rw-r--r-- drivers/gpu/drm/drm_info.c | 6
-rw-r--r-- drivers/gpu/drm/drm_ioctl.c | 21
-rw-r--r-- drivers/gpu/drm/drm_irq.c | 152
-rw-r--r-- drivers/gpu/drm/drm_lock.c | 3
-rw-r--r-- drivers/gpu/drm/drm_modes.c | 41
-rw-r--r-- drivers/gpu/drm/drm_pci.c | 65
-rw-r--r-- drivers/gpu/drm/drm_platform.c | 59
-rw-r--r-- drivers/gpu/drm/drm_prime.c | 3
-rw-r--r-- drivers/gpu/drm/drm_stub.c | 301
-rw-r--r-- drivers/gpu/drm/drm_usb.c | 57
-rw-r--r-- drivers/gpu/drm/exynos/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.c | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimd.c | 8
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gem.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gem.h | 3
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_vidi.c | 16
-rw-r--r-- drivers/gpu/drm/gma500/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/gma500/gem.c | 5
-rw-r--r-- drivers/gpu/drm/gma500/psb_drv.c | 3
-rw-r--r-- drivers/gpu/drm/gma500/psb_drv.h | 9
-rw-r--r-- drivers/gpu/drm/gma500/psb_irq.c | 22
-rw-r--r-- drivers/gpu/drm/i810/i810_dma.c | 11
-rw-r--r-- drivers/gpu/drm/i915/Kconfig | 67
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 6
-rw-r--r-- drivers/gpu/drm/i915/dvo.h | 11
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 1092
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 118
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 153
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 396
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 555
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.c | 61
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c | 50
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 366
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_stolen.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_tiling.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 44
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 628
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 673
-rw-r--r-- drivers/gpu/drm/i915/i915_suspend.c | 15
-rw-r--r-- drivers/gpu/drm/i915/i915_sysfs.c | 100
-rw-r--r-- drivers/gpu/drm/i915/i915_trace.h | 62
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 195
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.h | 87
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 18
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 127
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 1602
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 671
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 563
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi.c | 620
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi.h | 102
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi_cmd.c | 427
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi_cmd.h | 109
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi_pll.c | 317
-rw-r--r-- drivers/gpu/drm/i915/intel_dvo.c | 12
-rw-r--r-- drivers/gpu/drm/i915/intel_fbdev.c (renamed from drivers/gpu/drm/i915/intel_fb.c) | 33
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 81
-rw-r--r-- drivers/gpu/drm/i915/intel_i2c.c | 64
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 16
-rw-r--r-- drivers/gpu/drm/i915/intel_opregion.c | 434
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 9
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 207
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 1172
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 81
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.h | 15
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 52
-rw-r--r-- drivers/gpu/drm/i915/intel_sideband.c | 79
-rw-r--r-- drivers/gpu/drm/i915/intel_sprite.c | 192
-rw-r--r-- drivers/gpu/drm/i915/intel_tv.c | 21
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 377
-rw-r--r-- drivers/gpu/drm/mga/mga_dma.c | 5
-rw-r--r-- drivers/gpu/drm/mga/mga_irq.c | 2
-rw-r--r-- drivers/gpu/drm/mgag200/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_drv.c | 1
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_drv.h | 1
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_main.c | 6
-rw-r--r-- drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/nouveau/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/arb.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/crtc.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/dfp.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/disp.h | 6
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/hw.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_abi16.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bios.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.h | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_connector.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.c | 42
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_prime.c | 10
-rw-r--r-- drivers/gpu/drm/omapdrm/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_drv.c | 1
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_drv.h | 1
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_gem.c | 5
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_irq.c | 17
-rw-r--r-- drivers/gpu/drm/qxl/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/qxl/qxl_drv.c | 1
-rw-r--r-- drivers/gpu/drm/qxl/qxl_drv.h | 1
-rw-r--r-- drivers/gpu/drm/qxl/qxl_gem.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/atombios_dp.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_bios.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gem.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 2
-rw-r--r-- drivers/gpu/drm/rcar-du/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/shmobile/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/tilcdc/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/udl/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/udl/udl_drv.c | 1
-rw-r--r-- drivers/gpu/drm/udl/udl_drv.h | 1
-rw-r--r-- drivers/gpu/drm/udl/udl_gem.c | 7
-rw-r--r-- drivers/gpu/drm/via/via_mm.c | 2
-rw-r--r-- drivers/gpu/host1x/drm/Kconfig | 1
-rw-r--r-- drivers/gpu/host1x/drm/drm.c | 2
-rw-r--r-- drivers/staging/imx-drm/Kconfig | 1
-rw-r--r-- drivers/staging/imx-drm/imx-drm-core.c | 8
-rw-r--r-- include/drm/drmP.h | 60
-rw-r--r-- include/drm/drm_crtc.h | 18
-rw-r--r-- include/drm/drm_dp_helper.h | 31
-rw-r--r-- include/linux/cpufreq.h | 8
-rw-r--r-- include/uapi/drm/drm.h | 37
-rw-r--r-- include/uapi/drm/drm_mode.h | 45
-rw-r--r-- include/uapi/drm/i915_drm.h | 8
148 files changed, 10006 insertions(+), 4105 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 955555d6ec88..b4e4fc0d6650 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -29,11 +29,17 @@ config DRM_USB
 config DRM_KMS_HELPER
 	tristate
 	depends on DRM
+	help
+	  CRTC helpers for KMS drivers.
+
+config DRM_KMS_FB_HELPER
+	bool
+	depends on DRM_KMS_HELPER
 	select FB
 	select FRAMEBUFFER_CONSOLE if !EXPERT
 	select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
 	help
-	  FB and CRTC helpers for KMS drivers.
+	  FBDEV helpers for KMS drivers.
 
 config DRM_LOAD_EDID_FIRMWARE
 	bool "Allow to specify an EDID data set instead of probing for it"
@@ -64,6 +70,7 @@ config DRM_GEM_CMA_HELPER
 config DRM_KMS_CMA_HELPER
 	bool
 	select DRM_GEM_CMA_HELPER
+	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
@@ -96,6 +103,7 @@ config DRM_RADEON
 	select FB_CFB_IMAGEBLIT
 	select FW_LOADER
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	select POWER_SUPPLY
 	select HWMON
@@ -120,64 +128,7 @@ config DRM_I810
 	  selected, the module will be called i810. AGP support is required
 	  for this driver to work.
 
-config DRM_I915
-	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
-	depends on DRM
-	depends on AGP
-	depends on AGP_INTEL
-	# we need shmfs for the swappable backing store, and in particular
-	# the shmem_readpage() which depends upon tmpfs
-	select SHMEM
-	select TMPFS
-	select DRM_KMS_HELPER
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
-	# i915 depends on ACPI_VIDEO when ACPI is enabled
-	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
-	select BACKLIGHT_LCD_SUPPORT if ACPI
-	select BACKLIGHT_CLASS_DEVICE if ACPI
-	select VIDEO_OUTPUT_CONTROL if ACPI
-	select INPUT if ACPI
-	select THERMAL if ACPI
-	select ACPI_VIDEO if ACPI
-	select ACPI_BUTTON if ACPI
-	help
-	  Choose this option if you have a system that has "Intel Graphics
-	  Media Accelerator" or "HD Graphics" integrated graphics,
-	  including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
-	  G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
-	  Core i5, Core i7 as well as Atom CPUs with integrated graphics.
-	  If M is selected, the module will be called i915.  AGP support
-	  is required for this driver to work. This driver is used by
-	  the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
-	  replaces the older i830 module that supported a subset of the
-	  hardware in older X.org releases.
-
-	  Note that the older i810/i815 chipsets require the use of the
-	  i810 driver instead, and the Atom z5xx series has an entirely
-	  different implementation.
-
-config DRM_I915_KMS
-	bool "Enable modesetting on intel by default"
-	depends on DRM_I915
-	help
-	  Choose this option if you want kernel modesetting enabled by default,
-	  and you have a new enough userspace to support this. Running old
-	  userspaces with this enabled will cause pain. Note that this causes
-	  the driver to bind to PCI devices, which precludes loading things
-	  like intelfb.
-
-config DRM_I915_PRELIMINARY_HW_SUPPORT
-	bool "Enable preliminary support for prerelease Intel hardware by default"
-	depends on DRM_I915
-	help
-	  Choose this option if you have prerelease Intel hardware and want the
-	  i915 driver to support it by default. You can enable such support at
-	  runtime with the module option i915.preliminary_hw_support=1; this
-	  option changes the default for that module option.
-
-	  If in doubt, say "N".
+source "drivers/gpu/drm/i915/Kconfig"
 
 config DRM_MGA
 	tristate "Matrox g200/g400"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index f089adfe70ee..5af240bfd29f 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -21,8 +21,9 @@ drm-$(CONFIG_PCI) += ati_pcigart.o
 
 drm-usb-y := drm_usb.o
 
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index da4a51eae824..8a784c460c89 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -6,6 +6,7 @@ config DRM_AST
 	select FB_SYS_FILLRECT
 	select FB_SYS_IMAGEBLIT
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	help
 	  Say yes for experimental AST GPU driver. Do not enable
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 32e270dc714e..5137f15dba19 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -211,7 +211,6 @@ static struct drm_driver driver = {
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
 
-	.gem_init_object = ast_gem_init_object,
 	.gem_free_object = ast_gem_free_object,
 	.dumb_create = ast_dumb_create,
 	.dumb_map_offset = ast_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 8492b68e873c..9833a1b1acc1 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -323,7 +323,6 @@ extern int ast_dumb_create(struct drm_file *file,
 			   struct drm_device *dev,
 			   struct drm_mode_create_dumb *args);
 
-extern int ast_gem_init_object(struct drm_gem_object *obj);
 extern void ast_gem_free_object(struct drm_gem_object *obj);
 extern int ast_dumb_mmap_offset(struct drm_file *file,
 				struct drm_device *dev,
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 7f6152d374ca..af0b868a9dfd 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -449,12 +449,6 @@ int ast_dumb_create(struct drm_file *file,
 	return 0;
 }
 
-int ast_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-	return 0;
-}
-
 void ast_bo_unref(struct ast_bo **bo)
 {
 	struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index bf67b22723f9..9864559e5fb9 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -5,6 +5,7 @@ config DRM_CIRRUS_QEMU
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	help
 	  This is a KMS driver for emulated cirrus device in qemu.
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 138364d91782..953fc8aea69c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -97,7 +97,6 @@ static struct drm_driver driver = {
 	.major = DRIVER_MAJOR,
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
-	.gem_init_object = cirrus_gem_init_object,
 	.gem_free_object = cirrus_gem_free_object,
 	.dumb_create = cirrus_dumb_create,
 	.dumb_map_offset = cirrus_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 9b0bb9184afd..b6aded73838b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -191,7 +191,6 @@ int cirrus_device_init(struct cirrus_device *cdev,
191 struct pci_dev *pdev, 191 struct pci_dev *pdev,
192 uint32_t flags); 192 uint32_t flags);
193void cirrus_device_fini(struct cirrus_device *cdev); 193void cirrus_device_fini(struct cirrus_device *cdev);
194int cirrus_gem_init_object(struct drm_gem_object *obj);
195void cirrus_gem_free_object(struct drm_gem_object *obj); 194void cirrus_gem_free_object(struct drm_gem_object *obj);
196int cirrus_dumb_mmap_offset(struct drm_file *file, 195int cirrus_dumb_mmap_offset(struct drm_file *file,
197 struct drm_device *dev, 196 struct drm_device *dev,
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index f130a533a512..78e76f24343d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -255,12 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
255 return 0; 255 return 0;
256} 256}
257 257
258int cirrus_gem_init_object(struct drm_gem_object *obj)
259{
260 BUG();
261 return 0;
262}
263
264void cirrus_bo_unref(struct cirrus_bo **bo) 258void cirrus_bo_unref(struct cirrus_bo **bo)
265{ 259{
266 struct ttm_buffer_object *tbo; 260 struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 224ff965bcf7..a4b017b6849e 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -334,7 +334,6 @@ int drm_addctx(struct drm_device *dev, void *data,
 
 	mutex_lock(&dev->ctxlist_mutex);
 	list_add(&ctx_entry->head, &dev->ctxlist);
-	++dev->ctx_count;
 	mutex_unlock(&dev->ctxlist_mutex);
 
 	return 0;
@@ -432,7 +431,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
 			if (pos->handle == ctx->handle) {
 				list_del(&pos->head);
 				kfree(pos);
-				--dev->ctx_count;
 			}
 		}
 	}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index bff2fa941f60..d7a8370e3cdc 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -202,6 +202,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
 	{ DRM_MODE_CONNECTOR_TV, "TV" },
 	{ DRM_MODE_CONNECTOR_eDP, "eDP" },
 	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
+	{ DRM_MODE_CONNECTOR_DSI, "DSI" },
 };
 
 static const struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -211,6 +212,7 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
 	{ DRM_MODE_ENCODER_LVDS, "LVDS" },
 	{ DRM_MODE_ENCODER_TVDAC, "TV" },
 	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+	{ DRM_MODE_ENCODER_DSI, "DSI" },
 };
 
 void drm_connector_ida_init(void)
@@ -1317,6 +1319,9 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
 	if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
 		return -ERANGE;
 
+	if ((in->flags & DRM_MODE_FLAG_3D_MASK) > DRM_MODE_FLAG_3D_MAX)
+		return -EINVAL;
+
 	out->clock = in->clock;
 	out->hdisplay = in->hdisplay;
 	out->hsync_start = in->hsync_start;
@@ -1579,6 +1584,19 @@ out:
 	return ret;
 }
 
+static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
+					 const struct drm_file *file_priv)
+{
+	/*
+	 * If user-space hasn't configured the driver to expose the stereo 3D
+	 * modes, don't expose them.
+	 */
+	if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
+		return false;
+
+	return true;
+}
+
 /**
  * drm_mode_getconnector - get connector configuration
  * @dev: drm device for the ioctl
@@ -1644,7 +1662,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 
 	/* delayed so we get modes regardless of pre-fill_modes state */
 	list_for_each_entry(mode, &connector->modes, head)
-		mode_count++;
+		if (drm_mode_expose_to_userspace(mode, file_priv))
+			mode_count++;
 
 	out_resp->connector_id = connector->base.id;
 	out_resp->connector_type = connector->connector_type;
@@ -1666,6 +1685,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 		copied = 0;
 		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
 		list_for_each_entry(mode, &connector->modes, head) {
+			if (!drm_mode_expose_to_userspace(mode, file_priv))
+				continue;
+
 			drm_crtc_convert_to_umode(&u_mode, mode);
 			if (copy_to_user(mode_ptr + copied,
 					 &u_mode, sizeof(u_mode))) {
@@ -2040,6 +2062,45 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
 }
 EXPORT_SYMBOL(drm_mode_set_config_internal);
 
+/*
+ * Checks that the framebuffer is big enough for the CRTC viewport
+ * (x, y, hdisplay, vdisplay)
+ */
+static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
+				   int x, int y,
+				   const struct drm_display_mode *mode,
+				   const struct drm_framebuffer *fb)
+
+{
+	int hdisplay, vdisplay;
+
+	hdisplay = mode->hdisplay;
+	vdisplay = mode->vdisplay;
+
+	if (drm_mode_is_stereo(mode)) {
+		struct drm_display_mode adjusted = *mode;
+
+		drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
+		hdisplay = adjusted.crtc_hdisplay;
+		vdisplay = adjusted.crtc_vdisplay;
+	}
+
+	if (crtc->invert_dimensions)
+		swap(hdisplay, vdisplay);
+
+	if (hdisplay > fb->width ||
+	    vdisplay > fb->height ||
+	    x > fb->width - hdisplay ||
+	    y > fb->height - vdisplay) {
+		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+			      fb->width, fb->height, hdisplay, vdisplay, x, y,
+			      crtc->invert_dimensions ? " (inverted)" : "");
+		return -ENOSPC;
+	}
+
+	return 0;
+}
+
 /**
  * drm_mode_setcrtc - set CRTC configuration
  * @dev: drm device for the ioctl
2044 * drm_mode_setcrtc - set CRTC configuration 2105 * drm_mode_setcrtc - set CRTC configuration
2045 * @dev: drm device for the ioctl 2106 * @dev: drm device for the ioctl
@@ -2087,7 +2148,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
 	if (crtc_req->mode_valid) {
-		int hdisplay, vdisplay;
 		/* If we have a mode we need a framebuffer. */
 		/* If we pass -1, set the mode with the currently bound fb */
 		if (crtc_req->fb_id == -1) {
@@ -2123,23 +2183,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 
 		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 
-		hdisplay = mode->hdisplay;
-		vdisplay = mode->vdisplay;
-
-		if (crtc->invert_dimensions)
-			swap(hdisplay, vdisplay);
-
-		if (hdisplay > fb->width ||
-		    vdisplay > fb->height ||
-		    crtc_req->x > fb->width - hdisplay ||
-		    crtc_req->y > fb->height - vdisplay) {
-			DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
-				      fb->width, fb->height,
-				      hdisplay, vdisplay, crtc_req->x, crtc_req->y,
-				      crtc->invert_dimensions ? " (inverted)" : "");
-			ret = -ENOSPC;
+		ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
+					      mode, fb);
+		if (ret)
 			goto out;
-		}
+
 	}
 
 	if (crtc_req->count_connectors == 0 && mode) {
@@ -3556,7 +3604,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 	struct drm_framebuffer *fb = NULL, *old_fb = NULL;
 	struct drm_pending_vblank_event *e = NULL;
 	unsigned long flags;
-	int hdisplay, vdisplay;
 	int ret = -EINVAL;
 
 	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
@@ -3588,22 +3635,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 	if (!fb)
 		goto out;
 
-	hdisplay = crtc->mode.hdisplay;
-	vdisplay = crtc->mode.vdisplay;
-
-	if (crtc->invert_dimensions)
-		swap(hdisplay, vdisplay);
-
-	if (hdisplay > fb->width ||
-	    vdisplay > fb->height ||
-	    crtc->x > fb->width - hdisplay ||
-	    crtc->y > fb->height - vdisplay) {
-		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
-			      fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
-			      crtc->invert_dimensions ? " (inverted)" : "");
-		ret = -ENOSPC;
+	ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
+	if (ret)
 		goto out;
-	}
 
 	if (crtc->fb->pixel_format != fb->pixel_format) {
 		DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index c722c3b5404d..0d6469d74be4 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -39,6 +39,10 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_edid.h>
 
+MODULE_AUTHOR("David Airlie, Jesse Barnes");
+MODULE_DESCRIPTION("DRM KMS helper");
+MODULE_LICENSE("GPL and additional rights");
+
 /**
  * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
  * connector list
@@ -76,7 +80,8 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
 {
 	struct drm_display_mode *mode;
 
-	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
+	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
+		      DRM_MODE_FLAG_3D_MASK))
 		return;
 
 	list_for_each_entry(mode, &connector->modes, head) {
@@ -86,6 +91,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
 		if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
 		    !(flags & DRM_MODE_FLAG_DBLSCAN))
 			mode->status = MODE_NO_DBLESCAN;
+		if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
+		    !(flags & DRM_MODE_FLAG_3D_MASK))
+			mode->status = MODE_NO_STEREO;
 	}
 
 	return;
@@ -105,9 +113,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
  * then culled (based on validity and the @maxX, @maxY parameters) and put into
  * the normal modes list.
  *
- * Intended to be use as a generic implementation of the ->probe() @connector
- * callback for drivers that use the crtc helpers for output mode filtering and
- * detection.
+ * Intended to be use as a generic implementation of the ->fill_modes()
+ * @connector vfunc for drivers that use the crtc helpers for output mode
+ * filtering and detection.
  *
  * RETURNS:
  * Number of modes found on @connector.
@@ -175,6 +183,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 			mode_flags |= DRM_MODE_FLAG_INTERLACE;
 		if (connector->doublescan_allowed)
 			mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+		if (connector->stereo_allowed)
+			mode_flags |= DRM_MODE_FLAG_3D_MASK;
 		drm_mode_validate_flag(connector, mode_flags);
 
 		list_for_each_entry(mode, &connector->modes, head) {
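For the new MODE_NO_STEREO filtering to let 3D modes through at all, a driver has to opt its connector in before probing. A sketch of hypothetical driver init code, assuming the stereo_allowed field this series adds to struct drm_connector:

/* Hypothetical connector setup: without this, any probed mode with a
 * DRM_MODE_FLAG_3D_* flag is marked MODE_NO_STEREO by
 * drm_mode_validate_flag() above and culled from the mode list. */
static void example_hdmi_connector_init(struct drm_connector *connector)
{
	connector->stereo_allowed = true;
}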
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 89e196627160..9e978aae8972 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -228,12 +228,12 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
 EXPORT_SYMBOL(i2c_dp_aux_add_bus);
 
 /* Helpers for DP link training */
-static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
 {
 	return link_status[r - DP_LANE0_1_STATUS];
 }
 
-static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+static u8 dp_get_lane_status(const u8 link_status[DP_LINK_STATUS_SIZE],
 			     int lane)
 {
 	int i = DP_LANE0_1_STATUS + (lane >> 1);
@@ -242,7 +242,7 @@ static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
 	return (l >> s) & 0xf;
 }
 
-bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
 			  int lane_count)
 {
 	u8 lane_align;
@@ -262,7 +262,7 @@ bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_channel_eq_ok);
 
-bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
 			      int lane_count)
 {
 	int lane;
@@ -277,7 +277,7 @@ bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
 
-u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
 				     int lane)
 {
 	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -290,7 +290,7 @@ u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
 
-u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
 					  int lane)
 {
 	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -303,7 +303,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
 
-void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
 		udelay(100);
 	else
@@ -311,7 +311,7 @@ void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 }
 EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
 
-void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
 		udelay(400);
 	else
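The constified signatures above let callers hand read-only DPCD snapshots straight to the helpers. A minimal sketch of a caller, with the driver-specific AUX read loop omitted:

#include <drm/drm_dp_helper.h>

/* Sketch: decide whether link training has converged, given a status
 * buffer previously read from DP_LANE0_1_STATUS onward. Both helpers
 * only inspect the bytes, which the const qualifiers now make explicit. */
static bool link_trained(const u8 link_status[DP_LINK_STATUS_SIZE],
			 int lane_count)
{
	return drm_dp_clock_recovery_ok(link_status, lane_count) &&
	       drm_dp_channel_eq_ok(link_status, lane_count);
}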
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index fe58d0833a11..05e197d32c45 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -69,6 +69,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -170,76 +171,6 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
 
-/**
- * drm_legacy_dev_reinit
- *
- * Reinitializes a legacy/ums drm device in it's lastclose function.
- */
-static void drm_legacy_dev_reinit(struct drm_device *dev)
-{
-	int i;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
-	atomic_set(&dev->ioctl_count, 0);
-	atomic_set(&dev->vma_count, 0);
-
-	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
-		atomic_set(&dev->counts[i], 0);
-
-	dev->sigdata.lock = NULL;
-
-	dev->context_flag = 0;
-	dev->last_context = 0;
-	dev->if_version = 0;
-}
-
-/**
- * Take down the DRM device.
- *
- * \param dev DRM device structure.
- *
- * Frees every resource in \p dev.
- *
- * \sa drm_device
- */
-int drm_lastclose(struct drm_device * dev)
-{
-	struct drm_vma_entry *vma, *vma_temp;
-
-	DRM_DEBUG("\n");
-
-	if (dev->driver->lastclose)
-		dev->driver->lastclose(dev);
-	DRM_DEBUG("driver lastclose completed\n");
-
-	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_irq_uninstall(dev);
-
-	mutex_lock(&dev->struct_mutex);
-
-	drm_agp_clear(dev);
-
-	drm_legacy_sg_cleanup(dev);
-
-	/* Clear vma list (only built for debugging) */
-	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
-		list_del(&vma->head);
-		kfree(vma);
-	}
-
-	drm_legacy_dma_takedown(dev);
-
-	dev->dev_mapping = NULL;
-	mutex_unlock(&dev->struct_mutex);
-
-	drm_legacy_dev_reinit(dev);
-
-	DRM_DEBUG("lastclose completed\n");
-	return 0;
-}
-
 /** File operations structure */
 static const struct file_operations drm_stub_fops = {
 	.owner = THIS_MODULE,
@@ -385,7 +316,6 @@ long drm_ioctl(struct file *filp,
 		return -ENODEV;
 
 	atomic_inc(&dev->ioctl_count);
-	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
 	++file_priv->ioctl_count;
 
 	if ((nr >= DRM_CORE_IOCTL_COUNT) &&
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 830f7501cb4d..f1764ec5818b 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1264,6 +1264,18 @@ struct edid *drm_get_edid(struct drm_connector *connector,
 }
 EXPORT_SYMBOL(drm_get_edid);
 
+/**
+ * drm_edid_duplicate - duplicate an EDID and the extensions
+ * @edid: EDID to duplicate
+ *
+ * Return duplicate edid or NULL on allocation failure.
+ */
+struct edid *drm_edid_duplicate(const struct edid *edid)
+{
+	return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_edid_duplicate);
+
 /*** EDID parsing ***/
 
 /**
@@ -2404,7 +2416,7 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
 
 		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
 		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
-		    drm_mode_equal_no_clocks(to_match, cea_mode))
+		    drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
 			return mode + 1;
 	}
 	return 0;
@@ -2453,7 +2465,7 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
 
 		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
 		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
-		    drm_mode_equal_no_clocks(to_match, hdmi_mode))
+		    drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
 			return mode + 1;
 	}
 	return 0;
@@ -2507,6 +2519,9 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
 		if (!newmode)
 			continue;
 
+		/* Carry over the stereo flags */
+		newmode->flags |= mode->flags & DRM_MODE_FLAG_3D_MASK;
+
 		/*
 		 * The current mode could be either variant. Make
 		 * sure to pick the "other" clock for the new mode.
@@ -2553,20 +2568,151 @@ do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
 	return modes;
 }
 
+struct stereo_mandatory_mode {
+	int width, height, vrefresh;
+	unsigned int flags;
+};
+
+static const struct stereo_mandatory_mode stereo_mandatory_modes[] = {
+	{ 1920, 1080, 24, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+	{ 1920, 1080, 24, DRM_MODE_FLAG_3D_FRAME_PACKING },
+	{ 1920, 1080, 50,
+	  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+	{ 1920, 1080, 60,
+	  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+	{ 1280, 720, 50, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+	{ 1280, 720, 50, DRM_MODE_FLAG_3D_FRAME_PACKING },
+	{ 1280, 720, 60, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+	{ 1280, 720, 60, DRM_MODE_FLAG_3D_FRAME_PACKING }
+};
+
+static bool
+stereo_match_mandatory(const struct drm_display_mode *mode,
+		       const struct stereo_mandatory_mode *stereo_mode)
+{
+	unsigned int interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+
+	return mode->hdisplay == stereo_mode->width &&
+	       mode->vdisplay == stereo_mode->height &&
+	       interlaced == (stereo_mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+	       drm_mode_vrefresh(mode) == stereo_mode->vrefresh;
+}
+
+static int add_hdmi_mandatory_stereo_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	const struct drm_display_mode *mode;
+	struct list_head stereo_modes;
+	int modes = 0, i;
+
+	INIT_LIST_HEAD(&stereo_modes);
+
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		for (i = 0; i < ARRAY_SIZE(stereo_mandatory_modes); i++) {
+			const struct stereo_mandatory_mode *mandatory;
+			struct drm_display_mode *new_mode;
+
+			if (!stereo_match_mandatory(mode,
+						    &stereo_mandatory_modes[i]))
+				continue;
+
+			mandatory = &stereo_mandatory_modes[i];
+			new_mode = drm_mode_duplicate(dev, mode);
+			if (!new_mode)
+				continue;
+
+			new_mode->flags |= mandatory->flags;
+			list_add_tail(&new_mode->head, &stereo_modes);
+			modes++;
+		}
+	}
+
+	list_splice_tail(&stereo_modes, &connector->probed_modes);
+
+	return modes;
+}
+
+static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *newmode;
+
+	vic--; /* VICs start at 1 */
+	if (vic >= ARRAY_SIZE(edid_4k_modes)) {
+		DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
+		return 0;
+	}
+
+	newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
+	if (!newmode)
+		return 0;
+
+	drm_mode_probed_add(connector, newmode);
+
+	return 1;
+}
+
+static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
+			       const u8 *video_db, u8 video_len, u8 video_index)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *newmode;
+	int modes = 0;
+	u8 cea_mode;
+
+	if (video_db == NULL || video_index > video_len)
+		return 0;
+
+	/* CEA modes are numbered 1..127 */
+	cea_mode = (video_db[video_index] & 127) - 1;
+	if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
+		return 0;
+
+	if (structure & (1 << 0)) {
+		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		if (newmode) {
+			newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+	if (structure & (1 << 6)) {
+		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		if (newmode) {
+			newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+	if (structure & (1 << 8)) {
+		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		if (newmode) {
+			newmode->flags = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+
+	return modes;
+}
+
 /*
  * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
  * @connector: connector corresponding to the HDMI sink
 * @db: start of the CEA vendor specific block
 * @len: length of the CEA block payload, ie. one can access up to db[len]
 *
- * Parses the HDMI VSDB looking for modes to add to @connector.
+ * Parses the HDMI VSDB looking for modes to add to @connector. This function
+ * also adds the stereo 3d modes when applicable.
 */
 static int
-do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
+do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
+		   const u8 *video_db, u8 video_len)
 {
-	struct drm_device *dev = connector->dev;
-	int modes = 0, offset = 0, i;
-	u8 vic_len;
+	int modes = 0, offset = 0, i, multi_present = 0;
+	u8 vic_len, hdmi_3d_len = 0;
+	u16 mask;
+	u16 structure_all;
 
 	if (len < 8)
 		goto out;
@@ -2585,30 +2731,56 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
 
 	/* the declared length is not long enough for the 2 first bytes
 	 * of additional video format capabilities */
-	offset += 2;
-	if (len < (8 + offset))
+	if (len < (8 + offset + 2))
 		goto out;
 
+	/* 3D_Present */
+	offset++;
+	if (db[8 + offset] & (1 << 7)) {
+		modes += add_hdmi_mandatory_stereo_modes(connector);
+
+		/* 3D_Multi_present */
+		multi_present = (db[8 + offset] & 0x60) >> 5;
+	}
+
+	offset++;
 	vic_len = db[8 + offset] >> 5;
+	hdmi_3d_len = db[8 + offset] & 0x1f;
 
 	for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
-		struct drm_display_mode *newmode;
 		u8 vic;
 
 		vic = db[9 + offset + i];
+		modes += add_hdmi_mode(connector, vic);
+	}
+	offset += 1 + vic_len;
 
-		vic--; /* VICs start at 1 */
-		if (vic >= ARRAY_SIZE(edid_4k_modes)) {
-			DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
-			continue;
-		}
+	if (!(multi_present == 1 || multi_present == 2))
+		goto out;
 
-		newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
-		if (!newmode)
-			continue;
+	if ((multi_present == 1 && len < (9 + offset)) ||
+	    (multi_present == 2 && len < (11 + offset)))
+		goto out;
 
-		drm_mode_probed_add(connector, newmode);
-		modes++;
+	if ((multi_present == 1 && hdmi_3d_len < 2) ||
+	    (multi_present == 2 && hdmi_3d_len < 4))
+		goto out;
+
+	/* 3D_Structure_ALL */
+	structure_all = (db[8 + offset] << 8) | db[9 + offset];
+
+	/* check if 3D_MASK is present */
+	if (multi_present == 2)
+		mask = (db[10 + offset] << 8) | db[11 + offset];
+	else
+		mask = 0xffff;
+
+	for (i = 0; i < 16; i++) {
+		if (mask & (1 << i))
+			modes += add_3d_struct_modes(connector,
+						     structure_all,
+						     video_db,
+						     video_len, i);
 	}
 
 out:
@@ -2668,8 +2840,8 @@ static int
 add_cea_modes(struct drm_connector *connector, struct edid *edid)
 {
 	const u8 *cea = drm_find_cea_extension(edid);
-	const u8 *db;
-	u8 dbl;
+	const u8 *db, *hdmi = NULL, *video = NULL;
+	u8 dbl, hdmi_len, video_len = 0;
 	int modes = 0;
 
 	if (cea && cea_revision(cea) >= 3) {
@@ -2682,13 +2854,26 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
 			db = &cea[i];
 			dbl = cea_db_payload_len(db);
 
-			if (cea_db_tag(db) == VIDEO_BLOCK)
-				modes += do_cea_modes(connector, db + 1, dbl);
-			else if (cea_db_is_hdmi_vsdb(db))
-				modes += do_hdmi_vsdb_modes(connector, db, dbl);
+			if (cea_db_tag(db) == VIDEO_BLOCK) {
+				video = db + 1;
+				video_len = dbl;
+				modes += do_cea_modes(connector, video, dbl);
+			}
+			else if (cea_db_is_hdmi_vsdb(db)) {
+				hdmi = db;
+				hdmi_len = dbl;
+			}
 		}
 	}
 
+	/*
+	 * We parse the HDMI VSDB after having added the cea modes as we will
+	 * be patching their flags when the sink supports stereo 3D.
+	 */
+	if (hdmi)
+		modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len, video,
+					    video_len);
+
 	return modes;
 }
 
@@ -3321,6 +3506,33 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
 }
 EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
 
+static enum hdmi_3d_structure
+s3d_structure_from_display_mode(const struct drm_display_mode *mode)
+{
+	u32 layout = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+	switch (layout) {
+	case DRM_MODE_FLAG_3D_FRAME_PACKING:
+		return HDMI_3D_STRUCTURE_FRAME_PACKING;
+	case DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE:
+		return HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE;
+	case DRM_MODE_FLAG_3D_LINE_ALTERNATIVE:
+		return HDMI_3D_STRUCTURE_LINE_ALTERNATIVE;
+	case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL:
+		return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL;
+	case DRM_MODE_FLAG_3D_L_DEPTH:
+		return HDMI_3D_STRUCTURE_L_DEPTH;
+	case DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH:
+		return HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH;
+	case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
+		return HDMI_3D_STRUCTURE_TOP_AND_BOTTOM;
+	case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
+		return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
+	default:
+		return HDMI_3D_STRUCTURE_INVALID;
+	}
+}
+
 /**
  * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
  * data from a DRM display mode
@@ -3338,20 +3550,29 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
 					    const struct drm_display_mode *mode)
 {
 	int err;
+	u32 s3d_flags;
 	u8 vic;
 
 	if (!frame || !mode)
 		return -EINVAL;
 
 	vic = drm_match_hdmi_mode(mode);
-	if (!vic)
+	s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+	if (!vic && !s3d_flags)
+		return -EINVAL;
+
+	if (vic && s3d_flags)
 		return -EINVAL;
 
 	err = hdmi_vendor_infoframe_init(frame);
 	if (err < 0)
 		return err;
 
-	frame->vic = vic;
+	if (vic)
+		frame->vic = vic;
+	else
+		frame->s3d_struct = s3d_structure_from_display_mode(mode);
 
 	return 0;
 }
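The new drm_edid_duplicate() above sizes its copy from the EDID extension-count byte, the same arithmetic the firmware loader's edid_size() uses below. A sketch of a hypothetical caller that snapshots a probed EDID:

#include <drm/drm_edid.h>

/* Sketch: keep a private copy of a probed EDID. The helper copies
 * (edid->extensions + 1) * EDID_LENGTH bytes — the base block plus
 * every extension block — and returns NULL on allocation failure. */
static struct edid *snapshot_edid(const struct edid *probed)
{
	return drm_edid_duplicate(probed);
}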
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 271b42bbfb72..9081172ef057 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -32,7 +32,7 @@ MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
32 "from built-in data or /lib/firmware instead. "); 32 "from built-in data or /lib/firmware instead. ");
33 33
34#define GENERIC_EDIDS 5 34#define GENERIC_EDIDS 5
35static char *generic_edid_name[GENERIC_EDIDS] = { 35static const char *generic_edid_name[GENERIC_EDIDS] = {
36 "edid/1024x768.bin", 36 "edid/1024x768.bin",
37 "edid/1280x1024.bin", 37 "edid/1280x1024.bin",
38 "edid/1600x1200.bin", 38 "edid/1600x1200.bin",
@@ -40,7 +40,7 @@ static char *generic_edid_name[GENERIC_EDIDS] = {
40 "edid/1920x1080.bin", 40 "edid/1920x1080.bin",
41}; 41};
42 42
43static u8 generic_edid[GENERIC_EDIDS][128] = { 43static const u8 generic_edid[GENERIC_EDIDS][128] = {
44 { 44 {
45 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 45 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
46 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 46 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -133,63 +133,68 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {
 	},
 };
 
+static int edid_size(const u8 *edid, int data_size)
+{
+	if (data_size < EDID_LENGTH)
+		return 0;
+
+	return (edid[0x7e] + 1) * EDID_LENGTH;
+}
+
 static u8 *edid_load(struct drm_connector *connector, const char *name,
 		     const char *connector_name)
 {
-	const struct firmware *fw;
-	struct platform_device *pdev;
-	u8 *fwdata = NULL, *edid, *new_edid;
-	int fwsize, expected;
-	int builtin = 0, err = 0;
+	const struct firmware *fw = NULL;
+	const u8 *fwdata;
+	u8 *edid;
+	int fwsize, builtin;
 	int i, valid_extensions = 0;
 	bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
 
-	pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
-	if (IS_ERR(pdev)) {
-		DRM_ERROR("Failed to register EDID firmware platform device "
-			  "for connector \"%s\"\n", connector_name);
-		err = -EINVAL;
-		goto out;
-	}
-
-	err = request_firmware(&fw, name, &pdev->dev);
-	platform_device_unregister(pdev);
-
-	if (err) {
-		i = 0;
-		while (i < GENERIC_EDIDS && strcmp(name, generic_edid_name[i]))
-			i++;
-		if (i < GENERIC_EDIDS) {
-			err = 0;
-			builtin = 1;
+	builtin = 0;
+	for (i = 0; i < GENERIC_EDIDS; i++) {
+		if (strcmp(name, generic_edid_name[i]) == 0) {
 			fwdata = generic_edid[i];
 			fwsize = sizeof(generic_edid[i]);
+			builtin = 1;
+			break;
 		}
 	}
+	if (!builtin) {
+		struct platform_device *pdev;
+		int err;
 
-	if (err) {
-		DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
-			  name, err);
-		goto out;
-	}
+		pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
+		if (IS_ERR(pdev)) {
+			DRM_ERROR("Failed to register EDID firmware platform device "
+				  "for connector \"%s\"\n", connector_name);
+			return ERR_CAST(pdev);
+		}
+
+		err = request_firmware(&fw, name, &pdev->dev);
+		platform_device_unregister(pdev);
+		if (err) {
+			DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
+				  name, err);
+			return ERR_PTR(err);
+		}
 
-	if (fwdata == NULL) {
-		fwdata = (u8 *) fw->data;
+		fwdata = fw->data;
 		fwsize = fw->size;
 	}
 
-	expected = (fwdata[0x7e] + 1) * EDID_LENGTH;
-	if (expected != fwsize) {
+	if (edid_size(fwdata, fwsize) != fwsize) {
 		DRM_ERROR("Size of EDID firmware \"%s\" is invalid "
-			  "(expected %d, got %d)\n", name, expected, (int) fwsize);
-		err = -EINVAL;
-		goto relfw_out;
+			  "(expected %d, got %d\n", name,
+			  edid_size(fwdata, fwsize), (int)fwsize);
+		edid = ERR_PTR(-EINVAL);
+		goto out;
 	}
 
 	edid = kmemdup(fwdata, fwsize, GFP_KERNEL);
 	if (edid == NULL) {
-		err = -ENOMEM;
-		goto relfw_out;
+		edid = ERR_PTR(-ENOMEM);
+		goto out;
 	}
 
194 199
195 if (!drm_edid_block_valid(edid, 0, print_bad_edid)) { 200 if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
@@ -197,8 +202,8 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
197 DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ", 202 DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
198 name); 203 name);
199 kfree(edid); 204 kfree(edid);
200 err = -EINVAL; 205 edid = ERR_PTR(-EINVAL);
201 goto relfw_out; 206 goto out;
202 } 207 }
203 208
204 for (i = 1; i <= edid[0x7e]; i++) { 209 for (i = 1; i <= edid[0x7e]; i++) {
@@ -210,19 +215,18 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
 	}
 
 	if (valid_extensions != edid[0x7e]) {
+		u8 *new_edid;
+
 		edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
 		DRM_INFO("Found %d valid extensions instead of %d in EDID data "
 			 "\"%s\" for connector \"%s\"\n", valid_extensions,
 			 edid[0x7e], name, connector_name);
 		edid[0x7e] = valid_extensions;
+
 		new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
 				    GFP_KERNEL);
-		if (new_edid == NULL) {
-			err = -ENOMEM;
-			kfree(edid);
-			goto relfw_out;
-		}
-		edid = new_edid;
+		if (new_edid)
+			edid = new_edid;
 	}
 
 	DRM_INFO("Got %s EDID base block and %d extension%s from "
@@ -230,13 +234,9 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
230 "external", valid_extensions, valid_extensions == 1 ? "" : "s", 234 "external", valid_extensions, valid_extensions == 1 ? "" : "s",
231 name, connector_name); 235 name, connector_name);
232 236
233relfw_out:
234 release_firmware(fw);
235
236out: 237out:
237 if (err) 238 if (fw)
238 return ERR_PTR(err); 239 release_firmware(fw);
239
240 return edid; 240 return edid;
241} 241}
242 242
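For reference, the edid_size() helper used in the size check above derives the expected byte count from the extension-count field at offset 0x7e of the base block; a minimal sketch, assuming the helper added earlier in this patch looks roughly like this:

static int edid_size(const u8 *edid, int data_size)
{
	/* Need at least the 128-byte base block to read byte 0x7e. */
	if (data_size < EDID_LENGTH)
		return 0;

	/* Base block plus one EDID_LENGTH block per extension. */
	return (edid[0x7e] + 1) * EDID_LENGTH;
}

This is the same computation the removed "expected = (fwdata[0x7e] + 1) * EDID_LENGTH" line performed inline, now guarded against firmware blobs shorter than one block.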
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 3d13ca6e257f..0a19401aff80 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -39,10 +39,6 @@
39#include <drm/drm_fb_helper.h> 39#include <drm/drm_fb_helper.h>
40#include <drm/drm_crtc_helper.h> 40#include <drm/drm_crtc_helper.h>
41 41
42MODULE_AUTHOR("David Airlie, Jesse Barnes");
43MODULE_DESCRIPTION("DRM KMS helper");
44MODULE_LICENSE("GPL and additional rights");
45
46static LIST_HEAD(kernel_fb_helper_list); 42static LIST_HEAD(kernel_fb_helper_list);
47 43
48/** 44/**
@@ -844,7 +840,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
844 struct drm_fb_helper *fb_helper = info->par; 840 struct drm_fb_helper *fb_helper = info->par;
845 struct drm_device *dev = fb_helper->dev; 841 struct drm_device *dev = fb_helper->dev;
846 struct drm_mode_set *modeset; 842 struct drm_mode_set *modeset;
847 struct drm_crtc *crtc;
848 int ret = 0; 843 int ret = 0;
849 int i; 844 int i;
850 845
@@ -855,8 +850,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
855 } 850 }
856 851
857 for (i = 0; i < fb_helper->crtc_count; i++) { 852 for (i = 0; i < fb_helper->crtc_count; i++) {
858 crtc = fb_helper->crtc_info[i].mode_set.crtc;
859
860 modeset = &fb_helper->crtc_info[i].mode_set; 853 modeset = &fb_helper->crtc_info[i].mode_set;
861 854
862 modeset->x = var->xoffset; 855 modeset->x = var->xoffset;
@@ -1352,7 +1345,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
1352 struct drm_connector *connector; 1345 struct drm_connector *connector;
1353 struct drm_connector_helper_funcs *connector_funcs; 1346 struct drm_connector_helper_funcs *connector_funcs;
1354 struct drm_encoder *encoder; 1347 struct drm_encoder *encoder;
1355 struct drm_fb_helper_crtc *best_crtc;
1356 int my_score, best_score, score; 1348 int my_score, best_score, score;
1357 struct drm_fb_helper_crtc **crtcs, *crtc; 1349 struct drm_fb_helper_crtc **crtcs, *crtc;
1358 struct drm_fb_helper_connector *fb_helper_conn; 1350 struct drm_fb_helper_connector *fb_helper_conn;
@@ -1364,7 +1356,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
1364 connector = fb_helper_conn->connector; 1356 connector = fb_helper_conn->connector;
1365 1357
1366 best_crtcs[n] = NULL; 1358 best_crtcs[n] = NULL;
1367 best_crtc = NULL;
1368 best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height); 1359 best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
1369 if (modes[n] == NULL) 1360 if (modes[n] == NULL)
1370 return best_score; 1361 return best_score;
@@ -1413,7 +1404,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
1413 score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1, 1404 score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
1414 width, height); 1405 width, height);
1415 if (score > best_score) { 1406 if (score > best_score) {
1416 best_crtc = crtc;
1417 best_score = score; 1407 best_score = score;
1418 memcpy(best_crtcs, crtcs, 1408 memcpy(best_crtcs, crtcs,
1419 dev->mode_config.num_connector * 1409 dev->mode_config.num_connector *
@@ -1580,8 +1570,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
1580int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) 1570int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1581{ 1571{
1582 struct drm_device *dev = fb_helper->dev; 1572 struct drm_device *dev = fb_helper->dev;
1583 int count = 0; 1573 u32 max_width, max_height;
1584 u32 max_width, max_height, bpp_sel;
1585 1574
1586 if (!fb_helper->fb) 1575 if (!fb_helper->fb)
1587 return 0; 1576 return 0;
@@ -1596,10 +1585,8 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1596 1585
1597 max_width = fb_helper->fb->width; 1586 max_width = fb_helper->fb->width;
1598 max_height = fb_helper->fb->height; 1587 max_height = fb_helper->fb->height;
1599 bpp_sel = fb_helper->fb->bits_per_pixel;
1600 1588
1601 count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, 1589 drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height);
1602 max_height);
1603 mutex_unlock(&fb_helper->dev->mode_config.mutex); 1590 mutex_unlock(&fb_helper->dev->mode_config.mutex);
1604 1591
1605 drm_modeset_lock_all(dev); 1592 drm_modeset_lock_all(dev);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3f84277d7036..d0e27667a4eb 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -113,7 +113,6 @@ int drm_open(struct inode *inode, struct file *filp)
113 retcode = drm_open_helper(inode, filp, dev); 113 retcode = drm_open_helper(inode, filp, dev);
114 if (retcode) 114 if (retcode)
115 goto err_undo; 115 goto err_undo;
116 atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
117 if (need_setup) { 116 if (need_setup) {
118 retcode = drm_setup(dev); 117 retcode = drm_setup(dev);
119 if (retcode) 118 if (retcode)
@@ -386,6 +385,71 @@ static void drm_events_release(struct drm_file *file_priv)
386} 385}
387 386
388/** 387/**
388 * drm_legacy_dev_reinit
389 *
390 * Reinitializes a legacy/ums drm device in its lastclose function.
391 */
392static void drm_legacy_dev_reinit(struct drm_device *dev)
393{
394 if (drm_core_check_feature(dev, DRIVER_MODESET))
395 return;
396
397 atomic_set(&dev->ioctl_count, 0);
398 atomic_set(&dev->vma_count, 0);
399
400 dev->sigdata.lock = NULL;
401
402 dev->context_flag = 0;
403 dev->last_context = 0;
404 dev->if_version = 0;
405}
406
407/**
408 * Take down the DRM device.
409 *
410 * \param dev DRM device structure.
411 *
412 * Frees every resource in \p dev.
413 *
414 * \sa drm_device
415 */
416int drm_lastclose(struct drm_device * dev)
417{
418 struct drm_vma_entry *vma, *vma_temp;
419
420 DRM_DEBUG("\n");
421
422 if (dev->driver->lastclose)
423 dev->driver->lastclose(dev);
424 DRM_DEBUG("driver lastclose completed\n");
425
426 if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
427 drm_irq_uninstall(dev);
428
429 mutex_lock(&dev->struct_mutex);
430
431 drm_agp_clear(dev);
432
433 drm_legacy_sg_cleanup(dev);
434
435 /* Clear vma list (only built for debugging) */
436 list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
437 list_del(&vma->head);
438 kfree(vma);
439 }
440
441 drm_legacy_dma_takedown(dev);
442
443 dev->dev_mapping = NULL;
444 mutex_unlock(&dev->struct_mutex);
445
446 drm_legacy_dev_reinit(dev);
447
448 DRM_DEBUG("lastclose completed\n");
449 return 0;
450}
451
452/**
389 * Release file. 453 * Release file.
390 * 454 *
391 * \param inode device inode 455 * \param inode device inode
@@ -454,7 +518,6 @@ int drm_release(struct inode *inode, struct file *filp)
454 518
455 list_del(&pos->head); 519 list_del(&pos->head);
456 kfree(pos); 520 kfree(pos);
457 --dev->ctx_count;
458 } 521 }
459 } 522 }
460 } 523 }
@@ -516,7 +579,6 @@ int drm_release(struct inode *inode, struct file *filp)
516 * End inline drm_release 579 * End inline drm_release
517 */ 580 */
518 581
519 atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
520 if (!--dev->open_count) { 582 if (!--dev->open_count) {
521 if (atomic_read(&dev->ioctl_count)) { 583 if (atomic_read(&dev->ioctl_count)) {
522 DRM_ERROR("Device busy: %d\n", 584 DRM_ERROR("Device busy: %d\n",
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 49293bdc972a..4761adedad2a 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -160,35 +160,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
160} 160}
161EXPORT_SYMBOL(drm_gem_private_object_init); 161EXPORT_SYMBOL(drm_gem_private_object_init);
162 162
163/**
164 * Allocate a GEM object of the specified size with shmfs backing store
165 */
166struct drm_gem_object *
167drm_gem_object_alloc(struct drm_device *dev, size_t size)
168{
169 struct drm_gem_object *obj;
170
171 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
172 if (!obj)
173 goto free;
174
175 if (drm_gem_object_init(dev, obj, size) != 0)
176 goto free;
177
178 if (dev->driver->gem_init_object != NULL &&
179 dev->driver->gem_init_object(obj) != 0) {
180 goto fput;
181 }
182 return obj;
183fput:
184 /* Object_init mangles the global counters - readjust them. */
185 fput(obj->filp);
186free:
187 kfree(obj);
188 return NULL;
189}
190EXPORT_SYMBOL(drm_gem_object_alloc);
191
192static void 163static void
193drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) 164drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
194{ 165{
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
index f7311162a61d..3d2e91c4d78e 100644
--- a/drivers/gpu/drm/drm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -67,7 +67,6 @@ int drm_global_item_ref(struct drm_global_reference *ref)
67{ 67{
68 int ret; 68 int ret;
69 struct drm_global_item *item = &glob[ref->global_type]; 69 struct drm_global_item *item = &glob[ref->global_type];
70 void *object;
71 70
72 mutex_lock(&item->mutex); 71 mutex_lock(&item->mutex);
73 if (item->refcount == 0) { 72 if (item->refcount == 0) {
@@ -85,7 +84,6 @@ int drm_global_item_ref(struct drm_global_reference *ref)
85 } 84 }
86 ++item->refcount; 85 ++item->refcount;
87 ref->object = item->object; 86 ref->object = item->object;
88 object = item->object;
89 mutex_unlock(&item->mutex); 87 mutex_unlock(&item->mutex);
90 return 0; 88 return 0;
91out_err: 89out_err:
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 53298320080b..7d5a152eeb02 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -163,13 +163,13 @@ int drm_vblank_info(struct seq_file *m, void *data)
163 mutex_lock(&dev->struct_mutex); 163 mutex_lock(&dev->struct_mutex);
164 for (crtc = 0; crtc < dev->num_crtcs; crtc++) { 164 for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
165 seq_printf(m, "CRTC %d enable: %d\n", 165 seq_printf(m, "CRTC %d enable: %d\n",
166 crtc, atomic_read(&dev->vblank_refcount[crtc])); 166 crtc, atomic_read(&dev->vblank[crtc].refcount));
167 seq_printf(m, "CRTC %d counter: %d\n", 167 seq_printf(m, "CRTC %d counter: %d\n",
168 crtc, drm_vblank_count(dev, crtc)); 168 crtc, drm_vblank_count(dev, crtc));
169 seq_printf(m, "CRTC %d last wait: %d\n", 169 seq_printf(m, "CRTC %d last wait: %d\n",
170 crtc, dev->last_vblank_wait[crtc]); 170 crtc, dev->vblank[crtc].last_wait);
171 seq_printf(m, "CRTC %d in modeset: %d\n", 171 seq_printf(m, "CRTC %d in modeset: %d\n",
172 crtc, dev->vblank_inmodeset[crtc]); 172 crtc, dev->vblank[crtc].inmodeset);
173 } 173 }
174 mutex_unlock(&dev->struct_mutex); 174 mutex_unlock(&dev->struct_mutex);
175 return 0; 175 return 0;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 07247e2855a2..dffc836144cc 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -303,6 +303,27 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
303} 303}
304 304
305/** 305/**
306 * Set device/driver capabilities
307 */
308int
309drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
310{
311 struct drm_set_client_cap *req = data;
312
313 switch (req->capability) {
314 case DRM_CLIENT_CAP_STEREO_3D:
315 if (req->value > 1)
316 return -EINVAL;
317 file_priv->stereo_allowed = req->value;
318 break;
319 default:
320 return -EINVAL;
321 }
322
323 return 0;
324}
325
326/**
306 * Setversion ioctl. 327 * Setversion ioctl.
307 * 328 *
308 * \param inode device inode. 329 * \param inode device inode.
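A user-space client opts in to stereo modes through the matching DRM_IOCTL_SET_CLIENT_CAP ioctl; a minimal sketch, assuming the uapi definitions (struct drm_set_client_cap, DRM_CLIENT_CAP_STEREO_3D) introduced alongside this handler:

#include <sys/ioctl.h>
#include <drm.h>	/* struct drm_set_client_cap, DRM_IOCTL_SET_CLIENT_CAP */

static int enable_stereo_3d(int drm_fd)
{
	struct drm_set_client_cap cap = {
		.capability = DRM_CLIENT_CAP_STEREO_3D,
		.value = 1,	/* values above 1 are rejected with -EINVAL */
	};

	return ioctl(drm_fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
}

Until a client sets this capability, file_priv->stereo_allowed stays false and stereo modes are kept out of the mode lists it sees.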
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f92da0a32f0d..f9af048828ea 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -43,9 +43,8 @@
43#include <linux/export.h> 43#include <linux/export.h>
44 44
45/* Access macro for slots in vblank timestamp ringbuffer. */ 45/* Access macro for slots in vblank timestamp ringbuffer. */
46#define vblanktimestamp(dev, crtc, count) ( \ 46#define vblanktimestamp(dev, crtc, count) \
47 (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \ 47 ((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE])
48 ((count) % DRM_VBLANKTIME_RBSIZE)])
49 48
50/* Retry timestamp calculation up to 3 times to satisfy 49/* Retry timestamp calculation up to 3 times to satisfy
51 * drm_timestamp_precision before giving up. 50 * drm_timestamp_precision before giving up.
@@ -89,8 +88,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
89 */ 88 */
90static void clear_vblank_timestamps(struct drm_device *dev, int crtc) 89static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
91{ 90{
92 memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0, 91 memset(dev->vblank[crtc].time, 0, sizeof(dev->vblank[crtc].time));
93 DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
94} 92}
95 93
96/* 94/*
@@ -115,7 +113,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
115 spin_lock_irqsave(&dev->vblank_time_lock, irqflags); 113 spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
116 114
117 dev->driver->disable_vblank(dev, crtc); 115 dev->driver->disable_vblank(dev, crtc);
118 dev->vblank_enabled[crtc] = 0; 116 dev->vblank[crtc].enabled = false;
119 117
120 /* No further vblank irq's will be processed after 118 /* No further vblank irq's will be processed after
121 * this point. Get current hardware vblank count and 119 * this point. Get current hardware vblank count and
@@ -130,9 +128,9 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
130 * delayed gpu counter increment. 128 * delayed gpu counter increment.
131 */ 129 */
132 do { 130 do {
133 dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); 131 dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc);
134 vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0); 132 vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
135 } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc); 133 } while (dev->vblank[crtc].last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
136 134
137 if (!count) 135 if (!count)
138 vblrc = 0; 136 vblrc = 0;
@@ -140,7 +138,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
140 /* Compute time difference to stored timestamp of last vblank 138 /* Compute time difference to stored timestamp of last vblank
141 * as updated by last invocation of drm_handle_vblank() in vblank irq. 139 * as updated by last invocation of drm_handle_vblank() in vblank irq.
142 */ 140 */
143 vblcount = atomic_read(&dev->_vblank_count[crtc]); 141 vblcount = atomic_read(&dev->vblank[crtc].count);
144 diff_ns = timeval_to_ns(&tvblank) - 142 diff_ns = timeval_to_ns(&tvblank) -
145 timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); 143 timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
146 144
@@ -157,7 +155,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
157 * hope for the best. 155 * hope for the best.
158 */ 156 */
159 if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) { 157 if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
160 atomic_inc(&dev->_vblank_count[crtc]); 158 atomic_inc(&dev->vblank[crtc].count);
161 smp_mb__after_atomic_inc(); 159 smp_mb__after_atomic_inc();
162 } 160 }
163 161
@@ -178,8 +176,8 @@ static void vblank_disable_fn(unsigned long arg)
178 176
179 for (i = 0; i < dev->num_crtcs; i++) { 177 for (i = 0; i < dev->num_crtcs; i++) {
180 spin_lock_irqsave(&dev->vbl_lock, irqflags); 178 spin_lock_irqsave(&dev->vbl_lock, irqflags);
181 if (atomic_read(&dev->vblank_refcount[i]) == 0 && 179 if (atomic_read(&dev->vblank[i].refcount) == 0 &&
182 dev->vblank_enabled[i]) { 180 dev->vblank[i].enabled) {
183 DRM_DEBUG("disabling vblank on crtc %d\n", i); 181 DRM_DEBUG("disabling vblank on crtc %d\n", i);
184 vblank_disable_and_save(dev, i); 182 vblank_disable_and_save(dev, i);
185 } 183 }
@@ -197,14 +195,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
197 195
198 vblank_disable_fn((unsigned long)dev); 196 vblank_disable_fn((unsigned long)dev);
199 197
200 kfree(dev->vbl_queue); 198 kfree(dev->vblank);
201 kfree(dev->_vblank_count);
202 kfree(dev->vblank_refcount);
203 kfree(dev->vblank_enabled);
204 kfree(dev->last_vblank);
205 kfree(dev->last_vblank_wait);
206 kfree(dev->vblank_inmodeset);
207 kfree(dev->_vblank_time);
208 199
209 dev->num_crtcs = 0; 200 dev->num_crtcs = 0;
210} 201}
@@ -221,40 +212,12 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
221 212
222 dev->num_crtcs = num_crtcs; 213 dev->num_crtcs = num_crtcs;
223 214
224 dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs, 215 dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
225 GFP_KERNEL); 216 if (!dev->vblank)
226 if (!dev->vbl_queue)
227 goto err; 217 goto err;
228 218
229 dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL); 219 for (i = 0; i < num_crtcs; i++)
230 if (!dev->_vblank_count) 220 init_waitqueue_head(&dev->vblank[i].queue);
231 goto err;
232
233 dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
234 GFP_KERNEL);
235 if (!dev->vblank_refcount)
236 goto err;
237
238 dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
239 if (!dev->vblank_enabled)
240 goto err;
241
242 dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
243 if (!dev->last_vblank)
244 goto err;
245
246 dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
247 if (!dev->last_vblank_wait)
248 goto err;
249
250 dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
251 if (!dev->vblank_inmodeset)
252 goto err;
253
254 dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
255 sizeof(struct timeval), GFP_KERNEL);
256 if (!dev->_vblank_time)
257 goto err;
258 221
259 DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n"); 222 DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
260 223
@@ -264,14 +227,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
264 else 227 else
265 DRM_INFO("No driver support for vblank timestamp query.\n"); 228 DRM_INFO("No driver support for vblank timestamp query.\n");
266 229
267 /* Zero per-crtc vblank stuff */ 230 dev->vblank_disable_allowed = false;
268 for (i = 0; i < num_crtcs; i++) {
269 init_waitqueue_head(&dev->vbl_queue[i]);
270 atomic_set(&dev->_vblank_count[i], 0);
271 atomic_set(&dev->vblank_refcount[i], 0);
272 }
273 231
274 dev->vblank_disable_allowed = 0;
275 return 0; 232 return 0;
276 233
277err: 234err:
@@ -336,7 +293,7 @@ int drm_irq_install(struct drm_device *dev)
336 mutex_unlock(&dev->struct_mutex); 293 mutex_unlock(&dev->struct_mutex);
337 return -EBUSY; 294 return -EBUSY;
338 } 295 }
339 dev->irq_enabled = 1; 296 dev->irq_enabled = true;
340 mutex_unlock(&dev->struct_mutex); 297 mutex_unlock(&dev->struct_mutex);
341 298
342 DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); 299 DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
@@ -359,7 +316,7 @@ int drm_irq_install(struct drm_device *dev)
359 316
360 if (ret < 0) { 317 if (ret < 0) {
361 mutex_lock(&dev->struct_mutex); 318 mutex_lock(&dev->struct_mutex);
362 dev->irq_enabled = 0; 319 dev->irq_enabled = false;
363 mutex_unlock(&dev->struct_mutex); 320 mutex_unlock(&dev->struct_mutex);
364 return ret; 321 return ret;
365 } 322 }
@@ -373,7 +330,7 @@ int drm_irq_install(struct drm_device *dev)
373 330
374 if (ret < 0) { 331 if (ret < 0) {
375 mutex_lock(&dev->struct_mutex); 332 mutex_lock(&dev->struct_mutex);
376 dev->irq_enabled = 0; 333 dev->irq_enabled = false;
377 mutex_unlock(&dev->struct_mutex); 334 mutex_unlock(&dev->struct_mutex);
378 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 335 if (!drm_core_check_feature(dev, DRIVER_MODESET))
379 vga_client_register(dev->pdev, NULL, NULL, NULL); 336 vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -394,14 +351,15 @@ EXPORT_SYMBOL(drm_irq_install);
394int drm_irq_uninstall(struct drm_device *dev) 351int drm_irq_uninstall(struct drm_device *dev)
395{ 352{
396 unsigned long irqflags; 353 unsigned long irqflags;
397 int irq_enabled, i; 354 bool irq_enabled;
355 int i;
398 356
399 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 357 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
400 return -EINVAL; 358 return -EINVAL;
401 359
402 mutex_lock(&dev->struct_mutex); 360 mutex_lock(&dev->struct_mutex);
403 irq_enabled = dev->irq_enabled; 361 irq_enabled = dev->irq_enabled;
404 dev->irq_enabled = 0; 362 dev->irq_enabled = false;
405 mutex_unlock(&dev->struct_mutex); 363 mutex_unlock(&dev->struct_mutex);
406 364
407 /* 365 /*
@@ -410,9 +368,9 @@ int drm_irq_uninstall(struct drm_device *dev)
410 if (dev->num_crtcs) { 368 if (dev->num_crtcs) {
411 spin_lock_irqsave(&dev->vbl_lock, irqflags); 369 spin_lock_irqsave(&dev->vbl_lock, irqflags);
412 for (i = 0; i < dev->num_crtcs; i++) { 370 for (i = 0; i < dev->num_crtcs; i++) {
413 DRM_WAKEUP(&dev->vbl_queue[i]); 371 DRM_WAKEUP(&dev->vblank[i].queue);
414 dev->vblank_enabled[i] = 0; 372 dev->vblank[i].enabled = false;
415 dev->last_vblank[i] = 373 dev->vblank[i].last =
416 dev->driver->get_vblank_counter(dev, i); 374 dev->driver->get_vblank_counter(dev, i);
417 } 375 }
418 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 376 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -795,7 +753,7 @@ EXPORT_SYMBOL(drm_get_last_vbltimestamp);
795 */ 753 */
796u32 drm_vblank_count(struct drm_device *dev, int crtc) 754u32 drm_vblank_count(struct drm_device *dev, int crtc)
797{ 755{
798 return atomic_read(&dev->_vblank_count[crtc]); 756 return atomic_read(&dev->vblank[crtc].count);
799} 757}
800EXPORT_SYMBOL(drm_vblank_count); 758EXPORT_SYMBOL(drm_vblank_count);
801 759
@@ -824,10 +782,10 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
824 * a seqlock. 782 * a seqlock.
825 */ 783 */
826 do { 784 do {
827 cur_vblank = atomic_read(&dev->_vblank_count[crtc]); 785 cur_vblank = atomic_read(&dev->vblank[crtc].count);
828 *vblanktime = vblanktimestamp(dev, crtc, cur_vblank); 786 *vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
829 smp_rmb(); 787 smp_rmb();
830 } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc])); 788 } while (cur_vblank != atomic_read(&dev->vblank[crtc].count));
831 789
832 return cur_vblank; 790 return cur_vblank;
833} 791}
@@ -914,12 +872,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
914 } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc)); 872 } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
915 873
916 /* Deal with counter wrap */ 874 /* Deal with counter wrap */
917 diff = cur_vblank - dev->last_vblank[crtc]; 875 diff = cur_vblank - dev->vblank[crtc].last;
918 if (cur_vblank < dev->last_vblank[crtc]) { 876 if (cur_vblank < dev->vblank[crtc].last) {
919 diff += dev->max_vblank_count; 877 diff += dev->max_vblank_count;
920 878
921 DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n", 879 DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
922 crtc, dev->last_vblank[crtc], cur_vblank, diff); 880 crtc, dev->vblank[crtc].last, cur_vblank, diff);
923 } 881 }
924 882
925 DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n", 883 DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
@@ -930,12 +888,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
930 * reinitialize delayed at next vblank interrupt in that case. 888 * reinitialize delayed at next vblank interrupt in that case.
931 */ 889 */
932 if (rc) { 890 if (rc) {
933 tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; 891 tslot = atomic_read(&dev->vblank[crtc].count) + diff;
934 vblanktimestamp(dev, crtc, tslot) = t_vblank; 892 vblanktimestamp(dev, crtc, tslot) = t_vblank;
935 } 893 }
936 894
937 smp_mb__before_atomic_inc(); 895 smp_mb__before_atomic_inc();
938 atomic_add(diff, &dev->_vblank_count[crtc]); 896 atomic_add(diff, &dev->vblank[crtc].count);
939 smp_mb__after_atomic_inc(); 897 smp_mb__after_atomic_inc();
940} 898}
941 899
@@ -957,9 +915,9 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
957 915
958 spin_lock_irqsave(&dev->vbl_lock, irqflags); 916 spin_lock_irqsave(&dev->vbl_lock, irqflags);
959 /* Going from 0->1 means we have to enable interrupts again */ 917 /* Going from 0->1 means we have to enable interrupts again */
960 if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { 918 if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
961 spin_lock_irqsave(&dev->vblank_time_lock, irqflags2); 919 spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
962 if (!dev->vblank_enabled[crtc]) { 920 if (!dev->vblank[crtc].enabled) {
963 /* Enable vblank irqs under vblank_time_lock protection. 921 /* Enable vblank irqs under vblank_time_lock protection.
964 * All vblank count & timestamp updates are held off 922 * All vblank count & timestamp updates are held off
965 * until we are done reinitializing master counter and 923 * until we are done reinitializing master counter and
@@ -970,16 +928,16 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
970 DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", 928 DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
971 crtc, ret); 929 crtc, ret);
972 if (ret) 930 if (ret)
973 atomic_dec(&dev->vblank_refcount[crtc]); 931 atomic_dec(&dev->vblank[crtc].refcount);
974 else { 932 else {
975 dev->vblank_enabled[crtc] = 1; 933 dev->vblank[crtc].enabled = true;
976 drm_update_vblank_count(dev, crtc); 934 drm_update_vblank_count(dev, crtc);
977 } 935 }
978 } 936 }
979 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2); 937 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
980 } else { 938 } else {
981 if (!dev->vblank_enabled[crtc]) { 939 if (!dev->vblank[crtc].enabled) {
982 atomic_dec(&dev->vblank_refcount[crtc]); 940 atomic_dec(&dev->vblank[crtc].refcount);
983 ret = -EINVAL; 941 ret = -EINVAL;
984 } 942 }
985 } 943 }
@@ -999,10 +957,10 @@ EXPORT_SYMBOL(drm_vblank_get);
999 */ 957 */
1000void drm_vblank_put(struct drm_device *dev, int crtc) 958void drm_vblank_put(struct drm_device *dev, int crtc)
1001{ 959{
1002 BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0); 960 BUG_ON(atomic_read(&dev->vblank[crtc].refcount) == 0);
1003 961
1004 /* Last user schedules interrupt disable */ 962 /* Last user schedules interrupt disable */
1005 if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) && 963 if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
1006 (drm_vblank_offdelay > 0)) 964 (drm_vblank_offdelay > 0))
1007 mod_timer(&dev->vblank_disable_timer, 965 mod_timer(&dev->vblank_disable_timer,
1008 jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000)); 966 jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
@@ -1025,7 +983,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
1025 983
1026 spin_lock_irqsave(&dev->vbl_lock, irqflags); 984 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1027 vblank_disable_and_save(dev, crtc); 985 vblank_disable_and_save(dev, crtc);
1028 DRM_WAKEUP(&dev->vbl_queue[crtc]); 986 DRM_WAKEUP(&dev->vblank[crtc].queue);
1029 987
1030 /* Send any queued vblank events, lest the natives grow disquiet */ 988 /* Send any queued vblank events, lest the natives grow disquiet */
1031 seq = drm_vblank_count_and_time(dev, crtc, &now); 989 seq = drm_vblank_count_and_time(dev, crtc, &now);
@@ -1067,10 +1025,10 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
1067 * to avoid corrupting the count if multiple, mismatched calls occur), 1025 * to avoid corrupting the count if multiple, mismatched calls occur),
1068 * so that interrupts remain enabled in the interim. 1026 * so that interrupts remain enabled in the interim.
1069 */ 1027 */
1070 if (!dev->vblank_inmodeset[crtc]) { 1028 if (!dev->vblank[crtc].inmodeset) {
1071 dev->vblank_inmodeset[crtc] = 0x1; 1029 dev->vblank[crtc].inmodeset = 0x1;
1072 if (drm_vblank_get(dev, crtc) == 0) 1030 if (drm_vblank_get(dev, crtc) == 0)
1073 dev->vblank_inmodeset[crtc] |= 0x2; 1031 dev->vblank[crtc].inmodeset |= 0x2;
1074 } 1032 }
1075} 1033}
1076EXPORT_SYMBOL(drm_vblank_pre_modeset); 1034EXPORT_SYMBOL(drm_vblank_pre_modeset);
@@ -1083,15 +1041,15 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
1083 if (!dev->num_crtcs) 1041 if (!dev->num_crtcs)
1084 return; 1042 return;
1085 1043
1086 if (dev->vblank_inmodeset[crtc]) { 1044 if (dev->vblank[crtc].inmodeset) {
1087 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1045 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1088 dev->vblank_disable_allowed = 1; 1046 dev->vblank_disable_allowed = true;
1089 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1047 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1090 1048
1091 if (dev->vblank_inmodeset[crtc] & 0x2) 1049 if (dev->vblank[crtc].inmodeset & 0x2)
1092 drm_vblank_put(dev, crtc); 1050 drm_vblank_put(dev, crtc);
1093 1051
1094 dev->vblank_inmodeset[crtc] = 0; 1052 dev->vblank[crtc].inmodeset = 0;
1095 } 1053 }
1096} 1054}
1097EXPORT_SYMBOL(drm_vblank_post_modeset); 1055EXPORT_SYMBOL(drm_vblank_post_modeset);
@@ -1288,8 +1246,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1288 1246
1289 DRM_DEBUG("waiting on vblank count %d, crtc %d\n", 1247 DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
1290 vblwait->request.sequence, crtc); 1248 vblwait->request.sequence, crtc);
1291 dev->last_vblank_wait[crtc] = vblwait->request.sequence; 1249 dev->vblank[crtc].last_wait = vblwait->request.sequence;
1292 DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ, 1250 DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ,
1293 (((drm_vblank_count(dev, crtc) - 1251 (((drm_vblank_count(dev, crtc) -
1294 vblwait->request.sequence) <= (1 << 23)) || 1252 vblwait->request.sequence) <= (1 << 23)) ||
1295 !dev->irq_enabled)); 1253 !dev->irq_enabled));
@@ -1367,7 +1325,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1367 spin_lock_irqsave(&dev->vblank_time_lock, irqflags); 1325 spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
1368 1326
1369 /* Vblank irq handling disabled. Nothing to do. */ 1327 /* Vblank irq handling disabled. Nothing to do. */
1370 if (!dev->vblank_enabled[crtc]) { 1328 if (!dev->vblank[crtc].enabled) {
1371 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 1329 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
1372 return false; 1330 return false;
1373 } 1331 }
@@ -1377,7 +1335,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1377 */ 1335 */
1378 1336
1379 /* Get current timestamp and count. */ 1337 /* Get current timestamp and count. */
1380 vblcount = atomic_read(&dev->_vblank_count[crtc]); 1338 vblcount = atomic_read(&dev->vblank[crtc].count);
1381 drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ); 1339 drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
1382 1340
1383 /* Compute time difference to timestamp of last vblank */ 1341 /* Compute time difference to timestamp of last vblank */
@@ -1401,14 +1359,14 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1401 * the timestamp computed above. 1359 * the timestamp computed above.
1402 */ 1360 */
1403 smp_mb__before_atomic_inc(); 1361 smp_mb__before_atomic_inc();
1404 atomic_inc(&dev->_vblank_count[crtc]); 1362 atomic_inc(&dev->vblank[crtc].count);
1405 smp_mb__after_atomic_inc(); 1363 smp_mb__after_atomic_inc();
1406 } else { 1364 } else {
1407 DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", 1365 DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
1408 crtc, (int) diff_ns); 1366 crtc, (int) diff_ns);
1409 } 1367 }
1410 1368
1411 DRM_WAKEUP(&dev->vbl_queue[crtc]); 1369 DRM_WAKEUP(&dev->vblank[crtc].queue);
1412 drm_handle_vblank_events(dev, crtc); 1370 drm_handle_vblank_events(dev, crtc);
1413 1371
1414 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 1372 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
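All the per-crtc arrays freed above collapse into the single dev->vblank array; a sketch of the new per-crtc struct, inferred from the accessors in this patch (the authoritative definition lands in drmP.h):

struct drm_vblank_crtc {
	wait_queue_head_t queue;	/* vblank wait queue */
	struct timeval time[DRM_VBLANKTIME_RBSIZE];	/* timestamp ringbuffer */
	atomic_t count;			/* number of vblank interrupts */
	atomic_t refcount;		/* number of users of vblank interrupts */
	u32 last;			/* protected by dev->vbl_lock, used for wraparound handling */
	u32 last_wait;			/* last vblank seqno waited on */
	unsigned int inmodeset;		/* display driver is setting mode */
	bool enabled;			/* so we don't call enable more than once per disable */
};

One kcalloc() of num_crtcs of these replaces the eight separate allocations removed from drm_vblank_init().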
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index d752c96d6090..f6452682141b 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -86,7 +86,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
86 if (drm_lock_take(&master->lock, lock->context)) { 86 if (drm_lock_take(&master->lock, lock->context)) {
87 master->lock.file_priv = file_priv; 87 master->lock.file_priv = file_priv;
88 master->lock.lock_time = jiffies; 88 master->lock.lock_time = jiffies;
89 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
90 break; /* Got lock */ 89 break; /* Got lock */
91 } 90 }
92 91
@@ -157,8 +156,6 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
157 return -EINVAL; 156 return -EINVAL;
158 } 157 }
159 158
160 atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
161
162 if (drm_lock_free(&master->lock, lock->context)) { 159 if (drm_lock_free(&master->lock, lock->context)) {
163 /* FIXME: Should really bail out here. */ 160 /* FIXME: Should really bail out here. */
164 } 161 }
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index fc2adb62b757..b0733153dfd2 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -707,18 +707,25 @@ EXPORT_SYMBOL(drm_mode_vrefresh);
707/** 707/**
708 * drm_mode_set_crtcinfo - set CRTC modesetting parameters 708 * drm_mode_set_crtcinfo - set CRTC modesetting parameters
709 * @p: mode 709 * @p: mode
710 * @adjust_flags: unused? (FIXME) 710 * @adjust_flags: a combination of adjustment flags
711 * 711 *
712 * LOCKING: 712 * LOCKING:
713 * None. 713 * None.
714 * 714 *
715 * Set up the CRTC modesetting parameters for @p, adjusting if necessary. 715 Set up the CRTC modesetting parameters for @p, adjusting if necessary.
716 *
717 * - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
718 * interlaced modes.
719 * - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
720 * buffers containing two eyes (only adjust the timings when needed, e.g. for
721 * "frame packing" or "side by side full").
716 */ 722 */
717void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags) 723void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
718{ 724{
719 if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN)) 725 if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
720 return; 726 return;
721 727
728 p->crtc_clock = p->clock;
722 p->crtc_hdisplay = p->hdisplay; 729 p->crtc_hdisplay = p->hdisplay;
723 p->crtc_hsync_start = p->hsync_start; 730 p->crtc_hsync_start = p->hsync_start;
724 p->crtc_hsync_end = p->hsync_end; 731 p->crtc_hsync_end = p->hsync_end;
@@ -752,6 +759,20 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
752 p->crtc_vtotal *= p->vscan; 759 p->crtc_vtotal *= p->vscan;
753 } 760 }
754 761
762 if (adjust_flags & CRTC_STEREO_DOUBLE) {
763 unsigned int layout = p->flags & DRM_MODE_FLAG_3D_MASK;
764
765 switch (layout) {
766 case DRM_MODE_FLAG_3D_FRAME_PACKING:
767 p->crtc_clock *= 2;
768 p->crtc_vdisplay += p->crtc_vtotal;
769 p->crtc_vsync_start += p->crtc_vtotal;
770 p->crtc_vsync_end += p->crtc_vtotal;
771 p->crtc_vtotal += p->crtc_vtotal;
772 break;
773 }
774 }
775
755 p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay); 776 p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
756 p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal); 777 p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
757 p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay); 778 p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
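A worked example of the CRTC_STEREO_DOUBLE arithmetic, using the HDMI 1.4a frame-packing layout for 1920x1080@24 as illustrative numbers (not taken from this patch): the base mode has clock 74250, vdisplay 1080 and vtotal 1125, so the frame-packing branch produces crtc_clock = 148500, crtc_vdisplay = 1080 + 1125 = 2205 and crtc_vtotal = 2 * 1125 = 2250. Both eyes plus the original vertical blanking are then scanned out as one double-height frame at the doubled pixel clock, which is why only the vertical timings and the clock are touched.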
@@ -830,12 +851,16 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
830 } else if (mode1->clock != mode2->clock) 851 } else if (mode1->clock != mode2->clock)
831 return false; 852 return false;
832 853
833 return drm_mode_equal_no_clocks(mode1, mode2); 854 if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
855 (mode2->flags & DRM_MODE_FLAG_3D_MASK))
856 return false;
857
858 return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
834} 859}
835EXPORT_SYMBOL(drm_mode_equal); 860EXPORT_SYMBOL(drm_mode_equal);
836 861
837/** 862/**
838 * drm_mode_equal_no_clocks - test modes for equality 863 * drm_mode_equal_no_clocks_no_stereo - test modes for equality
839 * @mode1: first mode 864 * @mode1: first mode
840 * @mode2: second mode 865 * @mode2: second mode
841 * 866 *
@@ -843,12 +868,13 @@ EXPORT_SYMBOL(drm_mode_equal);
843 * None. 868 * None.
844 * 869 *
845 * Check to see if @mode1 and @mode2 are equivalent, but 870 * Check to see if @mode1 and @mode2 are equivalent, but
846 * don't check the pixel clocks. 871 * don't check the pixel clocks nor the stereo layout.
847 * 872 *
848 * RETURNS: 873 * RETURNS:
849 * True if the modes are equal, false otherwise. 874 * True if the modes are equal, false otherwise.
850 */ 875 */
851bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2) 876bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
877 const struct drm_display_mode *mode2)
852{ 878{
853 if (mode1->hdisplay == mode2->hdisplay && 879 if (mode1->hdisplay == mode2->hdisplay &&
854 mode1->hsync_start == mode2->hsync_start && 880 mode1->hsync_start == mode2->hsync_start &&
@@ -860,12 +886,13 @@ bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct
860 mode1->vsync_end == mode2->vsync_end && 886 mode1->vsync_end == mode2->vsync_end &&
861 mode1->vtotal == mode2->vtotal && 887 mode1->vtotal == mode2->vtotal &&
862 mode1->vscan == mode2->vscan && 888 mode1->vscan == mode2->vscan &&
863 mode1->flags == mode2->flags) 889 (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
890 (mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
864 return true; 891 return true;
865 892
866 return false; 893 return false;
867} 894}
868EXPORT_SYMBOL(drm_mode_equal_no_clocks); 895EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
869 896
870/** 897/**
871 * drm_mode_validate_size - make sure modes adhere to size constraints 898 * drm_mode_validate_size - make sure modes adhere to size constraints
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 1f96cee6eee8..f00d7a9671ea 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -322,83 +322,36 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
322 322
323 DRM_DEBUG("\n"); 323 DRM_DEBUG("\n");
324 324
325 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 325 dev = drm_dev_alloc(driver, &pdev->dev);
326 if (!dev) 326 if (!dev)
327 return -ENOMEM; 327 return -ENOMEM;
328 328
329 ret = pci_enable_device(pdev); 329 ret = pci_enable_device(pdev);
330 if (ret) 330 if (ret)
331 goto err_g1; 331 goto err_free;
332 332
333 dev->pdev = pdev; 333 dev->pdev = pdev;
334 dev->dev = &pdev->dev;
335
336 dev->pci_device = pdev->device;
337 dev->pci_vendor = pdev->vendor;
338
339#ifdef __alpha__ 334#ifdef __alpha__
340 dev->hose = pdev->sysdata; 335 dev->hose = pdev->sysdata;
341#endif 336#endif
342 337
343 mutex_lock(&drm_global_mutex); 338 if (drm_core_check_feature(dev, DRIVER_MODESET))
344
345 if ((ret = drm_fill_in_dev(dev, ent, driver))) {
346 printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
347 goto err_g2;
348 }
349
350 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
351 pci_set_drvdata(pdev, dev); 339 pci_set_drvdata(pdev, dev);
352 ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
353 if (ret)
354 goto err_g2;
355 }
356
357 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
358 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
359 if (ret)
360 goto err_g21;
361 }
362
363 if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
364 goto err_g3;
365
366 if (dev->driver->load) {
367 ret = dev->driver->load(dev, ent->driver_data);
368 if (ret)
369 goto err_g4;
370 }
371 340
372 /* setup the grouping for the legacy output */ 341 ret = drm_dev_register(dev, ent->driver_data);
373 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 342 if (ret)
374 ret = drm_mode_group_init_legacy_group(dev, 343 goto err_pci;
375 &dev->primary->mode_group);
376 if (ret)
377 goto err_g4;
378 }
379
380 list_add_tail(&dev->driver_item, &driver->device_list);
381 344
382 DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", 345 DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
383 driver->name, driver->major, driver->minor, driver->patchlevel, 346 driver->name, driver->major, driver->minor, driver->patchlevel,
384 driver->date, pci_name(pdev), dev->primary->index); 347 driver->date, pci_name(pdev), dev->primary->index);
385 348
386 mutex_unlock(&drm_global_mutex);
387 return 0; 349 return 0;
388 350
389err_g4: 351err_pci:
390 drm_put_minor(&dev->primary);
391err_g3:
392 if (dev->render)
393 drm_put_minor(&dev->render);
394err_g21:
395 if (drm_core_check_feature(dev, DRIVER_MODESET))
396 drm_put_minor(&dev->control);
397err_g2:
398 pci_disable_device(pdev); 352 pci_disable_device(pdev);
399err_g1: 353err_free:
400 kfree(dev); 354 drm_dev_free(dev);
401 mutex_unlock(&drm_global_mutex);
402 return ret; 355 return ret;
403} 356}
404EXPORT_SYMBOL(drm_get_pci_dev); 357EXPORT_SYMBOL(drm_get_pci_dev);
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index f7a18c6ba4c4..fc24fee8ec83 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -47,55 +47,15 @@ static int drm_get_platform_dev(struct platform_device *platdev,
47 47
48 DRM_DEBUG("\n"); 48 DRM_DEBUG("\n");
49 49
50 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 50 dev = drm_dev_alloc(driver, &platdev->dev);
51 if (!dev) 51 if (!dev)
52 return -ENOMEM; 52 return -ENOMEM;
53 53
54 dev->platformdev = platdev; 54 dev->platformdev = platdev;
55 dev->dev = &platdev->dev;
56 55
57 mutex_lock(&drm_global_mutex); 56 ret = drm_dev_register(dev, 0);
58
59 ret = drm_fill_in_dev(dev, NULL, driver);
60
61 if (ret) {
62 printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
63 goto err_g1;
64 }
65
66 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
67 ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
68 if (ret)
69 goto err_g1;
70 }
71
72 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
73 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
74 if (ret)
75 goto err_g11;
76 }
77
78 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
79 if (ret) 57 if (ret)
80 goto err_g2; 58 goto err_free;
81
82 if (dev->driver->load) {
83 ret = dev->driver->load(dev, 0);
84 if (ret)
85 goto err_g3;
86 }
87
88 /* setup the grouping for the legacy output */
89 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
90 ret = drm_mode_group_init_legacy_group(dev,
91 &dev->primary->mode_group);
92 if (ret)
93 goto err_g3;
94 }
95
96 list_add_tail(&dev->driver_item, &driver->device_list);
97
98 mutex_unlock(&drm_global_mutex);
99 59
100 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", 60 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
101 driver->name, driver->major, driver->minor, driver->patchlevel, 61 driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -103,17 +63,8 @@ static int drm_get_platform_dev(struct platform_device *platdev,
103 63
104 return 0; 64 return 0;
105 65
106err_g3: 66err_free:
107 drm_put_minor(&dev->primary); 67 drm_dev_free(dev);
108err_g2:
109 if (dev->render)
110 drm_put_minor(&dev->render);
111err_g11:
112 if (drm_core_check_feature(dev, DRIVER_MODESET))
113 drm_put_minor(&dev->control);
114err_g1:
115 kfree(dev);
116 mutex_unlock(&drm_global_mutex);
117 return ret; 68 return ret;
118} 69}
119 70
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 276d470f7b3e..56805c39c906 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -637,14 +637,13 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
637 unsigned count; 637 unsigned count;
638 struct scatterlist *sg; 638 struct scatterlist *sg;
639 struct page *page; 639 struct page *page;
640 u32 len, offset; 640 u32 len;
641 int pg_index; 641 int pg_index;
642 dma_addr_t addr; 642 dma_addr_t addr;
643 643
644 pg_index = 0; 644 pg_index = 0;
645 for_each_sg(sgt->sgl, sg, sgt->nents, count) { 645 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
646 len = sg->length; 646 len = sg->length;
647 offset = sg->offset;
648 page = sg_page(sg); 647 page = sg_page(sg);
649 addr = sg_dma_address(sg); 648 addr = sg_dma_address(sg);
650 649
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 39d864576be4..26055abf94ee 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -254,70 +254,6 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
254 return 0; 254 return 0;
255} 255}
256 256
257int drm_fill_in_dev(struct drm_device *dev,
258 const struct pci_device_id *ent,
259 struct drm_driver *driver)
260{
261 int retcode;
262
263 INIT_LIST_HEAD(&dev->filelist);
264 INIT_LIST_HEAD(&dev->ctxlist);
265 INIT_LIST_HEAD(&dev->vmalist);
266 INIT_LIST_HEAD(&dev->maplist);
267 INIT_LIST_HEAD(&dev->vblank_event_list);
268
269 spin_lock_init(&dev->count_lock);
270 spin_lock_init(&dev->event_lock);
271 mutex_init(&dev->struct_mutex);
272 mutex_init(&dev->ctxlist_mutex);
273
274 if (drm_ht_create(&dev->map_hash, 12)) {
275 return -ENOMEM;
276 }
277
278 /* the DRM has 6 basic counters */
279 dev->counters = 6;
280 dev->types[0] = _DRM_STAT_LOCK;
281 dev->types[1] = _DRM_STAT_OPENS;
282 dev->types[2] = _DRM_STAT_CLOSES;
283 dev->types[3] = _DRM_STAT_IOCTLS;
284 dev->types[4] = _DRM_STAT_LOCKS;
285 dev->types[5] = _DRM_STAT_UNLOCKS;
286
287 dev->driver = driver;
288
289 if (dev->driver->bus->agp_init) {
290 retcode = dev->driver->bus->agp_init(dev);
291 if (retcode)
292 goto error_out_unreg;
293 }
294
295
296
297 retcode = drm_ctxbitmap_init(dev);
298 if (retcode) {
299 DRM_ERROR("Cannot allocate memory for context bitmap.\n");
300 goto error_out_unreg;
301 }
302
303 if (driver->driver_features & DRIVER_GEM) {
304 retcode = drm_gem_init(dev);
305 if (retcode) {
306 DRM_ERROR("Cannot initialize graphics execution "
307 "manager (GEM)\n");
308 goto error_out_unreg;
309 }
310 }
311
312 return 0;
313
314 error_out_unreg:
315 drm_lastclose(dev);
316 return retcode;
317}
318EXPORT_SYMBOL(drm_fill_in_dev);
319
320
321/** 257/**
322 * Get a secondary minor number. 258 * Get a secondary minor number.
323 * 259 *
@@ -427,66 +363,237 @@ static void drm_unplug_minor(struct drm_minor *minor)
427 */ 363 */
428void drm_put_dev(struct drm_device *dev) 364void drm_put_dev(struct drm_device *dev)
429{ 365{
430 struct drm_driver *driver;
431 struct drm_map_list *r_list, *list_temp;
432
433 DRM_DEBUG("\n"); 366 DRM_DEBUG("\n");
434 367
435 if (!dev) { 368 if (!dev) {
436 DRM_ERROR("cleanup called no dev\n"); 369 DRM_ERROR("cleanup called no dev\n");
437 return; 370 return;
438 } 371 }
439 driver = dev->driver;
440 372
441 drm_lastclose(dev); 373 drm_dev_unregister(dev);
374 drm_dev_free(dev);
375}
376EXPORT_SYMBOL(drm_put_dev);
442 377
443 if (dev->driver->unload) 378void drm_unplug_dev(struct drm_device *dev)
444 dev->driver->unload(dev); 379{
380 /* for a USB device */
381 if (drm_core_check_feature(dev, DRIVER_MODESET))
382 drm_unplug_minor(dev->control);
383 if (dev->render)
384 drm_unplug_minor(dev->render);
385 drm_unplug_minor(dev->primary);
445 386
446 if (dev->driver->bus->agp_destroy) 387 mutex_lock(&drm_global_mutex);
447 dev->driver->bus->agp_destroy(dev);
448 388
449 drm_vblank_cleanup(dev); 389 drm_device_set_unplugged(dev);
450 390
451 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) 391 if (dev->open_count == 0) {
452 drm_rmmap(dev, r_list->map); 392 drm_put_dev(dev);
453 drm_ht_remove(&dev->map_hash); 393 }
394 mutex_unlock(&drm_global_mutex);
395}
396EXPORT_SYMBOL(drm_unplug_dev);
454 397
455 drm_ctxbitmap_cleanup(dev); 398/**
399 * drm_dev_alloc - Allocate new drm device
400 * @driver: DRM driver to allocate device for
401 * @parent: Parent device object
402 *
403 * Allocate and initialize a new DRM device. No device registration is done.
404 * Call drm_dev_register() to advertise the device to user space and register it
405 * with other core subsystems.
406 *
407 * RETURNS:
408 * Pointer to new DRM device, or NULL if out of memory.
409 */
410struct drm_device *drm_dev_alloc(struct drm_driver *driver,
411 struct device *parent)
412{
413 struct drm_device *dev;
414 int ret;
456 415
457 if (drm_core_check_feature(dev, DRIVER_MODESET)) 416 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
458 drm_put_minor(&dev->control); 417 if (!dev)
418 return NULL;
459 419
460 if (dev->render) 420 dev->dev = parent;
461 drm_put_minor(&dev->render); 421 dev->driver = driver;
422
423 INIT_LIST_HEAD(&dev->filelist);
424 INIT_LIST_HEAD(&dev->ctxlist);
425 INIT_LIST_HEAD(&dev->vmalist);
426 INIT_LIST_HEAD(&dev->maplist);
427 INIT_LIST_HEAD(&dev->vblank_event_list);
428
429 spin_lock_init(&dev->count_lock);
430 spin_lock_init(&dev->event_lock);
431 mutex_init(&dev->struct_mutex);
432 mutex_init(&dev->ctxlist_mutex);
433
434 if (drm_ht_create(&dev->map_hash, 12))
435 goto err_free;
462 436
463 if (driver->driver_features & DRIVER_GEM) 437 ret = drm_ctxbitmap_init(dev);
438 if (ret) {
439 DRM_ERROR("Cannot allocate memory for context bitmap.\n");
440 goto err_ht;
441 }
442
443 if (driver->driver_features & DRIVER_GEM) {
444 ret = drm_gem_init(dev);
445 if (ret) {
446 DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
447 goto err_ctxbitmap;
448 }
449 }
450
451 return dev;
452
453err_ctxbitmap:
454 drm_ctxbitmap_cleanup(dev);
455err_ht:
456 drm_ht_remove(&dev->map_hash);
457err_free:
458 kfree(dev);
459 return NULL;
460}
461EXPORT_SYMBOL(drm_dev_alloc);
462
463/**
464 * drm_dev_free - Free DRM device
465 * @dev: DRM device to free
466 *
467 * Free a DRM device that has previously been allocated via drm_dev_alloc().
468 * You must not use kfree() instead or you will leak memory.
469 *
470 * This must not be called once the device has been registered. Use drm_put_dev()
471 * instead, which then calls drm_dev_free().
472 */
473void drm_dev_free(struct drm_device *dev)
474{
475 if (dev->driver->driver_features & DRIVER_GEM)
464 drm_gem_destroy(dev); 476 drm_gem_destroy(dev);
465 477
466 drm_put_minor(&dev->primary); 478 drm_ctxbitmap_cleanup(dev);
479 drm_ht_remove(&dev->map_hash);
467 480
468 list_del(&dev->driver_item);
469 kfree(dev->devname); 481 kfree(dev->devname);
470 kfree(dev); 482 kfree(dev);
471} 483}
472EXPORT_SYMBOL(drm_put_dev); 484EXPORT_SYMBOL(drm_dev_free);
473 485
474void drm_unplug_dev(struct drm_device *dev) 486/**
487 * drm_dev_register - Register DRM device
488 * @dev: Device to register
489 *
490 * Register the DRM device @dev with the system, advertise the device to user-space
491 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
492 * previously.
493 *
494 * Never call this twice on any device!
495 *
496 * RETURNS:
497 * 0 on success, negative error code on failure.
498 */
499int drm_dev_register(struct drm_device *dev, unsigned long flags)
475{ 500{
476 /* for a USB device */ 501 int ret;
477 if (drm_core_check_feature(dev, DRIVER_MODESET))
478 drm_unplug_minor(dev->control);
479 if (dev->render)
480 drm_unplug_minor(dev->render);
481 drm_unplug_minor(dev->primary);
482 502
483 mutex_lock(&drm_global_mutex); 503 mutex_lock(&drm_global_mutex);
484 504
485 drm_device_set_unplugged(dev); 505 if (dev->driver->bus->agp_init) {
506 ret = dev->driver->bus->agp_init(dev);
507 if (ret)
508 goto out_unlock;
509 }
486 510
487 if (dev->open_count == 0) { 511 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
488 drm_put_dev(dev); 512 ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
513 if (ret)
514 goto err_agp;
489 } 515 }
516
517 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
518 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
519 if (ret)
520 goto err_control_node;
521 }
522
523 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
524 if (ret)
525 goto err_render_node;
526
527 if (dev->driver->load) {
528 ret = dev->driver->load(dev, flags);
529 if (ret)
530 goto err_primary_node;
531 }
532
533 /* setup grouping for legacy outputs */
534 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
535 ret = drm_mode_group_init_legacy_group(dev,
536 &dev->primary->mode_group);
537 if (ret)
538 goto err_unload;
539 }
540
541 list_add_tail(&dev->driver_item, &dev->driver->device_list);
542
543 ret = 0;
544 goto out_unlock;
545
546err_unload:
547 if (dev->driver->unload)
548 dev->driver->unload(dev);
549err_primary_node:
550 drm_put_minor(&dev->primary);
551err_render_node:
552 if (dev->render)
553 drm_put_minor(&dev->render);
554err_control_node:
555 if (dev->control)
556 drm_put_minor(&dev->control);
557err_agp:
558 if (dev->driver->bus->agp_destroy)
559 dev->driver->bus->agp_destroy(dev);
560out_unlock:
490 mutex_unlock(&drm_global_mutex); 561 mutex_unlock(&drm_global_mutex);
562 return ret;
491} 563}
492EXPORT_SYMBOL(drm_unplug_dev); 564EXPORT_SYMBOL(drm_dev_register);
565
566/**
567 * drm_dev_unregister - Unregister DRM device
568 * @dev: Device to unregister
569 *
570 * Unregister the DRM device from the system. This does the reverse of
571 * drm_dev_register() but does not deallocate the device. The caller must call
572 * drm_dev_free() to free all resources.
573 */
574void drm_dev_unregister(struct drm_device *dev)
575{
576 struct drm_map_list *r_list, *list_temp;
577
578 drm_lastclose(dev);
579
580 if (dev->driver->unload)
581 dev->driver->unload(dev);
582
583 if (dev->driver->bus->agp_destroy)
584 dev->driver->bus->agp_destroy(dev);
585
586 drm_vblank_cleanup(dev);
587
588 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
589 drm_rmmap(dev, r_list->map);
590
591 if (dev->control)
592 drm_put_minor(&dev->control);
593 if (dev->render)
594 drm_put_minor(&dev->render);
595 drm_put_minor(&dev->primary);
596
597 list_del(&dev->driver_item);
598}
599EXPORT_SYMBOL(drm_dev_unregister);
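Taken together with the PCI, platform and USB conversions elsewhere in this patch, bus glue now reduces to one pattern; a hypothetical example (example_driver and the probe signature are placeholders, not part of this patch):

static int example_bus_probe(struct device *parent, unsigned long flags)
{
	struct drm_device *dev;
	int ret;

	/* Allocation and init only; nothing is visible to user space yet. */
	dev = drm_dev_alloc(&example_driver, parent);
	if (!dev)
		return -ENOMEM;

	/* Registers minors, calls driver->load(), sets up legacy groups. */
	ret = drm_dev_register(dev, flags);
	if (ret)
		goto err_free;

	return 0;

err_free:
	drm_dev_free(dev);	/* never plain kfree(), per the kerneldoc above */
	return ret;
}

Teardown is the mirror image: drm_put_dev() calls drm_dev_unregister() followed by drm_dev_free().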
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 87664723b9ce..b179b70e7853 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -7,57 +7,20 @@ int drm_get_usb_dev(struct usb_interface *interface,
7 struct drm_driver *driver) 7 struct drm_driver *driver)
8{ 8{
9 struct drm_device *dev; 9 struct drm_device *dev;
10 struct usb_device *usbdev;
11 int ret; 10 int ret;
12 11
13 DRM_DEBUG("\n"); 12 DRM_DEBUG("\n");
14 13
15 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 14 dev = drm_dev_alloc(driver, &interface->dev);
16 if (!dev) 15 if (!dev)
17 return -ENOMEM; 16 return -ENOMEM;
18 17
19 usbdev = interface_to_usbdev(interface); 18 dev->usbdev = interface_to_usbdev(interface);
20 dev->usbdev = usbdev;
21 dev->dev = &interface->dev;
22
23 mutex_lock(&drm_global_mutex);
24
25 ret = drm_fill_in_dev(dev, NULL, driver);
26 if (ret) {
27 printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
28 goto err_g1;
29 }
30
31 usb_set_intfdata(interface, dev); 19 usb_set_intfdata(interface, dev);
32 ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
33 if (ret)
34 goto err_g1;
35
36 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
37 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
38 if (ret)
39 goto err_g11;
40 }
41 20
42 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY); 21 ret = drm_dev_register(dev, 0);
43 if (ret) 22 if (ret)
44 goto err_g2; 23 goto err_free;
45
46 if (dev->driver->load) {
47 ret = dev->driver->load(dev, 0);
48 if (ret)
49 goto err_g3;
50 }
51
52 /* setup the grouping for the legacy output */
53 ret = drm_mode_group_init_legacy_group(dev,
54 &dev->primary->mode_group);
55 if (ret)
56 goto err_g3;
57
58 list_add_tail(&dev->driver_item, &driver->device_list);
59
60 mutex_unlock(&drm_global_mutex);
61 24
62 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", 25 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
63 driver->name, driver->major, driver->minor, driver->patchlevel, 26 driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -65,16 +28,8 @@ int drm_get_usb_dev(struct usb_interface *interface,
65 28
66 return 0; 29 return 0;
67 30
68err_g3: 31err_free:
69 drm_put_minor(&dev->primary); 32 drm_dev_free(dev);
70err_g2:
71 if (dev->render)
72 drm_put_minor(&dev->render);
73err_g11:
74 drm_put_minor(&dev->control);
75err_g1:
76 kfree(dev);
77 mutex_unlock(&drm_global_mutex);
78 return ret; 33 return ret;
79 34
80} 35}
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 45b6ef595965..f227f544aa36 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -2,6 +2,7 @@ config DRM_EXYNOS
2 tristate "DRM Support for Samsung SoC EXYNOS Series" 2 tristate "DRM Support for Samsung SoC EXYNOS Series"
3 depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM) 3 depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_KMS_FB_HELPER
5 select FB_CFB_FILLRECT 6 select FB_CFB_FILLRECT
6 select FB_CFB_COPYAREA 7 select FB_CFB_COPYAREA
7 select FB_CFB_IMAGEBLIT 8 select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index bb82ef78ca85..3a1e6d9b25f7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -264,7 +264,6 @@ static struct drm_driver exynos_drm_driver = {
264 .get_vblank_counter = drm_vblank_count, 264 .get_vblank_counter = drm_vblank_count,
265 .enable_vblank = exynos_drm_crtc_enable_vblank, 265 .enable_vblank = exynos_drm_crtc_enable_vblank,
266 .disable_vblank = exynos_drm_crtc_disable_vblank, 266 .disable_vblank = exynos_drm_crtc_disable_vblank,
267 .gem_init_object = exynos_drm_gem_init_object,
268 .gem_free_object = exynos_drm_gem_free_object, 267 .gem_free_object = exynos_drm_gem_free_object,
269 .gem_vm_ops = &exynos_drm_gem_vm_ops, 268 .gem_vm_ops = &exynos_drm_gem_vm_ops,
270 .dumb_create = exynos_drm_gem_dumb_create, 269 .dumb_create = exynos_drm_gem_dumb_create,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 868a14d52995..23da72b5eae9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -716,20 +716,20 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
716{ 716{
717 /* 717 /*
718 * enable drm irq mode. 718 * enable drm irq mode.
719 * - with irq_enabled = 1, we can use the vblank feature. 719 * - with irq_enabled = true, we can use the vblank feature.
720 * 720 *
 721 * Note that we don't use the drm irq handler but a 721 * Note that we don't use the drm irq handler but a
 722 * driver-specific one instead, because the drm 722 * driver-specific one instead, because the drm
 723 * framework supports only one irq handler. 723 * framework supports only one irq handler.
724 */ 724 */
725 drm_dev->irq_enabled = 1; 725 drm_dev->irq_enabled = true;
726 726
727 /* 727 /*
728 * with vblank_disable_allowed = 1, vblank interrupt will be disabled 728 * with vblank_disable_allowed = true, vblank interrupt will be disabled
 729 * by the drm timer once the current process gives up ownership 729 * by the drm timer once the current process gives up ownership
 730 * of the vblank event (after drm_vblank_put() is called). 730 * of the vblank event (after drm_vblank_put() is called).
731 */ 731 */
732 drm_dev->vblank_disable_allowed = 1; 732 drm_dev->vblank_disable_allowed = true;
733 733
734 /* attach this sub driver to iommu mapping if supported. */ 734 /* attach this sub driver to iommu mapping if supported. */
735 if (is_drm_iommu_supported(drm_dev)) 735 if (is_drm_iommu_supported(drm_dev))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 49f9cd232757..1ade191d84f4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -630,11 +630,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
630 dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir); 630 dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
631} 631}
632 632
633int exynos_drm_gem_init_object(struct drm_gem_object *obj)
634{
635 return 0;
636}
637
638void exynos_drm_gem_free_object(struct drm_gem_object *obj) 633void exynos_drm_gem_free_object(struct drm_gem_object *obj)
639{ 634{
640 struct exynos_drm_gem_obj *exynos_gem_obj; 635 struct exynos_drm_gem_obj *exynos_gem_obj;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 09555afdfe9c..702ec3abe85c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -135,9 +135,6 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
135 unsigned int gem_handle, 135 unsigned int gem_handle,
136 struct drm_file *file_priv); 136 struct drm_file *file_priv);
137 137
138/* initialize gem object. */
139int exynos_drm_gem_init_object(struct drm_gem_object *obj);
140
141/* free gem object. */ 138/* free gem object. */
142void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj); 139void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
143 140
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 4400330e4449..ddaaedde173d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -101,7 +101,6 @@ static struct edid *vidi_get_edid(struct device *dev,
101{ 101{
102 struct vidi_context *ctx = get_vidi_context(dev); 102 struct vidi_context *ctx = get_vidi_context(dev);
103 struct edid *edid; 103 struct edid *edid;
104 int edid_len;
105 104
106 /* 105 /*
107 * the edid data comes from user side and it would be set 106 * the edid data comes from user side and it would be set
@@ -112,8 +111,7 @@ static struct edid *vidi_get_edid(struct device *dev,
112 return ERR_PTR(-EFAULT); 111 return ERR_PTR(-EFAULT);
113 } 112 }
114 113
115 edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH; 114 edid = drm_edid_duplicate(ctx->raw_edid);
116 edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
117 if (!edid) { 115 if (!edid) {
118 DRM_DEBUG_KMS("failed to allocate edid\n"); 116 DRM_DEBUG_KMS("failed to allocate edid\n");
119 return ERR_PTR(-ENOMEM); 117 return ERR_PTR(-ENOMEM);
@@ -385,20 +383,20 @@ static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
385{ 383{
386 /* 384 /*
387 * enable drm irq mode. 385 * enable drm irq mode.
388 * - with irq_enabled = 1, we can use the vblank feature. 386 * - with irq_enabled = true, we can use the vblank feature.
389 * 387 *
 390 * Note that we don't use the drm irq handler but a 388 * Note that we don't use the drm irq handler but a
 391 * driver-specific one instead, because the drm 389 * driver-specific one instead, because the drm
 392 * framework supports only one irq handler. 390 * framework supports only one irq handler.
393 */ 391 */
394 drm_dev->irq_enabled = 1; 392 drm_dev->irq_enabled = true;
395 393
396 /* 394 /*
397 * with vblank_disable_allowed = 1, vblank interrupt will be disabled 395 * with vblank_disable_allowed = true, vblank interrupt will be disabled
 398 * by the drm timer once the current process gives up ownership 396 * by the drm timer once the current process gives up ownership
 399 * of the vblank event (after drm_vblank_put() is called). 397 * of the vblank event (after drm_vblank_put() is called).
400 */ 398 */
401 drm_dev->vblank_disable_allowed = 1; 399 drm_dev->vblank_disable_allowed = true;
402 400
403 return 0; 401 return 0;
404} 402}
@@ -485,7 +483,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
485 struct exynos_drm_manager *manager; 483 struct exynos_drm_manager *manager;
486 struct exynos_drm_display_ops *display_ops; 484 struct exynos_drm_display_ops *display_ops;
487 struct drm_exynos_vidi_connection *vidi = data; 485 struct drm_exynos_vidi_connection *vidi = data;
488 int edid_len;
489 486
490 if (!vidi) { 487 if (!vidi) {
491 DRM_DEBUG_KMS("user data for vidi is null.\n"); 488 DRM_DEBUG_KMS("user data for vidi is null.\n");
@@ -524,8 +521,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
524 DRM_DEBUG_KMS("edid data is invalid.\n"); 521 DRM_DEBUG_KMS("edid data is invalid.\n");
525 return -EINVAL; 522 return -EINVAL;
526 } 523 }
527 edid_len = (1 + raw_edid->extensions) * EDID_LENGTH; 524 ctx->raw_edid = drm_edid_duplicate(raw_edid);
528 ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL);
529 if (!ctx->raw_edid) { 525 if (!ctx->raw_edid) {
530 DRM_DEBUG_KMS("failed to allocate raw_edid.\n"); 526 DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
531 return -ENOMEM; 527 return -ENOMEM;
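Both vidi hunks replace the same open-coded pattern, the size computation plus kmemdup(), with drm_edid_duplicate(). A sketch of what that helper presumably factors out, assuming it mirrors the code it replaces (an EDID base block is EDID_LENGTH bytes, plus one block per extension):

#include <linux/slab.h>
#include <drm/drm_edid.h>

/* Sketch only: assumes the helper is a straight extraction of the
 * kmemdup() pattern removed above (the real one lives in drm_edid.c). */
struct edid *drm_edid_duplicate(const struct edid *edid)
{
	size_t len = (edid->extensions + 1) * EDID_LENGTH;

	return kmemdup(edid, len, GFP_KERNEL);	/* NULL on failure */
}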
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 1f6e2dfaaeae..508cf99a292d 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -5,6 +5,7 @@ config DRM_GMA500
5 select FB_CFB_FILLRECT 5 select FB_CFB_FILLRECT
6 select FB_CFB_IMAGEBLIT 6 select FB_CFB_IMAGEBLIT
7 select DRM_KMS_HELPER 7 select DRM_KMS_HELPER
8 select DRM_KMS_FB_HELPER
8 select DRM_TTM 9 select DRM_TTM
9 # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915 10 # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
10 select ACPI_VIDEO if ACPI 11 select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 10ae8c52d06f..e2db48a81ed0 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -29,11 +29,6 @@
29#include <drm/drm_vma_manager.h> 29#include <drm/drm_vma_manager.h>
30#include "psb_drv.h" 30#include "psb_drv.h"
31 31
32int psb_gem_init_object(struct drm_gem_object *obj)
33{
34 return -EINVAL;
35}
36
37void psb_gem_free_object(struct drm_gem_object *obj) 32void psb_gem_free_object(struct drm_gem_object *obj)
38{ 33{
39 struct gtt_range *gtt = container_of(obj, struct gtt_range, gem); 34 struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index fcb4e9ff1f20..dd607f820a26 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -359,7 +359,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
359 359
360 drm_irq_install(dev); 360 drm_irq_install(dev);
361 361
362 dev->vblank_disable_allowed = 1; 362 dev->vblank_disable_allowed = true;
363 363
364 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 364 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
365 365
@@ -646,7 +646,6 @@ static struct drm_driver driver = {
646 .preclose = psb_driver_preclose, 646 .preclose = psb_driver_preclose,
647 .postclose = psb_driver_close, 647 .postclose = psb_driver_close,
648 648
649 .gem_init_object = psb_gem_init_object,
650 .gem_free_object = psb_gem_free_object, 649 .gem_free_object = psb_gem_free_object,
651 .gem_vm_ops = &psb_gem_vm_ops, 650 .gem_vm_ops = &psb_gem_vm_ops,
652 .dumb_create = psb_gem_dumb_create, 651 .dumb_create = psb_gem_dumb_create,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 4535ac7708f8..0bab46bd73d2 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -44,10 +44,10 @@ enum {
44 CHIP_MFLD_0130 = 3, /* Medfield */ 44 CHIP_MFLD_0130 = 3, /* Medfield */
45}; 45};
46 46
47#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108) 47#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108)
48#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100) 48#define IS_MRST(dev) (((dev)->pdev->device & 0xfffc) == 0x4100)
49#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130) 49#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
50#define IS_CDV(dev) (((dev)->pci_device & 0xfff0) == 0x0be0) 50#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
51 51
52/* 52/*
53 * Driver definitions 53 * Driver definitions
@@ -837,7 +837,6 @@ extern const struct drm_connector_helper_funcs
837extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs; 837extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
838 838
839/* gem.c */ 839/* gem.c */
840extern int psb_gem_init_object(struct drm_gem_object *obj);
841extern void psb_gem_free_object(struct drm_gem_object *obj); 840extern void psb_gem_free_object(struct drm_gem_object *obj);
842extern int psb_gem_get_aperture(struct drm_device *dev, void *data, 841extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
843 struct drm_file *file); 842 struct drm_file *file);
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 029eccf30137..ba4830342d34 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -271,15 +271,15 @@ void psb_irq_preinstall(struct drm_device *dev)
271 271
272 if (gma_power_is_on(dev)) 272 if (gma_power_is_on(dev))
273 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); 273 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
274 if (dev->vblank_enabled[0]) 274 if (dev->vblank[0].enabled)
275 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG; 275 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
276 if (dev->vblank_enabled[1]) 276 if (dev->vblank[1].enabled)
277 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG; 277 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
278 278
279 /* FIXME: Handle Medfield irq mask 279 /* FIXME: Handle Medfield irq mask
280 if (dev->vblank_enabled[1]) 280 if (dev->vblank[1].enabled)
281 dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG; 281 dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
282 if (dev->vblank_enabled[2]) 282 if (dev->vblank[2].enabled)
283 dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG; 283 dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
284 */ 284 */
285 285
@@ -305,17 +305,17 @@ int psb_irq_postinstall(struct drm_device *dev)
305 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); 305 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
306 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); 306 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
307 307
308 if (dev->vblank_enabled[0]) 308 if (dev->vblank[0].enabled)
309 psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); 309 psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
310 else 310 else
311 psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); 311 psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
312 312
313 if (dev->vblank_enabled[1]) 313 if (dev->vblank[1].enabled)
314 psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); 314 psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
315 else 315 else
316 psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); 316 psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
317 317
318 if (dev->vblank_enabled[2]) 318 if (dev->vblank[2].enabled)
319 psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); 319 psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
320 else 320 else
321 psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); 321 psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
@@ -339,13 +339,13 @@ void psb_irq_uninstall(struct drm_device *dev)
339 339
340 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); 340 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
341 341
342 if (dev->vblank_enabled[0]) 342 if (dev->vblank[0].enabled)
343 psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); 343 psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
344 344
345 if (dev->vblank_enabled[1]) 345 if (dev->vblank[1].enabled)
346 psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); 346 psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
347 347
348 if (dev->vblank_enabled[2]) 348 if (dev->vblank[2].enabled)
349 psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); 349 psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
350 350
351 dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG | 351 dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
@@ -456,7 +456,7 @@ static int psb_vblank_do_wait(struct drm_device *dev,
456{ 456{
457 unsigned int cur_vblank; 457 unsigned int cur_vblank;
458 int ret = 0; 458 int ret = 0;
459 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 459 DRM_WAIT_ON(ret, dev->vblank.queue, 3 * DRM_HZ,
460 (((cur_vblank = atomic_read(counter)) 460 (((cur_vblank = atomic_read(counter))
461 - *sequence) <= (1 << 23))); 461 - *sequence) <= (1 << 23)));
462 *sequence = cur_vblank; 462 *sequence = cur_vblank;
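The psb_irq.c hunks are mechanical fallout of collecting per-pipe vblank state into an array of structs on the drm_device instead of parallel arrays. Only two fields are visible in these hunks; a hedged sketch of the layout they imply (any further members of the real struct are unknown here):

#include <linux/types.h>
#include <linux/wait.h>

/* Sketch of the per-pipe vblank bookkeeping implied by the hunks
 * above; only the fields this diff actually touches are shown. */
struct drm_vblank_crtc {
	wait_queue_head_t queue;	/* vblank waiters, see DRM_WAIT_ON */
	bool enabled;			/* vblank irqs on for this pipe? */
};

Drivers then index dev->vblank[pipe].enabled rather than the old dev->vblank_enabled[pipe] array.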
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index ab1892eb1074..249fdff305c6 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -944,8 +944,6 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
944 dma->buflist[vertex->idx], 944 dma->buflist[vertex->idx],
945 vertex->discard, vertex->used); 945 vertex->discard, vertex->used);
946 946
947 atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
948 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
949 sarea_priv->last_enqueue = dev_priv->counter - 1; 947 sarea_priv->last_enqueue = dev_priv->counter - 1;
950 sarea_priv->last_dispatch = (int)hw_status[5]; 948 sarea_priv->last_dispatch = (int)hw_status[5];
951 949
@@ -1105,8 +1103,6 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
1105 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used, 1103 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
1106 mc->last_render); 1104 mc->last_render);
1107 1105
1108 atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
1109 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
1110 sarea_priv->last_enqueue = dev_priv->counter - 1; 1106 sarea_priv->last_enqueue = dev_priv->counter - 1;
1111 sarea_priv->last_dispatch = (int)hw_status[5]; 1107 sarea_priv->last_dispatch = (int)hw_status[5];
1112 1108
@@ -1197,13 +1193,6 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
1197 1193
1198int i810_driver_load(struct drm_device *dev, unsigned long flags) 1194int i810_driver_load(struct drm_device *dev, unsigned long flags)
1199{ 1195{
1200 /* i810 has 4 more counters */
1201 dev->counters += 4;
1202 dev->types[6] = _DRM_STAT_IRQ;
1203 dev->types[7] = _DRM_STAT_PRIMARY;
1204 dev->types[8] = _DRM_STAT_SECONDARY;
1205 dev->types[9] = _DRM_STAT_DMA;
1206
1207 pci_set_master(dev->pdev); 1196 pci_set_master(dev->pdev);
1208 1197
1209 return 0; 1198 return 0;
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
new file mode 100644
index 000000000000..6199d0b5b958
--- /dev/null
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -0,0 +1,67 @@
1config DRM_I915
2 tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
3 depends on DRM
4 depends on AGP
5 depends on AGP_INTEL
6 # we need shmfs for the swappable backing store, and in particular
7 # the shmem_readpage() which depends upon tmpfs
8 select SHMEM
9 select TMPFS
10 select DRM_KMS_HELPER
11 # i915 depends on ACPI_VIDEO when ACPI is enabled
12 # but for select to work, need to select ACPI_VIDEO's dependencies, ick
13 select BACKLIGHT_LCD_SUPPORT if ACPI
14 select BACKLIGHT_CLASS_DEVICE if ACPI
15 select VIDEO_OUTPUT_CONTROL if ACPI
16 select INPUT if ACPI
17 select ACPI_VIDEO if ACPI
18 select ACPI_BUTTON if ACPI
19 help
20 Choose this option if you have a system that has "Intel Graphics
21 Media Accelerator" or "HD Graphics" integrated graphics,
22 including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
23 G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
24 Core i5, Core i7 as well as Atom CPUs with integrated graphics.
25 If M is selected, the module will be called i915. AGP support
26 is required for this driver to work. This driver is used by
27 the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
28 replaces the older i830 module that supported a subset of the
29 hardware in older X.org releases.
30
31 Note that the older i810/i815 chipsets require the use of the
32 i810 driver instead, and the Atom z5xx series has an entirely
33 different implementation.
34
35config DRM_I915_KMS
36 bool "Enable modesetting on intel by default"
37 depends on DRM_I915
38 help
39 Choose this option if you want kernel modesetting enabled by default,
40 and you have a new enough userspace to support this. Running old
41 userspaces with this enabled will cause pain. Note that this causes
42 the driver to bind to PCI devices, which precludes loading things
43 like intelfb.
44
45config DRM_I915_FBDEV
46 bool "Enable legacy fbdev support for the modesettting intel driver"
47 depends on DRM_I915
48 select DRM_KMS_FB_HELPER
49 select FB_CFB_FILLRECT
50 select FB_CFB_COPYAREA
51 select FB_CFB_IMAGEBLIT
52 default y
53 help
54 Choose this option if you need legacy fbdev support. Note
55 that this support also provides the Linux console on top of
56 the intel modesetting driver.
57
58config DRM_I915_PRELIMINARY_HW_SUPPORT
59 bool "Enable preliminary support for prerelease Intel hardware by default"
60 depends on DRM_I915
61 help
62 Choose this option if you have prerelease Intel hardware and want the
63 i915 driver to support it by default. You can enable such support at
64 runtime with the module option i915.preliminary_hw_support=1; this
65 option changes the default for that module option.
66
67 If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b8449a84a0dc..41838eaa799c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -21,6 +21,9 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
21 intel_display.o \ 21 intel_display.o \
22 intel_crt.o \ 22 intel_crt.o \
23 intel_lvds.o \ 23 intel_lvds.o \
24 intel_dsi.o \
25 intel_dsi_cmd.o \
26 intel_dsi_pll.o \
24 intel_bios.o \ 27 intel_bios.o \
25 intel_ddi.o \ 28 intel_ddi.o \
26 intel_dp.o \ 29 intel_dp.o \
@@ -30,7 +33,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
30 intel_panel.o \ 33 intel_panel.o \
31 intel_pm.o \ 34 intel_pm.o \
32 intel_i2c.o \ 35 intel_i2c.o \
33 intel_fb.o \
34 intel_tv.o \ 36 intel_tv.o \
35 intel_dvo.o \ 37 intel_dvo.o \
36 intel_ringbuffer.o \ 38 intel_ringbuffer.o \
@@ -51,6 +53,8 @@ i915-$(CONFIG_COMPAT) += i915_ioc32.o
51 53
52i915-$(CONFIG_ACPI) += intel_acpi.o 54i915-$(CONFIG_ACPI) += intel_acpi.o
53 55
56i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
57
54obj-$(CONFIG_DRM_I915) += i915.o 58obj-$(CONFIG_DRM_I915) += i915.o
55 59
56CFLAGS_i915_trace_points.o := -I$(src) 60CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 33a62ad80100..312163379db9 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -77,17 +77,6 @@ struct intel_dvo_dev_ops {
77 struct drm_display_mode *mode); 77 struct drm_display_mode *mode);
78 78
79 /* 79 /*
80 * Callback to adjust the mode to be set in the CRTC.
81 *
82 * This allows an output to adjust the clock or even the entire set of
83 * timings, which is used for panels with fixed timings or for
84 * buses with clock limitations.
85 */
86 bool (*mode_fixup)(struct intel_dvo_device *dvo,
87 const struct drm_display_mode *mode,
88 struct drm_display_mode *adjusted_mode);
89
90 /*
91 * Callback for preparing mode changes on an output 80 * Callback for preparing mode changes on an output
92 */ 81 */
93 void (*prepare)(struct intel_dvo_device *dvo); 82 void (*prepare)(struct intel_dvo_device *dvo);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a6f4cb5af185..7008aacfc3c9 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -27,6 +27,8 @@
27 */ 27 */
28 28
29#include <linux/seq_file.h> 29#include <linux/seq_file.h>
30#include <linux/circ_buf.h>
31#include <linux/ctype.h>
30#include <linux/debugfs.h> 32#include <linux/debugfs.h>
31#include <linux/slab.h> 33#include <linux/slab.h>
32#include <linux/export.h> 34#include <linux/export.h>
@@ -38,9 +40,6 @@
38#include <drm/i915_drm.h> 40#include <drm/i915_drm.h>
39#include "i915_drv.h" 41#include "i915_drv.h"
40 42
41#define DRM_I915_RING_DEBUG 1
42
43
44#if defined(CONFIG_DEBUG_FS) 43#if defined(CONFIG_DEBUG_FS)
45 44
46enum { 45enum {
@@ -54,6 +53,32 @@ static const char *yesno(int v)
54 return v ? "yes" : "no"; 53 return v ? "yes" : "no";
55} 54}
56 55
56/* As the drm_debugfs_init() routines are called before dev->dev_private is
 57 * allocated, we need to hook into the minor for release. */
58static int
59drm_add_fake_info_node(struct drm_minor *minor,
60 struct dentry *ent,
61 const void *key)
62{
63 struct drm_info_node *node;
64
65 node = kmalloc(sizeof(*node), GFP_KERNEL);
66 if (node == NULL) {
67 debugfs_remove(ent);
68 return -ENOMEM;
69 }
70
71 node->minor = minor;
72 node->dent = ent;
73 node->info_ent = (void *) key;
74
75 mutex_lock(&minor->debugfs_lock);
76 list_add(&node->list, &minor->debugfs_list);
77 mutex_unlock(&minor->debugfs_lock);
78
79 return 0;
80}
81
57static int i915_capabilities(struct seq_file *m, void *data) 82static int i915_capabilities(struct seq_file *m, void *data)
58{ 83{
59 struct drm_info_node *node = (struct drm_info_node *) m->private; 84 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -145,6 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
145 seq_printf(m, " (%s)", obj->ring->name); 170 seq_printf(m, " (%s)", obj->ring->name);
146} 171}
147 172
173static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
174{
175 seq_putc(m, ctx->is_initialized ? 'I' : 'i');
176 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
177 seq_putc(m, ' ');
178}
179
148static int i915_gem_object_list_info(struct seq_file *m, void *data) 180static int i915_gem_object_list_info(struct seq_file *m, void *data)
149{ 181{
150 struct drm_info_node *node = (struct drm_info_node *) m->private; 182 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -843,6 +875,8 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
843 drm_i915_private_t *dev_priv = dev->dev_private; 875 drm_i915_private_t *dev_priv = dev->dev_private;
844 int ret; 876 int ret;
845 877
878 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
879
846 if (IS_GEN5(dev)) { 880 if (IS_GEN5(dev)) {
847 u16 rgvswctl = I915_READ16(MEMSWCTL); 881 u16 rgvswctl = I915_READ16(MEMSWCTL);
848 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 882 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
@@ -1321,6 +1355,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1321 return 0; 1355 return 0;
1322 } 1356 }
1323 1357
1358 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1359
1324 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1360 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1325 if (ret) 1361 if (ret)
1326 return ret; 1362 return ret;
@@ -1395,12 +1431,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1395{ 1431{
1396 struct drm_info_node *node = (struct drm_info_node *) m->private; 1432 struct drm_info_node *node = (struct drm_info_node *) m->private;
1397 struct drm_device *dev = node->minor->dev; 1433 struct drm_device *dev = node->minor->dev;
1398 drm_i915_private_t *dev_priv = dev->dev_private; 1434 struct intel_fbdev *ifbdev = NULL;
1399 struct intel_fbdev *ifbdev;
1400 struct intel_framebuffer *fb; 1435 struct intel_framebuffer *fb;
1401 int ret;
1402 1436
1403 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1437#ifdef CONFIG_DRM_I915_FBDEV
1438 struct drm_i915_private *dev_priv = dev->dev_private;
1439 int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1404 if (ret) 1440 if (ret)
1405 return ret; 1441 return ret;
1406 1442
@@ -1416,10 +1452,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1416 describe_obj(m, fb->obj); 1452 describe_obj(m, fb->obj);
1417 seq_putc(m, '\n'); 1453 seq_putc(m, '\n');
1418 mutex_unlock(&dev->mode_config.mutex); 1454 mutex_unlock(&dev->mode_config.mutex);
1455#endif
1419 1456
1420 mutex_lock(&dev->mode_config.fb_lock); 1457 mutex_lock(&dev->mode_config.fb_lock);
1421 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { 1458 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
1422 if (&fb->base == ifbdev->helper.fb) 1459 if (ifbdev && &fb->base == ifbdev->helper.fb)
1423 continue; 1460 continue;
1424 1461
1425 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ", 1462 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
@@ -1442,6 +1479,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
1442 struct drm_device *dev = node->minor->dev; 1479 struct drm_device *dev = node->minor->dev;
1443 drm_i915_private_t *dev_priv = dev->dev_private; 1480 drm_i915_private_t *dev_priv = dev->dev_private;
1444 struct intel_ring_buffer *ring; 1481 struct intel_ring_buffer *ring;
1482 struct i915_hw_context *ctx;
1445 int ret, i; 1483 int ret, i;
1446 1484
1447 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1485 ret = mutex_lock_interruptible(&dev->mode_config.mutex);
@@ -1460,12 +1498,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
1460 seq_putc(m, '\n'); 1498 seq_putc(m, '\n');
1461 } 1499 }
1462 1500
1463 for_each_ring(ring, dev_priv, i) { 1501 list_for_each_entry(ctx, &dev_priv->context_list, link) {
1464 if (ring->default_context) { 1502 seq_puts(m, "HW context ");
1465 seq_printf(m, "HW default context %s ring ", ring->name); 1503 describe_ctx(m, ctx);
1466 describe_obj(m, ring->default_context->obj); 1504 for_each_ring(ring, dev_priv, i)
1467 seq_putc(m, '\n'); 1505 if (ring->default_context == ctx)
1468 } 1506 seq_printf(m, "(default context %s) ", ring->name);
1507
1508 describe_obj(m, ctx->obj);
1509 seq_putc(m, '\n');
1469 } 1510 }
1470 1511
1471 mutex_unlock(&dev->mode_config.mutex); 1512 mutex_unlock(&dev->mode_config.mutex);
@@ -1610,27 +1651,27 @@ static int i915_dpio_info(struct seq_file *m, void *data)
1610 seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL)); 1651 seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
1611 1652
1612 seq_printf(m, "DPIO_DIV_A: 0x%08x\n", 1653 seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
1613 vlv_dpio_read(dev_priv, _DPIO_DIV_A)); 1654 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
1614 seq_printf(m, "DPIO_DIV_B: 0x%08x\n", 1655 seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
1615 vlv_dpio_read(dev_priv, _DPIO_DIV_B)); 1656 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));
1616 1657
1617 seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n", 1658 seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
1618 vlv_dpio_read(dev_priv, _DPIO_REFSFR_A)); 1659 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
1619 seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n", 1660 seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
1620 vlv_dpio_read(dev_priv, _DPIO_REFSFR_B)); 1661 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));
1621 1662
1622 seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n", 1663 seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
1623 vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A)); 1664 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
1624 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n", 1665 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
1625 vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B)); 1666 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));
1626 1667
1627 seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n", 1668 seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
1628 vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A)); 1669 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
1629 seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n", 1670 seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
1630 vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B)); 1671 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));
1631 1672
1632 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", 1673 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
1633 vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE)); 1674 vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));
1634 1675
1635 mutex_unlock(&dev_priv->dpio_lock); 1676 mutex_unlock(&dev_priv->dpio_lock);
1636 1677
@@ -1655,126 +1696,20 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
1655 struct drm_info_node *node = m->private; 1696 struct drm_info_node *node = m->private;
1656 struct drm_device *dev = node->minor->dev; 1697 struct drm_device *dev = node->minor->dev;
1657 struct drm_i915_private *dev_priv = dev->dev_private; 1698 struct drm_i915_private *dev_priv = dev->dev_private;
1658 u32 psrstat, psrperf; 1699 u32 psrperf = 0;
1659 1700 bool enabled = false;
1660 if (!IS_HASWELL(dev)) {
1661 seq_puts(m, "PSR not supported on this platform\n");
1662 } else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
1663 seq_puts(m, "PSR enabled\n");
1664 } else {
1665 seq_puts(m, "PSR disabled: ");
1666 switch (dev_priv->no_psr_reason) {
1667 case PSR_NO_SOURCE:
1668 seq_puts(m, "not supported on this platform");
1669 break;
1670 case PSR_NO_SINK:
1671 seq_puts(m, "not supported by panel");
1672 break;
1673 case PSR_MODULE_PARAM:
1674 seq_puts(m, "disabled by flag");
1675 break;
1676 case PSR_CRTC_NOT_ACTIVE:
1677 seq_puts(m, "crtc not active");
1678 break;
1679 case PSR_PWR_WELL_ENABLED:
1680 seq_puts(m, "power well enabled");
1681 break;
1682 case PSR_NOT_TILED:
1683 seq_puts(m, "not tiled");
1684 break;
1685 case PSR_SPRITE_ENABLED:
1686 seq_puts(m, "sprite enabled");
1687 break;
1688 case PSR_S3D_ENABLED:
1689 seq_puts(m, "stereo 3d enabled");
1690 break;
1691 case PSR_INTERLACED_ENABLED:
1692 seq_puts(m, "interlaced enabled");
1693 break;
1694 case PSR_HSW_NOT_DDIA:
1695 seq_puts(m, "HSW ties PSR to DDI A (eDP)");
1696 break;
1697 default:
1698 seq_puts(m, "unknown reason");
1699 }
1700 seq_puts(m, "\n");
1701 return 0;
1702 }
1703
1704 psrstat = I915_READ(EDP_PSR_STATUS_CTL);
1705
1706 seq_puts(m, "PSR Current State: ");
1707 switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
1708 case EDP_PSR_STATUS_STATE_IDLE:
1709 seq_puts(m, "Reset state\n");
1710 break;
1711 case EDP_PSR_STATUS_STATE_SRDONACK:
1712 seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
1713 break;
1714 case EDP_PSR_STATUS_STATE_SRDENT:
1715 seq_puts(m, "SRD entry\n");
1716 break;
1717 case EDP_PSR_STATUS_STATE_BUFOFF:
1718 seq_puts(m, "Wait for buffer turn off\n");
1719 break;
1720 case EDP_PSR_STATUS_STATE_BUFON:
1721 seq_puts(m, "Wait for buffer turn on\n");
1722 break;
1723 case EDP_PSR_STATUS_STATE_AUXACK:
1724 seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
1725 break;
1726 case EDP_PSR_STATUS_STATE_SRDOFFACK:
1727 seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
1728 break;
1729 default:
1730 seq_puts(m, "Unknown\n");
1731 break;
1732 }
1733 1701
1734 seq_puts(m, "Link Status: "); 1702 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
1735 switch (psrstat & EDP_PSR_STATUS_LINK_MASK) { 1703 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
1736 case EDP_PSR_STATUS_LINK_FULL_OFF:
1737 seq_puts(m, "Link is fully off\n");
1738 break;
1739 case EDP_PSR_STATUS_LINK_FULL_ON:
1740 seq_puts(m, "Link is fully on\n");
1741 break;
1742 case EDP_PSR_STATUS_LINK_STANDBY:
1743 seq_puts(m, "Link is in standby\n");
1744 break;
1745 default:
1746 seq_puts(m, "Unknown\n");
1747 break;
1748 }
1749 1704
1750 seq_printf(m, "PSR Entry Count: %u\n", 1705 enabled = HAS_PSR(dev) &&
1751 psrstat >> EDP_PSR_STATUS_COUNT_SHIFT & 1706 I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1752 EDP_PSR_STATUS_COUNT_MASK); 1707 seq_printf(m, "Enabled: %s\n", yesno(enabled));
1753 1708
1754 seq_printf(m, "Max Sleep Timer Counter: %u\n", 1709 if (HAS_PSR(dev))
1755 psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT & 1710 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
1756 EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK); 1711 EDP_PSR_PERF_CNT_MASK;
1757 1712 seq_printf(m, "Performance_Counter: %u\n", psrperf);
1758 seq_printf(m, "Had AUX error: %s\n",
1759 yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));
1760
1761 seq_printf(m, "Sending AUX: %s\n",
1762 yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));
1763
1764 seq_printf(m, "Sending Idle: %s\n",
1765 yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));
1766
1767 seq_printf(m, "Sending TP2 TP3: %s\n",
1768 yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));
1769
1770 seq_printf(m, "Sending TP1: %s\n",
1771 yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));
1772
1773 seq_printf(m, "Idle Count: %u\n",
1774 psrstat & EDP_PSR_STATUS_IDLE_MASK);
1775
1776 psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
1777 seq_printf(m, "Performance Counter: %u\n", psrperf);
1778 1713
1779 return 0; 1714 return 0;
1780} 1715}
@@ -1825,6 +1760,751 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
1825 return 0; 1760 return 0;
1826} 1761}
1827 1762
1763struct pipe_crc_info {
1764 const char *name;
1765 struct drm_device *dev;
1766 enum pipe pipe;
1767};
1768
1769static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
1770{
1771 struct pipe_crc_info *info = inode->i_private;
1772 struct drm_i915_private *dev_priv = info->dev->dev_private;
1773 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
1774
1775 spin_lock_irq(&pipe_crc->lock);
1776
1777 if (pipe_crc->opened) {
1778 spin_unlock_irq(&pipe_crc->lock);
1779 return -EBUSY; /* already open */
1780 }
1781
1782 pipe_crc->opened = true;
1783 filep->private_data = inode->i_private;
1784
1785 spin_unlock_irq(&pipe_crc->lock);
1786
1787 return 0;
1788}
1789
1790static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
1791{
1792 struct pipe_crc_info *info = inode->i_private;
1793 struct drm_i915_private *dev_priv = info->dev->dev_private;
1794 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
1795
1796 spin_lock_irq(&pipe_crc->lock);
1797 pipe_crc->opened = false;
1798 spin_unlock_irq(&pipe_crc->lock);
1799
1800 return 0;
1801}
1802
1803/* (6 fields, 8 chars each, space separated (5) + '\n') */
1804#define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
1806/* account for the terminating '\0' */
1806#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)
1807
1808static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
1809{
1810 assert_spin_locked(&pipe_crc->lock);
1811 return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
1812 INTEL_PIPE_CRC_ENTRIES_NR);
1813}
1814
1815static ssize_t
1816i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
1817 loff_t *pos)
1818{
1819 struct pipe_crc_info *info = filep->private_data;
1820 struct drm_device *dev = info->dev;
1821 struct drm_i915_private *dev_priv = dev->dev_private;
1822 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
1823 char buf[PIPE_CRC_BUFFER_LEN];
1824 int head, tail, n_entries, n;
1825 ssize_t bytes_read;
1826
1827 /*
1828 * Don't allow user space to provide buffers not big enough to hold
1829 * a line of data.
1830 */
1831 if (count < PIPE_CRC_LINE_LEN)
1832 return -EINVAL;
1833
1834 if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
1835 return 0;
1836
1837 /* nothing to read */
1838 spin_lock_irq(&pipe_crc->lock);
1839 while (pipe_crc_data_count(pipe_crc) == 0) {
1840 int ret;
1841
1842 if (filep->f_flags & O_NONBLOCK) {
1843 spin_unlock_irq(&pipe_crc->lock);
1844 return -EAGAIN;
1845 }
1846
1847 ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
1848 pipe_crc_data_count(pipe_crc), pipe_crc->lock);
1849 if (ret) {
1850 spin_unlock_irq(&pipe_crc->lock);
1851 return ret;
1852 }
1853 }
1854
1855 /* We now have one or more entries to read */
1856 head = pipe_crc->head;
1857 tail = pipe_crc->tail;
1858 n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
1859 count / PIPE_CRC_LINE_LEN);
1860 spin_unlock_irq(&pipe_crc->lock);
1861
1862 bytes_read = 0;
1863 n = 0;
1864 do {
1865 struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
1866 int ret;
1867
1868 bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
1869 "%8u %8x %8x %8x %8x %8x\n",
1870 entry->frame, entry->crc[0],
1871 entry->crc[1], entry->crc[2],
1872 entry->crc[3], entry->crc[4]);
1873
1874 ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
1875 buf, PIPE_CRC_LINE_LEN);
1876 if (ret == PIPE_CRC_LINE_LEN)
1877 return -EFAULT;
1878
1879 BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
1880 tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1881 n++;
1882 } while (--n_entries);
1883
1884 spin_lock_irq(&pipe_crc->lock);
1885 pipe_crc->tail = tail;
1886 spin_unlock_irq(&pipe_crc->lock);
1887
1888 return bytes_read;
1889}
1890
1891static const struct file_operations i915_pipe_crc_fops = {
1892 .owner = THIS_MODULE,
1893 .open = i915_pipe_crc_open,
1894 .read = i915_pipe_crc_read,
1895 .release = i915_pipe_crc_release,
1896};
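i915_pipe_crc_read() emits fixed-width lines of PIPE_CRC_LINE_LEN bytes each, formatted "%8u %8x %8x %8x %8x %8x\n", and blocks (unless O_NONBLOCK) until at least one entry is queued. A minimal userspace consumer, assuming debugfs is mounted at the usual location (the path is an assumption, not taken from this patch):

#include <stdio.h>

/* Drain frame/CRC lines from the pipe A CRC file; each line is the
 * frame counter followed by the five CRC result values. */
int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/dri/0/i915_pipe_A_crc", "r");
	unsigned int frame, crc[5];

	if (!f)
		return 1;
	while (fscanf(f, "%u %x %x %x %x %x", &frame, &crc[0],
		      &crc[1], &crc[2], &crc[3], &crc[4]) == 6)
		printf("frame %u crc %08x\n", frame, crc[0]);
	fclose(f);
	return 0;
}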
1897
1898static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
1899 {
1900 .name = "i915_pipe_A_crc",
1901 .pipe = PIPE_A,
1902 },
1903 {
1904 .name = "i915_pipe_B_crc",
1905 .pipe = PIPE_B,
1906 },
1907 {
1908 .name = "i915_pipe_C_crc",
1909 .pipe = PIPE_C,
1910 },
1911};
1912
1913static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
1914 enum pipe pipe)
1915{
1916 struct drm_device *dev = minor->dev;
1917 struct dentry *ent;
1918 struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
1919
1920 info->dev = dev;
1921 ent = debugfs_create_file(info->name, S_IRUGO, root, info,
1922 &i915_pipe_crc_fops);
1923 if (IS_ERR(ent))
1924 return PTR_ERR(ent);
1925
1926 return drm_add_fake_info_node(minor, ent, info);
1927}
1928
1929static const char * const pipe_crc_sources[] = {
1930 "none",
1931 "plane1",
1932 "plane2",
1933 "pf",
1934 "pipe",
1935 "TV",
1936 "DP-B",
1937 "DP-C",
1938 "DP-D",
1939 "auto",
1940};
1941
1942static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
1943{
1944 BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
1945 return pipe_crc_sources[source];
1946}
1947
1948static int display_crc_ctl_show(struct seq_file *m, void *data)
1949{
1950 struct drm_device *dev = m->private;
1951 struct drm_i915_private *dev_priv = dev->dev_private;
1952 int i;
1953
1954 for (i = 0; i < I915_MAX_PIPES; i++)
1955 seq_printf(m, "%c %s\n", pipe_name(i),
1956 pipe_crc_source_name(dev_priv->pipe_crc[i].source));
1957
1958 return 0;
1959}
1960
1961static int display_crc_ctl_open(struct inode *inode, struct file *file)
1962{
1963 struct drm_device *dev = inode->i_private;
1964
1965 return single_open(file, display_crc_ctl_show, dev);
1966}
1967
1968static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
1969 uint32_t *val)
1970{
1971 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
1972 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
1973
1974 switch (*source) {
1975 case INTEL_PIPE_CRC_SOURCE_PIPE:
1976 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
1977 break;
1978 case INTEL_PIPE_CRC_SOURCE_NONE:
1979 *val = 0;
1980 break;
1981 default:
1982 return -EINVAL;
1983 }
1984
1985 return 0;
1986}
1987
1988static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
1989 enum intel_pipe_crc_source *source)
1990{
1991 struct intel_encoder *encoder;
1992 struct intel_crtc *crtc;
1993 struct intel_digital_port *dig_port;
1994 int ret = 0;
1995
1996 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
1997
1998 mutex_lock(&dev->mode_config.mutex);
1999 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2000 base.head) {
2001 if (!encoder->base.crtc)
2002 continue;
2003
2004 crtc = to_intel_crtc(encoder->base.crtc);
2005
2006 if (crtc->pipe != pipe)
2007 continue;
2008
2009 switch (encoder->type) {
2010 case INTEL_OUTPUT_TVOUT:
2011 *source = INTEL_PIPE_CRC_SOURCE_TV;
2012 break;
2013 case INTEL_OUTPUT_DISPLAYPORT:
2014 case INTEL_OUTPUT_EDP:
2015 dig_port = enc_to_dig_port(&encoder->base);
2016 switch (dig_port->port) {
2017 case PORT_B:
2018 *source = INTEL_PIPE_CRC_SOURCE_DP_B;
2019 break;
2020 case PORT_C:
2021 *source = INTEL_PIPE_CRC_SOURCE_DP_C;
2022 break;
2023 case PORT_D:
2024 *source = INTEL_PIPE_CRC_SOURCE_DP_D;
2025 break;
2026 default:
2027 WARN(1, "nonexisting DP port %c\n",
2028 port_name(dig_port->port));
2029 break;
2030 }
2031 break;
2032 }
2033 }
2034 mutex_unlock(&dev->mode_config.mutex);
2035
2036 return ret;
2037}
2038
2039static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
2040 enum pipe pipe,
2041 enum intel_pipe_crc_source *source,
2042 uint32_t *val)
2043{
2044 struct drm_i915_private *dev_priv = dev->dev_private;
2045 bool need_stable_symbols = false;
2046
2047 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
2048 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
2049 if (ret)
2050 return ret;
2051 }
2052
2053 switch (*source) {
2054 case INTEL_PIPE_CRC_SOURCE_PIPE:
2055 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
2056 break;
2057 case INTEL_PIPE_CRC_SOURCE_DP_B:
2058 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
2059 need_stable_symbols = true;
2060 break;
2061 case INTEL_PIPE_CRC_SOURCE_DP_C:
2062 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
2063 need_stable_symbols = true;
2064 break;
2065 case INTEL_PIPE_CRC_SOURCE_NONE:
2066 *val = 0;
2067 break;
2068 default:
2069 return -EINVAL;
2070 }
2071
2072 /*
2073 * When the pipe CRC tap point is after the transcoders we need
2074 * to tweak symbol-level features to produce a deterministic series of
2075 * symbols for a given frame. We need to reset those features only once
2076 * a frame (instead of every nth symbol):
2077 * - DC-balance: used to ensure a better clock recovery from the data
2078 * link (SDVO)
2079 * - DisplayPort scrambling: used for EMI reduction
2080 */
2081 if (need_stable_symbols) {
2082 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2083
2084 WARN_ON(!IS_G4X(dev));
2085
2086 tmp |= DC_BALANCE_RESET_VLV;
2087 if (pipe == PIPE_A)
2088 tmp |= PIPE_A_SCRAMBLE_RESET;
2089 else
2090 tmp |= PIPE_B_SCRAMBLE_RESET;
2091
2092 I915_WRITE(PORT_DFT2_G4X, tmp);
2093 }
2094
2095 return 0;
2096}
2097
2098static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
2099 enum pipe pipe,
2100 enum intel_pipe_crc_source *source,
2101 uint32_t *val)
2102{
2103 struct drm_i915_private *dev_priv = dev->dev_private;
2104 bool need_stable_symbols = false;
2105
2106 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
2107 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
2108 if (ret)
2109 return ret;
2110 }
2111
2112 switch (*source) {
2113 case INTEL_PIPE_CRC_SOURCE_PIPE:
2114 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
2115 break;
2116 case INTEL_PIPE_CRC_SOURCE_TV:
2117 if (!SUPPORTS_TV(dev))
2118 return -EINVAL;
2119 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
2120 break;
2121 case INTEL_PIPE_CRC_SOURCE_DP_B:
2122 if (!IS_G4X(dev))
2123 return -EINVAL;
2124 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
2125 need_stable_symbols = true;
2126 break;
2127 case INTEL_PIPE_CRC_SOURCE_DP_C:
2128 if (!IS_G4X(dev))
2129 return -EINVAL;
2130 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
2131 need_stable_symbols = true;
2132 break;
2133 case INTEL_PIPE_CRC_SOURCE_DP_D:
2134 if (!IS_G4X(dev))
2135 return -EINVAL;
2136 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
2137 need_stable_symbols = true;
2138 break;
2139 case INTEL_PIPE_CRC_SOURCE_NONE:
2140 *val = 0;
2141 break;
2142 default:
2143 return -EINVAL;
2144 }
2145
2146 /*
2147 * When the pipe CRC tap point is after the transcoders we need
2148 * to tweak symbol-level features to produce a deterministic series of
2149 * symbols for a given frame. We need to reset those features only once
2150 * a frame (instead of every nth symbol):
2151 * - DC-balance: used to ensure a better clock recovery from the data
2152 * link (SDVO)
2153 * - DisplayPort scrambling: used for EMI reduction
2154 */
2155 if (need_stable_symbols) {
2156 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2157
2158 WARN_ON(!IS_G4X(dev));
2159
2160 I915_WRITE(PORT_DFT_I9XX,
2161 I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
2162
2163 if (pipe == PIPE_A)
2164 tmp |= PIPE_A_SCRAMBLE_RESET;
2165 else
2166 tmp |= PIPE_B_SCRAMBLE_RESET;
2167
2168 I915_WRITE(PORT_DFT2_G4X, tmp);
2169 }
2170
2171 return 0;
2172}
2173
2174static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
2175 enum pipe pipe)
2176{
2177 struct drm_i915_private *dev_priv = dev->dev_private;
2178 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2179
2180 if (pipe == PIPE_A)
2181 tmp &= ~PIPE_A_SCRAMBLE_RESET;
2182 else
2183 tmp &= ~PIPE_B_SCRAMBLE_RESET;
2184 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
2185 tmp &= ~DC_BALANCE_RESET_VLV;
2186 I915_WRITE(PORT_DFT2_G4X, tmp);
2187
2188}
2189
2190static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
2191 enum pipe pipe)
2192{
2193 struct drm_i915_private *dev_priv = dev->dev_private;
2194 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2195
2196 if (pipe == PIPE_A)
2197 tmp &= ~PIPE_A_SCRAMBLE_RESET;
2198 else
2199 tmp &= ~PIPE_B_SCRAMBLE_RESET;
2200 I915_WRITE(PORT_DFT2_G4X, tmp);
2201
2202 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
2203 I915_WRITE(PORT_DFT_I9XX,
2204 I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
2205 }
2206}
2207
2208static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
2209 uint32_t *val)
2210{
2211 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
2212 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
2213
2214 switch (*source) {
2215 case INTEL_PIPE_CRC_SOURCE_PLANE1:
2216 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
2217 break;
2218 case INTEL_PIPE_CRC_SOURCE_PLANE2:
2219 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
2220 break;
2221 case INTEL_PIPE_CRC_SOURCE_PIPE:
2222 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
2223 break;
2224 case INTEL_PIPE_CRC_SOURCE_NONE:
2225 *val = 0;
2226 break;
2227 default:
2228 return -EINVAL;
2229 }
2230
2231 return 0;
2232}
2233
2234static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
2235 uint32_t *val)
2236{
2237 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
2238 *source = INTEL_PIPE_CRC_SOURCE_PF;
2239
2240 switch (*source) {
2241 case INTEL_PIPE_CRC_SOURCE_PLANE1:
2242 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
2243 break;
2244 case INTEL_PIPE_CRC_SOURCE_PLANE2:
2245 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
2246 break;
2247 case INTEL_PIPE_CRC_SOURCE_PF:
2248 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
2249 break;
2250 case INTEL_PIPE_CRC_SOURCE_NONE:
2251 *val = 0;
2252 break;
2253 default:
2254 return -EINVAL;
2255 }
2256
2257 return 0;
2258}
2259
2260static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
2261 enum intel_pipe_crc_source source)
2262{
2263 struct drm_i915_private *dev_priv = dev->dev_private;
2264 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
2265 u32 val;
2266 int ret;
2267
2268 if (pipe_crc->source == source)
2269 return 0;
2270
2271 /* forbid changing the source without going back to 'none' */
2272 if (pipe_crc->source && source)
2273 return -EINVAL;
2274
2275 if (IS_GEN2(dev))
2276 ret = i8xx_pipe_crc_ctl_reg(&source, &val);
2277 else if (INTEL_INFO(dev)->gen < 5)
2278 ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
2279 else if (IS_VALLEYVIEW(dev))
2280 ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
2281 else if (IS_GEN5(dev) || IS_GEN6(dev))
2282 ret = ilk_pipe_crc_ctl_reg(&source, &val);
2283 else
2284 ret = ivb_pipe_crc_ctl_reg(&source, &val);
2285
2286 if (ret != 0)
2287 return ret;
2288
2289 /* none -> real source transition */
2290 if (source) {
2291 DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
2292 pipe_name(pipe), pipe_crc_source_name(source));
2293
2294 pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
2295 INTEL_PIPE_CRC_ENTRIES_NR,
2296 GFP_KERNEL);
2297 if (!pipe_crc->entries)
2298 return -ENOMEM;
2299
2300 spin_lock_irq(&pipe_crc->lock);
2301 pipe_crc->head = 0;
2302 pipe_crc->tail = 0;
2303 spin_unlock_irq(&pipe_crc->lock);
2304 }
2305
2306 pipe_crc->source = source;
2307
2308 I915_WRITE(PIPE_CRC_CTL(pipe), val);
2309 POSTING_READ(PIPE_CRC_CTL(pipe));
2310
2311 /* real source -> none transition */
2312 if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
2313 struct intel_pipe_crc_entry *entries;
2314
2315 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
2316 pipe_name(pipe));
2317
2318 intel_wait_for_vblank(dev, pipe);
2319
2320 spin_lock_irq(&pipe_crc->lock);
2321 entries = pipe_crc->entries;
2322 pipe_crc->entries = NULL;
2323 spin_unlock_irq(&pipe_crc->lock);
2324
2325 kfree(entries);
2326
2327 if (IS_G4X(dev))
2328 g4x_undo_pipe_scramble_reset(dev, pipe);
2329 else if (IS_VALLEYVIEW(dev))
2330 vlv_undo_pipe_scramble_reset(dev, pipe);
2331 }
2332
2333 return 0;
2334}
2335
2336/*
2337 * Parse pipe CRC command strings:
2338 * command: wsp* object wsp+ name wsp+ source wsp*
2339 * object: 'pipe'
2340 * name: (A | B | C)
2341 * source: (none | plane1 | plane2 | pf)
2342 * wsp: (#0x20 | #0x9 | #0xA)+
2343 *
2344 * e.g.:
2345 * "pipe A plane1" -> Start CRC computations on plane1 of pipe A
2346 * "pipe A none" -> Stop CRC
2347 */
2348static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
2349{
2350 int n_words = 0;
2351
2352 while (*buf) {
2353 char *end;
2354
2355 /* skip leading white space */
2356 buf = skip_spaces(buf);
2357 if (!*buf)
2358 break; /* end of buffer */
2359
2360 /* find end of word */
2361 for (end = buf; *end && !isspace(*end); end++)
2362 ;
2363
2364 if (n_words == max_words) {
2365 DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
2366 max_words);
2367 return -EINVAL; /* ran out of words[] before bytes */
2368 }
2369
2370 if (*end)
2371 *end++ = '\0';
2372 words[n_words++] = buf;
2373 buf = end;
2374 }
2375
2376 return n_words;
2377}
2378
2379enum intel_pipe_crc_object {
2380 PIPE_CRC_OBJECT_PIPE,
2381};
2382
2383static const char * const pipe_crc_objects[] = {
2384 "pipe",
2385};
2386
2387static int
2388display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
2389{
2390 int i;
2391
2392 for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
2393 if (!strcmp(buf, pipe_crc_objects[i])) {
2394 *o = i;
2395 return 0;
2396 }
2397
2398 return -EINVAL;
2399}
2400
2401static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
2402{
2403 const char name = buf[0];
2404
2405 if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
2406 return -EINVAL;
2407
2408 *pipe = name - 'A';
2409
2410 return 0;
2411}
2412
2413static int
2414display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
2415{
2416 int i;
2417
2418 for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
2419 if (!strcmp(buf, pipe_crc_sources[i])) {
2420 *s = i;
2421 return 0;
2422 }
2423
2424 return -EINVAL;
2425}
2426
2427static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
2428{
2429#define N_WORDS 3
2430 int n_words;
2431 char *words[N_WORDS];
2432 enum pipe pipe;
2433 enum intel_pipe_crc_object object;
2434 enum intel_pipe_crc_source source;
2435
2436 n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
2437 if (n_words != N_WORDS) {
2438 DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
2439 N_WORDS);
2440 return -EINVAL;
2441 }
2442
2443 if (display_crc_ctl_parse_object(words[0], &object) < 0) {
2444 DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
2445 return -EINVAL;
2446 }
2447
2448 if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
2449 DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
2450 return -EINVAL;
2451 }
2452
2453 if (display_crc_ctl_parse_source(words[2], &source) < 0) {
2454 DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
2455 return -EINVAL;
2456 }
2457
2458 return pipe_crc_set_source(dev, pipe, source);
2459}
2460
2461static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
2462 size_t len, loff_t *offp)
2463{
2464 struct seq_file *m = file->private_data;
2465 struct drm_device *dev = m->private;
2466 char *tmpbuf;
2467 int ret;
2468
2469 if (len == 0)
2470 return 0;
2471
2472 if (len > PAGE_SIZE - 1) {
2473 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
2474 PAGE_SIZE);
2475 return -E2BIG;
2476 }
2477
2478 tmpbuf = kmalloc(len + 1, GFP_KERNEL);
2479 if (!tmpbuf)
2480 return -ENOMEM;
2481
2482 if (copy_from_user(tmpbuf, ubuf, len)) {
2483 ret = -EFAULT;
2484 goto out;
2485 }
2486 tmpbuf[len] = '\0';
2487
2488 ret = display_crc_ctl_parse(dev, tmpbuf, len);
2489
2490out:
2491 kfree(tmpbuf);
2492 if (ret < 0)
2493 return ret;
2494
2495 *offp += len;
2496 return len;
2497}
2498
2499static const struct file_operations i915_display_crc_ctl_fops = {
2500 .owner = THIS_MODULE,
2501 .open = display_crc_ctl_open,
2502 .read = seq_read,
2503 .llseek = seq_lseek,
2504 .release = single_release,
2505 .write = display_crc_ctl_write
2506};
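Per the grammar in the comment above display_crc_ctl_tokenize(), capture is armed by writing "pipe <A|B|C> <source>" and disarmed with source "none"; pipe_crc_set_source() refuses to switch sources without passing through "none" first. A usage sketch (the debugfs path is an assumption):

#include <stdio.h>

/* Write one command string to the CRC control file. */
static int crc_ctl(const char *cmd)
{
	FILE *f = fopen("/sys/kernel/debug/dri/0/i915_display_crc_ctl", "w");
	int err;

	if (!f)
		return -1;
	err = fputs(cmd, f) == EOF;
	err |= fclose(f) == EOF;	/* fclose() flushes the write */
	return err ? -1 : 0;
}

int main(void)
{
	if (crc_ctl("pipe A pipe"))	/* arm: CRC the whole pipe */
		return 1;
	/* ... read i915_pipe_A_crc here ... */
	return crc_ctl("pipe A none") ? 1 : 0;	/* disarm */
}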
2507
1828static int 2508static int
1829i915_wedged_get(void *data, u64 *val) 2509i915_wedged_get(void *data, u64 *val)
1830{ 2510{
@@ -1885,6 +2565,72 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
1885 i915_ring_stop_get, i915_ring_stop_set, 2565 i915_ring_stop_get, i915_ring_stop_set,
1886 "0x%08llx\n"); 2566 "0x%08llx\n");
1887 2567
2568static int
2569i915_ring_missed_irq_get(void *data, u64 *val)
2570{
2571 struct drm_device *dev = data;
2572 struct drm_i915_private *dev_priv = dev->dev_private;
2573
2574 *val = dev_priv->gpu_error.missed_irq_rings;
2575 return 0;
2576}
2577
2578static int
2579i915_ring_missed_irq_set(void *data, u64 val)
2580{
2581 struct drm_device *dev = data;
2582 struct drm_i915_private *dev_priv = dev->dev_private;
2583 int ret;
2584
2585 /* Lock against concurrent debugfs callers */
2586 ret = mutex_lock_interruptible(&dev->struct_mutex);
2587 if (ret)
2588 return ret;
2589 dev_priv->gpu_error.missed_irq_rings = val;
2590 mutex_unlock(&dev->struct_mutex);
2591
2592 return 0;
2593}
2594
2595DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
2596 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
2597 "0x%08llx\n");
2598
2599static int
2600i915_ring_test_irq_get(void *data, u64 *val)
2601{
2602 struct drm_device *dev = data;
2603 struct drm_i915_private *dev_priv = dev->dev_private;
2604
2605 *val = dev_priv->gpu_error.test_irq_rings;
2606
2607 return 0;
2608}
2609
2610static int
2611i915_ring_test_irq_set(void *data, u64 val)
2612{
2613 struct drm_device *dev = data;
2614 struct drm_i915_private *dev_priv = dev->dev_private;
2615 int ret;
2616
2617 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
2618
2619 /* Lock against concurrent debugfs callers */
2620 ret = mutex_lock_interruptible(&dev->struct_mutex);
2621 if (ret)
2622 return ret;
2623
2624 dev_priv->gpu_error.test_irq_rings = val;
2625 mutex_unlock(&dev->struct_mutex);
2626
2627 return 0;
2628}
2629
2630DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
2631 i915_ring_test_irq_get, i915_ring_test_irq_set,
2632 "0x%08llx\n");
2633
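Both knobs rely on DEFINE_SIMPLE_ATTRIBUTE(), which generates the file_operations for a debugfs file backed by a u64 get/set pair plus a printf format string. A minimal sketch of the pattern with a hypothetical knob (kernel context assumed):

static u64 example_value;

static int example_get(void *data, u64 *val)
{
        *val = example_value;
        return 0;
}

static int example_set(void *data, u64 val)
{
        example_value = val;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_set, "0x%08llx\n");

/* Registered with, e.g.:
 *   debugfs_create_file("example", S_IRUGO | S_IWUSR, parent, dev, &example_fops);
 * after which `cat` prints the value via example_get() and writing
 * `echo 0x2 >` into the file calls example_set().
 */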
1888#define DROP_UNBOUND 0x1 2634#define DROP_UNBOUND 0x1
1889#define DROP_BOUND 0x2 2635#define DROP_BOUND 0x2
1890#define DROP_RETIRE 0x4 2636#define DROP_RETIRE 0x4
@@ -1972,6 +2718,8 @@ i915_max_freq_get(void *data, u64 *val)
1972 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2718 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1973 return -ENODEV; 2719 return -ENODEV;
1974 2720
2721 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
2722
1975 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 2723 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1976 if (ret) 2724 if (ret)
1977 return ret; 2725 return ret;
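The flush_delayed_work() calls added to the four freq accessors make sure the deferred RPS resume work has actually run before the accessor takes hw_lock and touches the frequency limits. The general pattern, sketched with hypothetical names (kernel context assumed):

static void resume_fn(struct work_struct *work)
{
        /* ... bring deferred hardware state up ... */
}

static DECLARE_DELAYED_WORK(resume_work, resume_fn);

static u64 read_knob(void)
{
        flush_delayed_work(&resume_work);  /* run resume_fn() now if pending */
        /* now safe to read the state resume_fn() initializes */
        return 0;
}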
@@ -1996,6 +2744,8 @@ i915_max_freq_set(void *data, u64 val)
1996 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2744 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1997 return -ENODEV; 2745 return -ENODEV;
1998 2746
2747 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
2748
1999 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); 2749 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
2000 2750
2001 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 2751 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2034,6 +2784,8 @@ i915_min_freq_get(void *data, u64 *val)
2034 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2784 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2035 return -ENODEV; 2785 return -ENODEV;
2036 2786
2787 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
2788
2037 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 2789 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
2038 if (ret) 2790 if (ret)
2039 return ret; 2791 return ret;
@@ -2058,6 +2810,8 @@ i915_min_freq_set(void *data, u64 val)
2058 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2810 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2059 return -ENODEV; 2811 return -ENODEV;
2060 2812
2813 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
2814
2061 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); 2815 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
2062 2816
2063 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 2817 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2136,32 +2890,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
2136 i915_cache_sharing_get, i915_cache_sharing_set, 2890 i915_cache_sharing_get, i915_cache_sharing_set,
2137 "%llu\n"); 2891 "%llu\n");
2138 2892
2139/* As the drm_debugfs_init() routines are called before dev->dev_private is
2140 * allocated we need to hook into the minor for release. */
2141static int
2142drm_add_fake_info_node(struct drm_minor *minor,
2143 struct dentry *ent,
2144 const void *key)
2145{
2146 struct drm_info_node *node;
2147
2148 node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
2149 if (node == NULL) {
2150 debugfs_remove(ent);
2151 return -ENOMEM;
2152 }
2153
2154 node->minor = minor;
2155 node->dent = ent;
2156 node->info_ent = (void *) key;
2157
2158 mutex_lock(&minor->debugfs_lock);
2159 list_add(&node->list, &minor->debugfs_list);
2160 mutex_unlock(&minor->debugfs_lock);
2161
2162 return 0;
2163}
2164
2165static int i915_forcewake_open(struct inode *inode, struct file *file) 2893static int i915_forcewake_open(struct inode *inode, struct file *file)
2166{ 2894{
2167 struct drm_device *dev = inode->i_private; 2895 struct drm_device *dev = inode->i_private;
@@ -2278,11 +3006,28 @@ static struct i915_debugfs_files {
2278 {"i915_min_freq", &i915_min_freq_fops}, 3006 {"i915_min_freq", &i915_min_freq_fops},
2279 {"i915_cache_sharing", &i915_cache_sharing_fops}, 3007 {"i915_cache_sharing", &i915_cache_sharing_fops},
2280 {"i915_ring_stop", &i915_ring_stop_fops}, 3008 {"i915_ring_stop", &i915_ring_stop_fops},
3009 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
3010 {"i915_ring_test_irq", &i915_ring_test_irq_fops},
2281 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 3011 {"i915_gem_drop_caches", &i915_drop_caches_fops},
2282 {"i915_error_state", &i915_error_state_fops}, 3012 {"i915_error_state", &i915_error_state_fops},
2283 {"i915_next_seqno", &i915_next_seqno_fops}, 3013 {"i915_next_seqno", &i915_next_seqno_fops},
3014 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
2284}; 3015};
2285 3016
3017void intel_display_crc_init(struct drm_device *dev)
3018{
3019 struct drm_i915_private *dev_priv = dev->dev_private;
3020 int i;
3021
3022 for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
3023 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i];
3024
3025 pipe_crc->opened = false;
3026 spin_lock_init(&pipe_crc->lock);
3027 init_waitqueue_head(&pipe_crc->wq);
3028 }
3029}
3030
2286int i915_debugfs_init(struct drm_minor *minor) 3031int i915_debugfs_init(struct drm_minor *minor)
2287{ 3032{
2288 int ret, i; 3033 int ret, i;
@@ -2291,6 +3036,12 @@ int i915_debugfs_init(struct drm_minor *minor)
2291 if (ret) 3036 if (ret)
2292 return ret; 3037 return ret;
2293 3038
3039 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
3040 ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
3041 if (ret)
3042 return ret;
3043 }
3044
2294 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 3045 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
2295 ret = i915_debugfs_create(minor->debugfs_root, minor, 3046 ret = i915_debugfs_create(minor->debugfs_root, minor,
2296 i915_debugfs_files[i].name, 3047 i915_debugfs_files[i].name,
@@ -2310,8 +3061,17 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
2310 3061
2311 drm_debugfs_remove_files(i915_debugfs_list, 3062 drm_debugfs_remove_files(i915_debugfs_list,
2312 I915_DEBUGFS_ENTRIES, minor); 3063 I915_DEBUGFS_ENTRIES, minor);
3064
2313 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 3065 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
2314 1, minor); 3066 1, minor);
3067
3068 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
3069 struct drm_info_list *info_list =
3070 (struct drm_info_list *)&i915_pipe_crc_data[i];
3071
3072 drm_debugfs_remove_files(info_list, 1, minor);
3073 }
3074
2315 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 3075 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
2316 struct drm_info_list *info_list = 3076 struct drm_info_list *info_list =
2317 (struct drm_info_list *) i915_debugfs_files[i].fops; 3077 (struct drm_info_list *) i915_debugfs_files[i].fops;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d5c784d48671..0cab2d045135 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -52,7 +52,7 @@
52 intel_ring_emit(LP_RING(dev_priv), x) 52 intel_ring_emit(LP_RING(dev_priv), x)
53 53
54#define ADVANCE_LP_RING() \ 54#define ADVANCE_LP_RING() \
55 intel_ring_advance(LP_RING(dev_priv)) 55 __intel_ring_advance(LP_RING(dev_priv))
56 56
57/** 57/**
58 * Lock test for when it's just for synchronization of ring access. 58 * Lock test for when it's just for synchronization of ring access.
@@ -641,7 +641,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
641 641
642 if (batch->num_cliprects) { 642 if (batch->num_cliprects) {
643 cliprects = kcalloc(batch->num_cliprects, 643 cliprects = kcalloc(batch->num_cliprects,
644 sizeof(struct drm_clip_rect), 644 sizeof(*cliprects),
645 GFP_KERNEL); 645 GFP_KERNEL);
646 if (cliprects == NULL) 646 if (cliprects == NULL)
647 return -ENOMEM; 647 return -ENOMEM;
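The kcalloc() changes in this file switch to the sizeof(*ptr) idiom: deriving the element size from the pointer being assigned keeps the allocation in sync even if the pointee type is later changed. Userspace equivalent with calloc():

#include <stdlib.h>

struct clip_rect { unsigned short x1, y1, x2, y2; };

int main(void)
{
        int num = 8;
        struct clip_rect *cliprects;

        /* sizeof(*cliprects), not sizeof(struct clip_rect): stays correct
         * even if cliprects is retyped later. */
        cliprects = calloc(num, sizeof(*cliprects));
        if (!cliprects)
                return 1;
        free(cliprects);
        return 0;
}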
@@ -703,7 +703,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
703 703
704 if (cmdbuf->num_cliprects) { 704 if (cmdbuf->num_cliprects) {
705 cliprects = kcalloc(cmdbuf->num_cliprects, 705 cliprects = kcalloc(cmdbuf->num_cliprects,
706 sizeof(struct drm_clip_rect), GFP_KERNEL); 706 sizeof(*cliprects), GFP_KERNEL);
707 if (cliprects == NULL) { 707 if (cliprects == NULL) {
708 ret = -ENOMEM; 708 ret = -ENOMEM;
709 goto fail_batch_free; 709 goto fail_batch_free;
@@ -931,7 +931,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
931 value = READ_BREADCRUMB(dev_priv); 931 value = READ_BREADCRUMB(dev_priv);
932 break; 932 break;
933 case I915_PARAM_CHIPSET_ID: 933 case I915_PARAM_CHIPSET_ID:
934 value = dev->pci_device; 934 value = dev->pdev->device;
935 break; 935 break;
936 case I915_PARAM_HAS_GEM: 936 case I915_PARAM_HAS_GEM:
937 value = 1; 937 value = 1;
@@ -1311,13 +1311,15 @@ static int i915_load_modeset_init(struct drm_device *dev)
1311 if (ret) 1311 if (ret)
1312 goto cleanup_gem_stolen; 1312 goto cleanup_gem_stolen;
1313 1313
1314 intel_power_domains_init_hw(dev);
1315
1314 /* Important: The output setup functions called by modeset_init need 1316 /* Important: The output setup functions called by modeset_init need
1315 * working irqs for e.g. gmbus and dp aux transfers. */ 1317 * working irqs for e.g. gmbus and dp aux transfers. */
1316 intel_modeset_init(dev); 1318 intel_modeset_init(dev);
1317 1319
1318 ret = i915_gem_init(dev); 1320 ret = i915_gem_init(dev);
1319 if (ret) 1321 if (ret)
1320 goto cleanup_irq; 1322 goto cleanup_power;
1321 1323
1322 INIT_WORK(&dev_priv->console_resume_work, intel_console_resume); 1324 INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
1323 1325
@@ -1325,9 +1327,11 @@ static int i915_load_modeset_init(struct drm_device *dev)
1325 1327
1326 /* Always safe in the mode setting case. */ 1328 /* Always safe in the mode setting case. */
1327 /* FIXME: do pre/post-mode set stuff in core KMS code */ 1329 /* FIXME: do pre/post-mode set stuff in core KMS code */
1328 dev->vblank_disable_allowed = 1; 1330 dev->vblank_disable_allowed = true;
1329 if (INTEL_INFO(dev)->num_pipes == 0) 1331 if (INTEL_INFO(dev)->num_pipes == 0) {
1332 intel_display_power_put(dev, POWER_DOMAIN_VGA);
1330 return 0; 1333 return 0;
1334 }
1331 1335
1332 ret = intel_fbdev_init(dev); 1336 ret = intel_fbdev_init(dev);
1333 if (ret) 1337 if (ret)
@@ -1362,7 +1366,8 @@ cleanup_gem:
1362 mutex_unlock(&dev->struct_mutex); 1366 mutex_unlock(&dev->struct_mutex);
1363 i915_gem_cleanup_aliasing_ppgtt(dev); 1367 i915_gem_cleanup_aliasing_ppgtt(dev);
1364 drm_mm_takedown(&dev_priv->gtt.base.mm); 1368 drm_mm_takedown(&dev_priv->gtt.base.mm);
1365cleanup_irq: 1369cleanup_power:
1370 intel_display_power_put(dev, POWER_DOMAIN_VGA);
1366 drm_irq_uninstall(dev); 1371 drm_irq_uninstall(dev);
1367cleanup_gem_stolen: 1372cleanup_gem_stolen:
1368 i915_gem_cleanup_stolen(dev); 1373 i915_gem_cleanup_stolen(dev);
@@ -1398,6 +1403,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1398 master->driver_priv = NULL; 1403 master->driver_priv = NULL;
1399} 1404}
1400 1405
1406#ifdef CONFIG_DRM_I915_FBDEV
1401static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) 1407static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1402{ 1408{
1403 struct apertures_struct *ap; 1409 struct apertures_struct *ap;
@@ -1418,6 +1424,11 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1418 1424
1419 kfree(ap); 1425 kfree(ap);
1420} 1426}
1427#else
1428static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1429{
1430}
1431#endif
1421 1432
1422static void i915_dump_device_info(struct drm_i915_private *dev_priv) 1433static void i915_dump_device_info(struct drm_i915_private *dev_priv)
1423{ 1434{
@@ -1459,17 +1470,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1459 info = (struct intel_device_info *) flags; 1470 info = (struct intel_device_info *) flags;
1460 1471
1461 /* Refuse to load on gen6+ without kms enabled. */ 1472 /* Refuse to load on gen6+ without kms enabled. */
1462 if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) 1473 if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
1474 DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
1475 DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
1463 return -ENODEV; 1476 return -ENODEV;
1477 }
1464 1478
1465 /* i915 has 4 more counters */ 1479 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
1466 dev->counters += 4;
1467 dev->types[6] = _DRM_STAT_IRQ;
1468 dev->types[7] = _DRM_STAT_PRIMARY;
1469 dev->types[8] = _DRM_STAT_SECONDARY;
1470 dev->types[9] = _DRM_STAT_DMA;
1471
1472 dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
1473 if (dev_priv == NULL) 1480 if (dev_priv == NULL)
1474 return -ENOMEM; 1481 return -ENOMEM;
1475 1482
@@ -1494,6 +1501,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1494 dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */ 1501 dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
1495 INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work); 1502 INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
1496 1503
1504 intel_display_crc_init(dev);
1505
1497 i915_dump_device_info(dev_priv); 1506 i915_dump_device_info(dev_priv);
1498 1507
1499 /* Not all pre-production machines fall into this category, only the 1508 /* Not all pre-production machines fall into this category, only the
@@ -1531,19 +1540,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1531 1540
1532 intel_uncore_early_sanitize(dev); 1541 intel_uncore_early_sanitize(dev);
1533 1542
1534 if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) { 1543 /* This must be called before any calls to HAS_PCH_* */
1535 /* The docs do not explain exactly how the calculation can be 1544 intel_detect_pch(dev);
1536 * made. It is somewhat guessable, but for now, it's always 1545
1537 * 128MB. 1546 intel_uncore_init(dev);
1538 * NB: We can't write IDICR yet because we do not have gt funcs
1539 * set up */
1540 dev_priv->ellc_size = 128;
1541 DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
1542 }
1543 1547
1544 ret = i915_gem_gtt_init(dev); 1548 ret = i915_gem_gtt_init(dev);
1545 if (ret) 1549 if (ret)
1546 goto put_bridge; 1550 goto out_regs;
1547 1551
1548 if (drm_core_check_feature(dev, DRIVER_MODESET)) 1552 if (drm_core_check_feature(dev, DRIVER_MODESET))
1549 i915_kick_out_firmware_fb(dev_priv); 1553 i915_kick_out_firmware_fb(dev_priv);
@@ -1572,7 +1576,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1572 aperture_size); 1576 aperture_size);
1573 if (dev_priv->gtt.mappable == NULL) { 1577 if (dev_priv->gtt.mappable == NULL) {
1574 ret = -EIO; 1578 ret = -EIO;
1575 goto out_rmmap; 1579 goto out_gtt;
1576 } 1580 }
1577 1581
1578 dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base, 1582 dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
@@ -1598,13 +1602,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1598 goto out_mtrrfree; 1602 goto out_mtrrfree;
1599 } 1603 }
1600 1604
1601 /* This must be called before any calls to HAS_PCH_* */
1602 intel_detect_pch(dev);
1603
1604 intel_irq_init(dev); 1605 intel_irq_init(dev);
1605 intel_pm_init(dev); 1606 intel_pm_init(dev);
1606 intel_uncore_sanitize(dev); 1607 intel_uncore_sanitize(dev);
1607 intel_uncore_init(dev);
1608 1608
1609 /* Try to make sure MCHBAR is enabled before poking at it */ 1609 /* Try to make sure MCHBAR is enabled before poking at it */
1610 intel_setup_mchbar(dev); 1610 intel_setup_mchbar(dev);
@@ -1640,13 +1640,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1640 } 1640 }
1641 1641
1642 if (HAS_POWER_WELL(dev)) 1642 if (HAS_POWER_WELL(dev))
1643 i915_init_power_well(dev); 1643 intel_power_domains_init(dev);
1644 1644
1645 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1645 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1646 ret = i915_load_modeset_init(dev); 1646 ret = i915_load_modeset_init(dev);
1647 if (ret < 0) { 1647 if (ret < 0) {
1648 DRM_ERROR("failed to init modeset\n"); 1648 DRM_ERROR("failed to init modeset\n");
1649 goto out_gem_unload; 1649 goto out_power_well;
1650 } 1650 }
1651 } else { 1651 } else {
1652 /* Start out suspended in ums mode. */ 1652 /* Start out suspended in ums mode. */
@@ -1666,6 +1666,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1666 1666
1667 return 0; 1667 return 0;
1668 1668
1669out_power_well:
1670 if (HAS_POWER_WELL(dev))
1671 intel_power_domains_remove(dev);
1672 drm_vblank_cleanup(dev);
1669out_gem_unload: 1673out_gem_unload:
1670 if (dev_priv->mm.inactive_shrinker.scan_objects) 1674 if (dev_priv->mm.inactive_shrinker.scan_objects)
1671 unregister_shrinker(&dev_priv->mm.inactive_shrinker); 1675 unregister_shrinker(&dev_priv->mm.inactive_shrinker);
@@ -1679,12 +1683,18 @@ out_gem_unload:
1679out_mtrrfree: 1683out_mtrrfree:
1680 arch_phys_wc_del(dev_priv->gtt.mtrr); 1684 arch_phys_wc_del(dev_priv->gtt.mtrr);
1681 io_mapping_free(dev_priv->gtt.mappable); 1685 io_mapping_free(dev_priv->gtt.mappable);
1686out_gtt:
1687 list_del(&dev_priv->gtt.base.global_link);
1688 drm_mm_takedown(&dev_priv->gtt.base.mm);
1682 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); 1689 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
1683out_rmmap: 1690out_regs:
1691 intel_uncore_fini(dev);
1684 pci_iounmap(dev->pdev, dev_priv->regs); 1692 pci_iounmap(dev->pdev, dev_priv->regs);
1685put_bridge: 1693put_bridge:
1686 pci_dev_put(dev_priv->bridge_dev); 1694 pci_dev_put(dev_priv->bridge_dev);
1687free_priv: 1695free_priv:
1696 if (dev_priv->slab)
1697 kmem_cache_destroy(dev_priv->slab);
1688 kfree(dev_priv); 1698 kfree(dev_priv);
1689 return ret; 1699 return ret;
1690} 1700}
@@ -1700,8 +1710,8 @@ int i915_driver_unload(struct drm_device *dev)
1700 /* The i915.ko module is still not prepared to be loaded when 1710 /* The i915.ko module is still not prepared to be loaded when
1701 * the power well is not enabled, so just enable it in case 1711 * the power well is not enabled, so just enable it in case
1702 * we're going to unload/reload. */ 1712 * we're going to unload/reload. */
1703 intel_set_power_well(dev, true); 1713 intel_display_set_init_power(dev, true);
1704 i915_remove_power_well(dev); 1714 intel_power_domains_remove(dev);
1705 } 1715 }
1706 1716
1707 i915_teardown_sysfs(dev); 1717 i915_teardown_sysfs(dev);
@@ -1709,15 +1719,9 @@ int i915_driver_unload(struct drm_device *dev)
1709 if (dev_priv->mm.inactive_shrinker.scan_objects) 1719 if (dev_priv->mm.inactive_shrinker.scan_objects)
1710 unregister_shrinker(&dev_priv->mm.inactive_shrinker); 1720 unregister_shrinker(&dev_priv->mm.inactive_shrinker);
1711 1721
1712 mutex_lock(&dev->struct_mutex); 1722 ret = i915_gem_suspend(dev);
1713 ret = i915_gpu_idle(dev);
1714 if (ret) 1723 if (ret)
1715 DRM_ERROR("failed to idle hardware: %d\n", ret); 1724 DRM_ERROR("failed to idle hardware: %d\n", ret);
1716 i915_gem_retire_requests(dev);
1717 mutex_unlock(&dev->struct_mutex);
1718
1719 /* Cancel the retire work handler, which should be idle now. */
1720 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
1721 1725
1722 io_mapping_free(dev_priv->gtt.mappable); 1726 io_mapping_free(dev_priv->gtt.mappable);
1723 arch_phys_wc_del(dev_priv->gtt.mtrr); 1727 arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1774,8 +1778,8 @@ int i915_driver_unload(struct drm_device *dev)
1774 list_del(&dev_priv->gtt.base.global_link); 1778 list_del(&dev_priv->gtt.base.global_link);
1775 WARN_ON(!list_empty(&dev_priv->vm_list)); 1779 WARN_ON(!list_empty(&dev_priv->vm_list));
1776 drm_mm_takedown(&dev_priv->gtt.base.mm); 1780 drm_mm_takedown(&dev_priv->gtt.base.mm);
1777 if (dev_priv->regs != NULL) 1781
1778 pci_iounmap(dev->pdev, dev_priv->regs); 1782 drm_vblank_cleanup(dev);
1779 1783
1780 intel_teardown_gmbus(dev); 1784 intel_teardown_gmbus(dev);
1781 intel_teardown_mchbar(dev); 1785 intel_teardown_mchbar(dev);
@@ -1785,6 +1789,10 @@ int i915_driver_unload(struct drm_device *dev)
1785 1789
1786 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); 1790 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
1787 1791
1792 intel_uncore_fini(dev);
1793 if (dev_priv->regs != NULL)
1794 pci_iounmap(dev->pdev, dev_priv->regs);
1795
1788 if (dev_priv->slab) 1796 if (dev_priv->slab)
1789 kmem_cache_destroy(dev_priv->slab); 1797 kmem_cache_destroy(dev_priv->slab);
1790 1798
@@ -1796,19 +1804,11 @@ int i915_driver_unload(struct drm_device *dev)
1796 1804
1797int i915_driver_open(struct drm_device *dev, struct drm_file *file) 1805int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1798{ 1806{
1799 struct drm_i915_file_private *file_priv; 1807 int ret;
1800
1801 DRM_DEBUG_DRIVER("\n");
1802 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
1803 if (!file_priv)
1804 return -ENOMEM;
1805
1806 file->driver_priv = file_priv;
1807
1808 spin_lock_init(&file_priv->mm.lock);
1809 INIT_LIST_HEAD(&file_priv->mm.request_list);
1810 1808
1811 idr_init(&file_priv->context_idr); 1809 ret = i915_gem_open(dev, file);
1810 if (ret)
1811 return ret;
1812 1812
1813 return 0; 1813 return 0;
1814} 1814}
@@ -1836,7 +1836,7 @@ void i915_driver_lastclose(struct drm_device * dev)
1836 return; 1836 return;
1837 1837
1838 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1838 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1839 intel_fb_restore_mode(dev); 1839 intel_fbdev_restore_mode(dev);
1840 vga_switcheroo_process_delayed_switch(); 1840 vga_switcheroo_process_delayed_switch();
1841 return; 1841 return;
1842 } 1842 }
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2ad27880cd04..a0804fa1e306 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -160,49 +160,58 @@ extern int intel_agp_enabled;
160static const struct intel_device_info intel_i830_info = { 160static const struct intel_device_info intel_i830_info = {
161 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, 161 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
162 .has_overlay = 1, .overlay_needs_physical = 1, 162 .has_overlay = 1, .overlay_needs_physical = 1,
163 .ring_mask = RENDER_RING,
163}; 164};
164 165
165static const struct intel_device_info intel_845g_info = { 166static const struct intel_device_info intel_845g_info = {
166 .gen = 2, .num_pipes = 1, 167 .gen = 2, .num_pipes = 1,
167 .has_overlay = 1, .overlay_needs_physical = 1, 168 .has_overlay = 1, .overlay_needs_physical = 1,
169 .ring_mask = RENDER_RING,
168}; 170};
169 171
170static const struct intel_device_info intel_i85x_info = { 172static const struct intel_device_info intel_i85x_info = {
171 .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2, 173 .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
172 .cursor_needs_physical = 1, 174 .cursor_needs_physical = 1,
173 .has_overlay = 1, .overlay_needs_physical = 1, 175 .has_overlay = 1, .overlay_needs_physical = 1,
176 .ring_mask = RENDER_RING,
174}; 177};
175 178
176static const struct intel_device_info intel_i865g_info = { 179static const struct intel_device_info intel_i865g_info = {
177 .gen = 2, .num_pipes = 1, 180 .gen = 2, .num_pipes = 1,
178 .has_overlay = 1, .overlay_needs_physical = 1, 181 .has_overlay = 1, .overlay_needs_physical = 1,
182 .ring_mask = RENDER_RING,
179}; 183};
180 184
181static const struct intel_device_info intel_i915g_info = { 185static const struct intel_device_info intel_i915g_info = {
182 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2, 186 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
183 .has_overlay = 1, .overlay_needs_physical = 1, 187 .has_overlay = 1, .overlay_needs_physical = 1,
188 .ring_mask = RENDER_RING,
184}; 189};
185static const struct intel_device_info intel_i915gm_info = { 190static const struct intel_device_info intel_i915gm_info = {
186 .gen = 3, .is_mobile = 1, .num_pipes = 2, 191 .gen = 3, .is_mobile = 1, .num_pipes = 2,
187 .cursor_needs_physical = 1, 192 .cursor_needs_physical = 1,
188 .has_overlay = 1, .overlay_needs_physical = 1, 193 .has_overlay = 1, .overlay_needs_physical = 1,
189 .supports_tv = 1, 194 .supports_tv = 1,
195 .ring_mask = RENDER_RING,
190}; 196};
191static const struct intel_device_info intel_i945g_info = { 197static const struct intel_device_info intel_i945g_info = {
192 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2, 198 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
193 .has_overlay = 1, .overlay_needs_physical = 1, 199 .has_overlay = 1, .overlay_needs_physical = 1,
200 .ring_mask = RENDER_RING,
194}; 201};
195static const struct intel_device_info intel_i945gm_info = { 202static const struct intel_device_info intel_i945gm_info = {
196 .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2, 203 .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
197 .has_hotplug = 1, .cursor_needs_physical = 1, 204 .has_hotplug = 1, .cursor_needs_physical = 1,
198 .has_overlay = 1, .overlay_needs_physical = 1, 205 .has_overlay = 1, .overlay_needs_physical = 1,
199 .supports_tv = 1, 206 .supports_tv = 1,
207 .ring_mask = RENDER_RING,
200}; 208};
201 209
202static const struct intel_device_info intel_i965g_info = { 210static const struct intel_device_info intel_i965g_info = {
203 .gen = 4, .is_broadwater = 1, .num_pipes = 2, 211 .gen = 4, .is_broadwater = 1, .num_pipes = 2,
204 .has_hotplug = 1, 212 .has_hotplug = 1,
205 .has_overlay = 1, 213 .has_overlay = 1,
214 .ring_mask = RENDER_RING,
206}; 215};
207 216
208static const struct intel_device_info intel_i965gm_info = { 217static const struct intel_device_info intel_i965gm_info = {
@@ -210,18 +219,20 @@ static const struct intel_device_info intel_i965gm_info = {
210 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, 219 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
211 .has_overlay = 1, 220 .has_overlay = 1,
212 .supports_tv = 1, 221 .supports_tv = 1,
222 .ring_mask = RENDER_RING,
213}; 223};
214 224
215static const struct intel_device_info intel_g33_info = { 225static const struct intel_device_info intel_g33_info = {
216 .gen = 3, .is_g33 = 1, .num_pipes = 2, 226 .gen = 3, .is_g33 = 1, .num_pipes = 2,
217 .need_gfx_hws = 1, .has_hotplug = 1, 227 .need_gfx_hws = 1, .has_hotplug = 1,
218 .has_overlay = 1, 228 .has_overlay = 1,
229 .ring_mask = RENDER_RING,
219}; 230};
220 231
221static const struct intel_device_info intel_g45_info = { 232static const struct intel_device_info intel_g45_info = {
222 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2, 233 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
223 .has_pipe_cxsr = 1, .has_hotplug = 1, 234 .has_pipe_cxsr = 1, .has_hotplug = 1,
224 .has_bsd_ring = 1, 235 .ring_mask = RENDER_RING | BSD_RING,
225}; 236};
226 237
227static const struct intel_device_info intel_gm45_info = { 238static const struct intel_device_info intel_gm45_info = {
@@ -229,7 +240,7 @@ static const struct intel_device_info intel_gm45_info = {
229 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, 240 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
230 .has_pipe_cxsr = 1, .has_hotplug = 1, 241 .has_pipe_cxsr = 1, .has_hotplug = 1,
231 .supports_tv = 1, 242 .supports_tv = 1,
232 .has_bsd_ring = 1, 243 .ring_mask = RENDER_RING | BSD_RING,
233}; 244};
234 245
235static const struct intel_device_info intel_pineview_info = { 246static const struct intel_device_info intel_pineview_info = {
@@ -241,42 +252,36 @@ static const struct intel_device_info intel_pineview_info = {
241static const struct intel_device_info intel_ironlake_d_info = { 252static const struct intel_device_info intel_ironlake_d_info = {
242 .gen = 5, .num_pipes = 2, 253 .gen = 5, .num_pipes = 2,
243 .need_gfx_hws = 1, .has_hotplug = 1, 254 .need_gfx_hws = 1, .has_hotplug = 1,
244 .has_bsd_ring = 1, 255 .ring_mask = RENDER_RING | BSD_RING,
245}; 256};
246 257
247static const struct intel_device_info intel_ironlake_m_info = { 258static const struct intel_device_info intel_ironlake_m_info = {
248 .gen = 5, .is_mobile = 1, .num_pipes = 2, 259 .gen = 5, .is_mobile = 1, .num_pipes = 2,
249 .need_gfx_hws = 1, .has_hotplug = 1, 260 .need_gfx_hws = 1, .has_hotplug = 1,
250 .has_fbc = 1, 261 .has_fbc = 1,
251 .has_bsd_ring = 1, 262 .ring_mask = RENDER_RING | BSD_RING,
252}; 263};
253 264
254static const struct intel_device_info intel_sandybridge_d_info = { 265static const struct intel_device_info intel_sandybridge_d_info = {
255 .gen = 6, .num_pipes = 2, 266 .gen = 6, .num_pipes = 2,
256 .need_gfx_hws = 1, .has_hotplug = 1, 267 .need_gfx_hws = 1, .has_hotplug = 1,
257 .has_bsd_ring = 1, 268 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
258 .has_blt_ring = 1,
259 .has_llc = 1, 269 .has_llc = 1,
260 .has_force_wake = 1,
261}; 270};
262 271
263static const struct intel_device_info intel_sandybridge_m_info = { 272static const struct intel_device_info intel_sandybridge_m_info = {
264 .gen = 6, .is_mobile = 1, .num_pipes = 2, 273 .gen = 6, .is_mobile = 1, .num_pipes = 2,
265 .need_gfx_hws = 1, .has_hotplug = 1, 274 .need_gfx_hws = 1, .has_hotplug = 1,
266 .has_fbc = 1, 275 .has_fbc = 1,
267 .has_bsd_ring = 1, 276 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
268 .has_blt_ring = 1,
269 .has_llc = 1, 277 .has_llc = 1,
270 .has_force_wake = 1,
271}; 278};
272 279
273#define GEN7_FEATURES \ 280#define GEN7_FEATURES \
274 .gen = 7, .num_pipes = 3, \ 281 .gen = 7, .num_pipes = 3, \
275 .need_gfx_hws = 1, .has_hotplug = 1, \ 282 .need_gfx_hws = 1, .has_hotplug = 1, \
276 .has_bsd_ring = 1, \ 283 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
277 .has_blt_ring = 1, \ 284 .has_llc = 1
278 .has_llc = 1, \
279 .has_force_wake = 1
280 285
281static const struct intel_device_info intel_ivybridge_d_info = { 286static const struct intel_device_info intel_ivybridge_d_info = {
282 GEN7_FEATURES, 287 GEN7_FEATURES,
@@ -318,7 +323,7 @@ static const struct intel_device_info intel_haswell_d_info = {
318 .is_haswell = 1, 323 .is_haswell = 1,
319 .has_ddi = 1, 324 .has_ddi = 1,
320 .has_fpga_dbg = 1, 325 .has_fpga_dbg = 1,
321 .has_vebox_ring = 1, 326 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
322}; 327};
323 328
324static const struct intel_device_info intel_haswell_m_info = { 329static const struct intel_device_info intel_haswell_m_info = {
@@ -328,7 +333,7 @@ static const struct intel_device_info intel_haswell_m_info = {
328 .has_ddi = 1, 333 .has_ddi = 1,
329 .has_fpga_dbg = 1, 334 .has_fpga_dbg = 1,
330 .has_fbc = 1, 335 .has_fbc = 1,
331 .has_vebox_ring = 1, 336 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
332}; 337};
333 338
334/* 339/*
@@ -416,7 +421,7 @@ void intel_detect_pch(struct drm_device *dev)
416 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { 421 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
417 /* PantherPoint is CPT compatible */ 422 /* PantherPoint is CPT compatible */
418 dev_priv->pch_type = PCH_CPT; 423 dev_priv->pch_type = PCH_CPT;
419 DRM_DEBUG_KMS("Found PatherPoint PCH\n"); 424 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
420 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); 425 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
421 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 426 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
422 dev_priv->pch_type = PCH_LPT; 427 dev_priv->pch_type = PCH_LPT;
@@ -472,7 +477,7 @@ static int i915_drm_freeze(struct drm_device *dev)
472 /* We do a lot of poking in a lot of registers, make sure they work 477 /* We do a lot of poking in a lot of registers, make sure they work
473 * properly. */ 478 * properly. */
474 hsw_disable_package_c8(dev_priv); 479 hsw_disable_package_c8(dev_priv);
475 intel_set_power_well(dev, true); 480 intel_display_set_init_power(dev, true);
476 481
477 drm_kms_helper_poll_disable(dev); 482 drm_kms_helper_poll_disable(dev);
478 483
@@ -482,9 +487,7 @@ static int i915_drm_freeze(struct drm_device *dev)
482 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 487 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
483 int error; 488 int error;
484 489
485 mutex_lock(&dev->struct_mutex); 490 error = i915_gem_suspend(dev);
486 error = i915_gem_idle(dev);
487 mutex_unlock(&dev->struct_mutex);
488 if (error) { 491 if (error) {
489 dev_err(&dev->pdev->dev, 492 dev_err(&dev->pdev->dev,
490 "GEM idle failed, resume might fail\n"); 493 "GEM idle failed, resume might fail\n");
@@ -578,11 +581,24 @@ static void intel_resume_hotplug(struct drm_device *dev)
578 drm_helper_hpd_irq_event(dev); 581 drm_helper_hpd_irq_event(dev);
579} 582}
580 583
581static int __i915_drm_thaw(struct drm_device *dev) 584static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
582{ 585{
583 struct drm_i915_private *dev_priv = dev->dev_private; 586 struct drm_i915_private *dev_priv = dev->dev_private;
584 int error = 0; 587 int error = 0;
585 588
589 intel_uncore_early_sanitize(dev);
590
591 intel_uncore_sanitize(dev);
592
593 if (drm_core_check_feature(dev, DRIVER_MODESET) &&
594 restore_gtt_mappings) {
595 mutex_lock(&dev->struct_mutex);
596 i915_gem_restore_gtt_mappings(dev);
597 mutex_unlock(&dev->struct_mutex);
598 }
599
600 intel_power_domains_init_hw(dev);
601
586 i915_restore_state(dev); 602 i915_restore_state(dev);
587 intel_opregion_setup(dev); 603 intel_opregion_setup(dev);
588 604
@@ -642,20 +658,10 @@ static int __i915_drm_thaw(struct drm_device *dev)
642 658
643static int i915_drm_thaw(struct drm_device *dev) 659static int i915_drm_thaw(struct drm_device *dev)
644{ 660{
645 int error = 0; 661 if (drm_core_check_feature(dev, DRIVER_MODESET))
646
647 intel_uncore_sanitize(dev);
648
649 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
650 mutex_lock(&dev->struct_mutex);
651 i915_gem_restore_gtt_mappings(dev);
652 mutex_unlock(&dev->struct_mutex);
653 } else if (drm_core_check_feature(dev, DRIVER_MODESET))
654 i915_check_and_clear_faults(dev); 662 i915_check_and_clear_faults(dev);
655 663
656 __i915_drm_thaw(dev); 664 return __i915_drm_thaw(dev, true);
657
658 return error;
659} 665}
660 666
661int i915_resume(struct drm_device *dev) 667int i915_resume(struct drm_device *dev)
@@ -671,20 +677,12 @@ int i915_resume(struct drm_device *dev)
671 677
672 pci_set_master(dev->pdev); 678 pci_set_master(dev->pdev);
673 679
674 intel_uncore_sanitize(dev);
675
676 /* 680 /*
677 * Platforms with opregion should have sane BIOS, older ones (gen3 and 681 * Platforms with opregion should have sane BIOS, older ones (gen3 and
678 * earlier) need this since the BIOS might clear all our scratch PTEs. 682 * earlier) need to restore the GTT mappings since the BIOS might clear
683 * all our scratch PTEs.
679 */ 684 */
680 if (drm_core_check_feature(dev, DRIVER_MODESET) && 685 ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
681 !dev_priv->opregion.header) {
682 mutex_lock(&dev->struct_mutex);
683 i915_gem_restore_gtt_mappings(dev);
684 mutex_unlock(&dev->struct_mutex);
685 }
686
687 ret = __i915_drm_thaw(dev);
688 if (ret) 686 if (ret)
689 return ret; 687 return ret;
690 688
@@ -722,24 +720,19 @@ int i915_reset(struct drm_device *dev)
722 720
723 simulated = dev_priv->gpu_error.stop_rings != 0; 721 simulated = dev_priv->gpu_error.stop_rings != 0;
724 722
725 if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) { 723 ret = intel_gpu_reset(dev);
726 DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); 724
727 ret = -ENODEV; 725 /* Also reset the gpu hangman. */
728 } else { 726 if (simulated) {
729 ret = intel_gpu_reset(dev); 727 DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
730 728 dev_priv->gpu_error.stop_rings = 0;
731 /* Also reset the gpu hangman. */ 729 if (ret == -ENODEV) {
732 if (simulated) { 730 DRM_ERROR("Reset not implemented, but ignoring "
733 DRM_INFO("Simulated gpu hang, resetting stop_rings\n"); 731 "error for simulated gpu hangs\n");
734 dev_priv->gpu_error.stop_rings = 0; 732 ret = 0;
735 if (ret == -ENODEV) { 733 }
736 DRM_ERROR("Reset not implemented, but ignoring "
737 "error for simulated gpu hangs\n");
738 ret = 0;
739 }
740 } else
741 dev_priv->gpu_error.last_reset = get_seconds();
742 } 734 }
735
743 if (ret) { 736 if (ret) {
744 DRM_ERROR("Failed to reset chip.\n"); 737 DRM_ERROR("Failed to reset chip.\n");
745 mutex_unlock(&dev->struct_mutex); 738 mutex_unlock(&dev->struct_mutex);
@@ -762,30 +755,17 @@ int i915_reset(struct drm_device *dev)
762 */ 755 */
763 if (drm_core_check_feature(dev, DRIVER_MODESET) || 756 if (drm_core_check_feature(dev, DRIVER_MODESET) ||
764 !dev_priv->ums.mm_suspended) { 757 !dev_priv->ums.mm_suspended) {
765 struct intel_ring_buffer *ring; 758 bool hw_contexts_disabled = dev_priv->hw_contexts_disabled;
766 int i;
767
768 dev_priv->ums.mm_suspended = 0; 759 dev_priv->ums.mm_suspended = 0;
769 760
770 i915_gem_init_swizzling(dev); 761 ret = i915_gem_init_hw(dev);
771 762 if (!hw_contexts_disabled && dev_priv->hw_contexts_disabled)
772 for_each_ring(ring, dev_priv, i) 763 DRM_ERROR("HW contexts didn't survive reset\n");
773 ring->init(ring);
774
775 i915_gem_context_init(dev);
776 if (dev_priv->mm.aliasing_ppgtt) {
777 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
778 if (ret)
779 i915_gem_cleanup_aliasing_ppgtt(dev);
780 }
781
782 /*
783 * It would make sense to re-init all the other hw state, at
784 * least the rps/rc6/emon init done within modeset_init_hw. For
785 * some unknown reason, this blows up my ilk, so don't.
786 */
787
788 mutex_unlock(&dev->struct_mutex); 764 mutex_unlock(&dev->struct_mutex);
765 if (ret) {
766 DRM_ERROR("Failed hw init on reset %d\n", ret);
767 return ret;
768 }
789 769
790 drm_irq_uninstall(dev); 770 drm_irq_uninstall(dev);
791 drm_irq_install(dev); 771 drm_irq_install(dev);
@@ -802,6 +782,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
802 struct intel_device_info *intel_info = 782 struct intel_device_info *intel_info =
803 (struct intel_device_info *) ent->driver_data; 783 (struct intel_device_info *) ent->driver_data;
804 784
785 if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
786 DRM_INFO("This hardware requires preliminary hardware support.\n"
787 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
788 return -ENODEV;
789 }
790
805 /* Only bind to function 0 of the device. Early generations 791 /* Only bind to function 0 of the device. Early generations
806 * used function 1 as a placeholder for multi-head. This causes 792 * used function 1 as a placeholder for multi-head. This causes
807 * us confusion instead, especially on the systems where both 793 * us confusion instead, especially on the systems where both
@@ -949,7 +935,6 @@ static struct drm_driver driver = {
949 .debugfs_init = i915_debugfs_init, 935 .debugfs_init = i915_debugfs_init,
950 .debugfs_cleanup = i915_debugfs_cleanup, 936 .debugfs_cleanup = i915_debugfs_cleanup,
951#endif 937#endif
952 .gem_init_object = i915_gem_init_object,
953 .gem_free_object = i915_gem_free_object, 938 .gem_free_object = i915_gem_free_object,
954 .gem_vm_ops = &i915_gem_vm_ops, 939 .gem_vm_ops = &i915_gem_vm_ops,
955 940
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ab0f2c0a440c..b12d942ab09c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -98,13 +98,25 @@ enum intel_display_power_domain {
98 POWER_DOMAIN_TRANSCODER_A, 98 POWER_DOMAIN_TRANSCODER_A,
99 POWER_DOMAIN_TRANSCODER_B, 99 POWER_DOMAIN_TRANSCODER_B,
100 POWER_DOMAIN_TRANSCODER_C, 100 POWER_DOMAIN_TRANSCODER_C,
101 POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF, 101 POWER_DOMAIN_TRANSCODER_EDP,
102 POWER_DOMAIN_VGA,
103 POWER_DOMAIN_INIT,
104
105 POWER_DOMAIN_NUM,
102}; 106};
103 107
108#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
109
104#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) 110#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
105#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \ 111#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
106 ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER) 112 ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
107#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A) 113#define POWER_DOMAIN_TRANSCODER(tran) \
114 ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
115 (tran) + POWER_DOMAIN_TRANSCODER_A)
116
117#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
118 BIT(POWER_DOMAIN_PIPE_A) | \
119 BIT(POWER_DOMAIN_TRANSCODER_EDP))
108 120
109enum hpd_pin { 121enum hpd_pin {
110 HPD_NONE = 0, 122 HPD_NONE = 0,
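With POWER_DOMAIN_NUM as the enum terminator, a mask covering every domain falls out as BIT(n) - 1, and TRANSCODER_EDP gets its own slot instead of the old TRANSCODER_A + 0xF alias, which is why POWER_DOMAIN_TRANSCODER() now special-cases it. A compilable sketch of the counting trick (BIT() as in linux/bitops.h; the enum is abridged):

#include <stdio.h>

#define BIT(n) (1UL << (n))

enum power_domain {
        POWER_DOMAIN_TRANSCODER_A,
        POWER_DOMAIN_TRANSCODER_B,
        POWER_DOMAIN_TRANSCODER_C,
        POWER_DOMAIN_TRANSCODER_EDP,    /* own slot, no more +0xF alias */
        POWER_DOMAIN_NUM,               /* keep last: element count */
};

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

int main(void)
{
        /* one bit per domain below the NUM terminator */
        printf("mask = %#lx\n", POWER_DOMAIN_MASK);     /* 0xf here */
        return 0;
}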
@@ -225,6 +237,8 @@ struct intel_opregion {
225 struct opregion_header __iomem *header; 237 struct opregion_header __iomem *header;
226 struct opregion_acpi __iomem *acpi; 238 struct opregion_acpi __iomem *acpi;
227 struct opregion_swsci __iomem *swsci; 239 struct opregion_swsci __iomem *swsci;
240 u32 swsci_gbda_sub_functions;
241 u32 swsci_sbcb_sub_functions;
228 struct opregion_asle __iomem *asle; 242 struct opregion_asle __iomem *asle;
229 void __iomem *vbt; 243 void __iomem *vbt;
230 u32 __iomem *lid_state; 244 u32 __iomem *lid_state;
@@ -285,6 +299,7 @@ struct drm_i915_error_state {
285 u32 cpu_ring_tail[I915_NUM_RINGS]; 299 u32 cpu_ring_tail[I915_NUM_RINGS];
286 u32 error; /* gen6+ */ 300 u32 error; /* gen6+ */
287 u32 err_int; /* gen7 */ 301 u32 err_int; /* gen7 */
302 u32 bbstate[I915_NUM_RINGS];
288 u32 instpm[I915_NUM_RINGS]; 303 u32 instpm[I915_NUM_RINGS];
289 u32 instps[I915_NUM_RINGS]; 304 u32 instps[I915_NUM_RINGS];
290 u32 extra_instdone[I915_NUM_INSTDONE_REG]; 305 u32 extra_instdone[I915_NUM_INSTDONE_REG];
@@ -321,11 +336,13 @@ struct drm_i915_error_state {
321 u32 dirty:1; 336 u32 dirty:1;
322 u32 purgeable:1; 337 u32 purgeable:1;
323 s32 ring:4; 338 s32 ring:4;
324 u32 cache_level:2; 339 u32 cache_level:3;
325 } **active_bo, **pinned_bo; 340 } **active_bo, **pinned_bo;
326 u32 *active_bo_count, *pinned_bo_count; 341 u32 *active_bo_count, *pinned_bo_count;
327 struct intel_overlay_error_state *overlay; 342 struct intel_overlay_error_state *overlay;
328 struct intel_display_error_state *display; 343 struct intel_display_error_state *display;
344 int hangcheck_score[I915_NUM_RINGS];
345 enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
329}; 346};
330 347
331struct intel_crtc_config; 348struct intel_crtc_config;
@@ -357,7 +374,7 @@ struct drm_i915_display_funcs {
357 int target, int refclk, 374 int target, int refclk,
358 struct dpll *match_clock, 375 struct dpll *match_clock,
359 struct dpll *best_clock); 376 struct dpll *best_clock);
360 void (*update_wm)(struct drm_device *dev); 377 void (*update_wm)(struct drm_crtc *crtc);
361 void (*update_sprite_wm)(struct drm_plane *plane, 378 void (*update_sprite_wm)(struct drm_plane *plane,
362 struct drm_crtc *crtc, 379 struct drm_crtc *crtc,
363 uint32_t sprite_width, int pixel_size, 380 uint32_t sprite_width, int pixel_size,
@@ -367,7 +384,6 @@ struct drm_i915_display_funcs {
367 * fills out the pipe-config with the hw state. */ 384 * fills out the pipe-config with the hw state. */
368 bool (*get_pipe_config)(struct intel_crtc *, 385 bool (*get_pipe_config)(struct intel_crtc *,
369 struct intel_crtc_config *); 386 struct intel_crtc_config *);
370 void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
371 int (*crtc_mode_set)(struct drm_crtc *crtc, 387 int (*crtc_mode_set)(struct drm_crtc *crtc,
372 int x, int y, 388 int x, int y,
373 struct drm_framebuffer *old_fb); 389 struct drm_framebuffer *old_fb);
@@ -375,7 +391,8 @@ struct drm_i915_display_funcs {
375 void (*crtc_disable)(struct drm_crtc *crtc); 391 void (*crtc_disable)(struct drm_crtc *crtc);
376 void (*off)(struct drm_crtc *crtc); 392 void (*off)(struct drm_crtc *crtc);
377 void (*write_eld)(struct drm_connector *connector, 393 void (*write_eld)(struct drm_connector *connector,
378 struct drm_crtc *crtc); 394 struct drm_crtc *crtc,
395 struct drm_display_mode *mode);
379 void (*fdi_link_train)(struct drm_crtc *crtc); 396 void (*fdi_link_train)(struct drm_crtc *crtc);
380 void (*init_clock_gating)(struct drm_device *dev); 397 void (*init_clock_gating)(struct drm_device *dev);
381 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, 398 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
@@ -395,6 +412,20 @@ struct drm_i915_display_funcs {
395struct intel_uncore_funcs { 412struct intel_uncore_funcs {
396 void (*force_wake_get)(struct drm_i915_private *dev_priv); 413 void (*force_wake_get)(struct drm_i915_private *dev_priv);
397 void (*force_wake_put)(struct drm_i915_private *dev_priv); 414 void (*force_wake_put)(struct drm_i915_private *dev_priv);
415
416 uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
417 uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
418 uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
419 uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
420
421 void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
422 uint8_t val, bool trace);
423 void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
424 uint16_t val, bool trace);
425 void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
426 uint32_t val, bool trace);
427 void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
428 uint64_t val, bool trace);
398}; 429};
399 430
400struct intel_uncore { 431struct intel_uncore {
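The new intel_uncore_funcs members turn register access into an ops table: each platform installs its own readb/w/l/q and writeb/w/l/q hooks, so forcewake policy becomes a function-pointer choice made once at init rather than a branch on every access. A userspace sketch of the dispatch shape (signatures simplified; not the driver's actual types):

#include <stdint.h>
#include <stdio.h>

struct uncore_funcs {
        uint32_t (*mmio_readl)(void *dev, unsigned offset, int trace);
        void (*mmio_writel)(void *dev, unsigned offset, uint32_t val, int trace);
        /* ...plus the 8/16/64-bit variants... */
};

static uint32_t plain_readl(void *dev, unsigned offset, int trace)
{
        (void)dev; (void)trace;
        return offset;          /* stand-in for the real MMIO read */
}

int main(void)
{
        struct uncore_funcs funcs = { .mmio_readl = plain_readl };

        /* a gen needing forcewake would install a different hook here */
        printf("%u\n", (unsigned)funcs.mmio_readl(NULL, 0x1234, 1));
        return 0;
}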
@@ -404,6 +435,8 @@ struct intel_uncore {
404 435
405 unsigned fifo_count; 436 unsigned fifo_count;
406 unsigned forcewake_count; 437 unsigned forcewake_count;
438
439 struct delayed_work force_wake_work;
407}; 440};
408 441
409#define DEV_INFO_FOR_EACH_FLAG(func, sep) \ 442#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -420,7 +453,7 @@ struct intel_uncore {
420 func(is_ivybridge) sep \ 453 func(is_ivybridge) sep \
421 func(is_valleyview) sep \ 454 func(is_valleyview) sep \
422 func(is_haswell) sep \ 455 func(is_haswell) sep \
423 func(has_force_wake) sep \ 456 func(is_preliminary) sep \
424 func(has_fbc) sep \ 457 func(has_fbc) sep \
425 func(has_pipe_cxsr) sep \ 458 func(has_pipe_cxsr) sep \
426 func(has_hotplug) sep \ 459 func(has_hotplug) sep \
@@ -428,9 +461,6 @@ struct intel_uncore {
428 func(has_overlay) sep \ 461 func(has_overlay) sep \
429 func(overlay_needs_physical) sep \ 462 func(overlay_needs_physical) sep \
430 func(supports_tv) sep \ 463 func(supports_tv) sep \
431 func(has_bsd_ring) sep \
432 func(has_blt_ring) sep \
433 func(has_vebox_ring) sep \
434 func(has_llc) sep \ 464 func(has_llc) sep \
435 func(has_ddi) sep \ 465 func(has_ddi) sep \
436 func(has_fpga_dbg) 466 func(has_fpga_dbg)
@@ -442,6 +472,7 @@ struct intel_device_info {
442 u32 display_mmio_offset; 472 u32 display_mmio_offset;
443 u8 num_pipes:3; 473 u8 num_pipes:3;
444 u8 gen; 474 u8 gen;
475 u8 ring_mask; /* Rings supported by the HW */
445 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); 476 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
446}; 477};
447 478
@@ -570,6 +601,13 @@ struct i915_vma {
570 /** This vma's place in the batchbuffer or on the eviction list */ 601 /** This vma's place in the batchbuffer or on the eviction list */
571 struct list_head exec_list; 602 struct list_head exec_list;
572 603
604 /**
605 * Used for performing relocations during execbuffer insertion.
606 */
607 struct hlist_node exec_node;
608 unsigned long exec_handle;
609 struct drm_i915_gem_exec_object2 *exec_entry;
610
573}; 611};
574 612
575struct i915_ctx_hang_stats { 613struct i915_ctx_hang_stats {
@@ -578,6 +616,12 @@ struct i915_ctx_hang_stats {
578 616
579 /* This context had batch active when hang was declared */ 617 /* This context had batch active when hang was declared */
580 unsigned batch_active; 618 unsigned batch_active;
619
620 /* Time when this context was last blamed for a GPU reset */
621 unsigned long guilty_ts;
622
623 /* This context is banned to submit more work */
624 bool banned;
581}; 625};
582 626
583/* This must match up with the value previously used for execbuf2.rsvd1. */ 627/* This must match up with the value previously used for execbuf2.rsvd1. */
@@ -586,10 +630,13 @@ struct i915_hw_context {
586 struct kref ref; 630 struct kref ref;
587 int id; 631 int id;
588 bool is_initialized; 632 bool is_initialized;
633 uint8_t remap_slice;
589 struct drm_i915_file_private *file_priv; 634 struct drm_i915_file_private *file_priv;
590 struct intel_ring_buffer *ring; 635 struct intel_ring_buffer *ring;
591 struct drm_i915_gem_object *obj; 636 struct drm_i915_gem_object *obj;
592 struct i915_ctx_hang_stats hang_stats; 637 struct i915_ctx_hang_stats hang_stats;
638
639 struct list_head link;
593}; 640};
594 641
595struct i915_fbc { 642struct i915_fbc {
@@ -623,17 +670,9 @@ struct i915_fbc {
623 } no_fbc_reason; 670 } no_fbc_reason;
624}; 671};
625 672
626enum no_psr_reason { 673struct i915_psr {
627 PSR_NO_SOURCE, /* Not supported on platform */ 674 bool sink_support;
628 PSR_NO_SINK, /* Not supported by panel */ 675 bool source_ok;
629 PSR_MODULE_PARAM,
630 PSR_CRTC_NOT_ACTIVE,
631 PSR_PWR_WELL_ENABLED,
632 PSR_NOT_TILED,
633 PSR_SPRITE_ENABLED,
634 PSR_S3D_ENABLED,
635 PSR_INTERLACED_ENABLED,
636 PSR_HSW_NOT_DDIA,
637}; 676};
638 677
639enum intel_pch { 678enum intel_pch {
@@ -823,17 +862,20 @@ struct intel_gen6_power_mgmt {
823 struct work_struct work; 862 struct work_struct work;
824 u32 pm_iir; 863 u32 pm_iir;
825 864
826 /* On vlv we need to manually drop to Vmin with a delayed work. */
827 struct delayed_work vlv_work;
828
829 /* The below variables an all the rps hw state are protected by 865 /* The below variables an all the rps hw state are protected by
830 * dev->struct mutext. */ 866 * dev->struct mutext. */
831 u8 cur_delay; 867 u8 cur_delay;
832 u8 min_delay; 868 u8 min_delay;
833 u8 max_delay; 869 u8 max_delay;
834 u8 rpe_delay; 870 u8 rpe_delay;
871 u8 rp1_delay;
872 u8 rp0_delay;
835 u8 hw_max; 873 u8 hw_max;
836 874
875 int last_adj;
876 enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
877
878 bool enabled;
837 struct delayed_work delayed_resume_work; 879 struct delayed_work delayed_resume_work;
838 880
839 /* 881 /*
@@ -870,11 +912,21 @@ struct intel_ilk_power_mgmt {
870 912
871/* Power well structure for haswell */ 913/* Power well structure for haswell */
872struct i915_power_well { 914struct i915_power_well {
873 struct drm_device *device;
874 spinlock_t lock;
875 /* power well enable/disable usage count */ 915 /* power well enable/disable usage count */
876 int count; 916 int count;
877 int i915_request; 917};
918
919#define I915_MAX_POWER_WELLS 1
920
921struct i915_power_domains {
922 /*
923 * Power wells needed for initialization at driver init and suspend
924 * time are on. They are kept on until after the first modeset.
925 */
926 bool init_power_on;
927
928 struct mutex lock;
929 struct i915_power_well power_wells[I915_MAX_POWER_WELLS];
878}; 930};
879 931
880struct i915_dri1_state { 932struct i915_dri1_state {
@@ -902,9 +954,11 @@ struct i915_ums_state {
902 int mm_suspended; 954 int mm_suspended;
903}; 955};
904 956
957#define MAX_L3_SLICES 2
905struct intel_l3_parity { 958struct intel_l3_parity {
906 u32 *remap_info; 959 u32 *remap_info[MAX_L3_SLICES];
907 struct work_struct error_work; 960 struct work_struct error_work;
961 int which_slice;
908}; 962};
909 963
910struct i915_gem_mm { 964struct i915_gem_mm {
@@ -942,6 +996,15 @@ struct i915_gem_mm {
942 struct delayed_work retire_work; 996 struct delayed_work retire_work;
943 997
944 /** 998 /**
999 * When we detect an idle GPU, we want to turn on
1000 * powersaving features. So once we see that there
1001 * are no more requests outstanding and no more
1002 * arrive within a small period of time, we fire
1003 * off the idle_work.
1004 */
1005 struct delayed_work idle_work;
1006
1007 /**
945 * Are we in a non-interruptible section of code like 1008 * Are we in a non-interruptible section of code like
946 * modesetting? 1009 * modesetting?
947 */ 1010 */
@@ -979,6 +1042,9 @@ struct i915_gpu_error {
979 /* For hangcheck timer */ 1042 /* For hangcheck timer */
980#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ 1043#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
981#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) 1044#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
1045 /* Hang gpu twice in this window and your context gets banned */
1046#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
1047
982 struct timer_list hangcheck_timer; 1048 struct timer_list hangcheck_timer;
983 1049
984 /* For reset and error_state handling. */ 1050 /* For reset and error_state handling. */
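DRM_I915_CTX_BAN_PERIOD is expressed in seconds: eight hangcheck periods of 1500 ms, rounded up, so per the comment above a context blamed for a second hang within 12 seconds of the first gets banned. The arithmetic, checked in plain C:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* 8 hangcheck periods of 1500 ms, rounded up to whole seconds */
        printf("%d\n", DIV_ROUND_UP(8 * 1500, 1000));   /* prints 12 */
        return 0;
}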
@@ -987,7 +1053,8 @@ struct i915_gpu_error {
 	struct drm_i915_error_state *first_error;
 	struct work_struct work;
 
-	unsigned long last_reset;
+
+	unsigned long missed_irq_rings;
 
 	/**
 	 * State variable and reset counter controlling the reset flow
@@ -1027,6 +1094,9 @@ struct i915_gpu_error {
 
 	/* For gpu hang simulation. */
 	unsigned int stop_rings;
+
+	/* For missed irq/seqno simulation. */
+	unsigned int test_irq_rings;
 };
 
 enum modeset_restore {
@@ -1035,6 +1105,14 @@ enum modeset_restore {
 	MODESET_SUSPENDED,
 };
 
+struct ddi_vbt_port_info {
+	uint8_t hdmi_level_shift;
+
+	uint8_t supports_dvi:1;
+	uint8_t supports_hdmi:1;
+	uint8_t supports_dp:1;
+};
+
 struct intel_vbt_data {
 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -1060,10 +1138,17 @@ struct intel_vbt_data {
 	int edp_bpp;
 	struct edp_power_seq edp_pps;
 
+	/* MIPI DSI */
+	struct {
+		u16 panel_id;
+	} dsi;
+
 	int crt_ddc_pin;
 
 	int child_dev_num;
-	struct child_device_config *child_dev;
+	union child_device_config *child_dev;
+
+	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
 };
 
 enum intel_ddb_partitioning {
@@ -1079,6 +1164,15 @@ struct intel_wm_level {
 	uint32_t fbc_val;
 };
 
+struct hsw_wm_values {
+	uint32_t wm_pipe[3];
+	uint32_t wm_lp[3];
+	uint32_t wm_lp_spr[3];
+	uint32_t wm_linetime[3];
+	bool enable_fbc_wm;
+	enum intel_ddb_partitioning partitioning;
+};
+
 /*
  * This struct tracks the state needed for the Package C8+ feature.
  *
@@ -1148,6 +1242,36 @@ struct i915_package_c8 {
 	} regsave;
 };
 
+enum intel_pipe_crc_source {
+	INTEL_PIPE_CRC_SOURCE_NONE,
+	INTEL_PIPE_CRC_SOURCE_PLANE1,
+	INTEL_PIPE_CRC_SOURCE_PLANE2,
+	INTEL_PIPE_CRC_SOURCE_PF,
+	INTEL_PIPE_CRC_SOURCE_PIPE,
+	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
+	INTEL_PIPE_CRC_SOURCE_TV,
+	INTEL_PIPE_CRC_SOURCE_DP_B,
+	INTEL_PIPE_CRC_SOURCE_DP_C,
+	INTEL_PIPE_CRC_SOURCE_DP_D,
+	INTEL_PIPE_CRC_SOURCE_AUTO,
+	INTEL_PIPE_CRC_SOURCE_MAX,
+};
+
+struct intel_pipe_crc_entry {
+	uint32_t frame;
+	uint32_t crc[5];
+};
+
+#define INTEL_PIPE_CRC_ENTRIES_NR	128
+struct intel_pipe_crc {
+	spinlock_t lock;
+	bool opened;		/* exclusive access to the result file */
+	struct intel_pipe_crc_entry *entries;
+	enum intel_pipe_crc_source source;
+	int head, tail;
+	wait_queue_head_t wq;
+};
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
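struct intel_pipe_crc is a classic fixed-size producer/consumer ring: the display interrupt writes at head, the debugfs reader drains at tail, and INTEL_PIPE_CRC_ENTRIES_NR bounds the backlog. A minimal userspace model of that head/tail discipline (field names follow the struct above; the spinlock and waitqueue are deliberately elided):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRIES_NR 128	/* mirrors INTEL_PIPE_CRC_ENTRIES_NR */

struct entry { uint32_t frame; uint32_t crc[5]; };

static struct entry ring[ENTRIES_NR];
static int head, tail;

/* Producer: refuse the sample when the reader has fallen a full ring behind. */
static bool push(const struct entry *e)
{
	int next = (head + 1) % ENTRIES_NR;
	if (next == tail)
		return false;	/* full */
	ring[head] = *e;
	head = next;
	return true;
}

/* Consumer side, as the debugfs read() would drain it. */
static bool pop(struct entry *e)
{
	if (tail == head)
		return false;	/* empty */
	*e = ring[tail];
	tail = (tail + 1) % ENTRIES_NR;
	return true;
}

int main(void)
{
	struct entry e = { .frame = 1, .crc = { 0xdeadbeef } }, out;
	push(&e);
	while (pop(&out))
		printf("frame %u crc %08x\n", out.frame, out.crc[0]);
	return 0;
}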
@@ -1272,6 +1396,10 @@ typedef struct drm_i915_private {
 	struct drm_crtc *pipe_to_crtc_mapping[3];
 	wait_queue_head_t pending_flip_queue;
 
+#ifdef CONFIG_DEBUG_FS
+	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
+#endif
+
 	int num_shared_dpll;
 	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
 	struct intel_ddi_plls ddi_plls;
@@ -1297,17 +1425,18 @@ typedef struct drm_i915_private {
 	 * mchdev_lock in intel_pm.c */
 	struct intel_ilk_power_mgmt ips;
 
-	/* Haswell power well */
-	struct i915_power_well power_well;
+	struct i915_power_domains power_domains;
 
-	enum no_psr_reason no_psr_reason;
+	struct i915_psr psr;
 
 	struct i915_gpu_error gpu_error;
 
 	struct drm_i915_gem_object *vlv_pctx;
 
+#ifdef CONFIG_DRM_I915_FBDEV
 	/* list of fbdev register on this device */
 	struct intel_fbdev *fbdev;
+#endif
 
 	/*
 	 * The console may be contended at resume, but we don't
@@ -1320,6 +1449,7 @@ typedef struct drm_i915_private {
 
 	bool hw_contexts_disabled;
 	uint32_t hw_context_size;
+	struct list_head context_list;
 
 	u32 fdi_rx_config;
 
@@ -1337,6 +1467,9 @@ typedef struct drm_i915_private {
 		uint16_t spr_latency[5];
 		/* cursor */
 		uint16_t cur_latency[5];
+
+		/* current hardware state */
+		struct hsw_wm_values hw;
 	} wm;
 
 	struct i915_package_c8 pc8;
@@ -1400,8 +1533,6 @@ struct drm_i915_gem_object {
 	struct list_head ring_list;
 	/** Used in execbuf to temporarily hold a ref */
 	struct list_head obj_exec_link;
-	/** This object's place in the batchbuffer or on the eviction list */
-	struct list_head exec_list;
 
 	/**
 	 * This is set if the object is on the active lists (has pending
@@ -1487,13 +1618,6 @@ struct drm_i915_gem_object {
 	void *dma_buf_vmapping;
 	int vmapping_count;
 
-	/**
-	 * Used for performing relocations during execbuffer insertion.
-	 */
-	struct hlist_node exec_node;
-	unsigned long exec_handle;
-	struct drm_i915_gem_exec_object2 *exec_entry;
-
 	struct intel_ring_buffer *ring;
 
 	/** Breadcrumb of last rendering to the buffer. */
@@ -1505,11 +1629,14 @@ struct drm_i915_gem_object {
 	/** Current tiling stride for the object, if it's tiled. */
 	uint32_t stride;
 
+	/** References from framebuffers, locks out tiling changes. */
+	unsigned long framebuffer_references;
+
 	/** Record of address bit 17 of each page at last unbind. */
 	unsigned long *bit_17;
 
 	/** User space pin count and filp owning the pin */
-	uint32_t user_pin_count;
+	unsigned long user_pin_count;
 	struct drm_file *pin_filp;
 
 	/** for phy allocated objects */
@@ -1560,48 +1687,55 @@ struct drm_i915_gem_request {
 };
 
 struct drm_i915_file_private {
+	struct drm_i915_private *dev_priv;
+
 	struct {
 		spinlock_t lock;
 		struct list_head request_list;
+		struct delayed_work idle_work;
 	} mm;
 	struct idr context_idr;
 
 	struct i915_ctx_hang_stats hang_stats;
+	atomic_t rps_wait_boost;
 };
 
 #define INTEL_INFO(dev)	(to_i915(dev)->info)
 
-#define IS_I830(dev)		((dev)->pci_device == 0x3577)
-#define IS_845G(dev)		((dev)->pci_device == 0x2562)
+#define IS_I830(dev)		((dev)->pdev->device == 0x3577)
+#define IS_845G(dev)		((dev)->pdev->device == 0x2562)
 #define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
+#define IS_I865G(dev)		((dev)->pdev->device == 0x2572)
 #define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
-#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
+#define IS_I915GM(dev)		((dev)->pdev->device == 0x2592)
+#define IS_I945G(dev)		((dev)->pdev->device == 0x2772)
 #define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
 #define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
 #define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
+#define IS_GM45(dev)		((dev)->pdev->device == 0x2A42)
 #define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW_G(dev)	((dev)->pdev->device == 0xa001)
+#define IS_PINEVIEW_M(dev)	((dev)->pdev->device == 0xa011)
 #define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
 #define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
+#define IS_IRONLAKE_M(dev)	((dev)->pdev->device == 0x0046)
 #define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
-#define IS_IVB_GT1(dev)		((dev)->pci_device == 0x0156 || \
-				 (dev)->pci_device == 0x0152 || \
-				 (dev)->pci_device == 0x015a)
-#define IS_SNB_GT1(dev)		((dev)->pci_device == 0x0102 || \
-				 (dev)->pci_device == 0x0106 || \
-				 (dev)->pci_device == 0x010A)
+#define IS_IVB_GT1(dev)		((dev)->pdev->device == 0x0156 || \
				 (dev)->pdev->device == 0x0152 || \
				 (dev)->pdev->device == 0x015a)
+#define IS_SNB_GT1(dev)		((dev)->pdev->device == 0x0102 || \
				 (dev)->pdev->device == 0x0106 || \
				 (dev)->pdev->device == 0x010A)
 #define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
 #define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
-				 ((dev)->pci_device & 0xFF00) == 0x0C00)
+				 ((dev)->pdev->device & 0xFF00) == 0x0C00)
 #define IS_ULT(dev)		(IS_HASWELL(dev) && \
-				 ((dev)->pci_device & 0xFF00) == 0x0A00)
+				 ((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
+				 ((dev)->pdev->device & 0x00F0) == 0x0020)
+#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
 /*
  * The genX designation typically refers to the render engine, so render
@@ -1616,9 +1750,13 @@ struct drm_i915_file_private {
 #define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
 #define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
 
-#define HAS_BSD(dev)		(INTEL_INFO(dev)->has_bsd_ring)
-#define HAS_BLT(dev)		(INTEL_INFO(dev)->has_blt_ring)
-#define HAS_VEBOX(dev)		(INTEL_INFO(dev)->has_vebox_ring)
+#define RENDER_RING		(1<<RCS)
+#define BSD_RING		(1<<VCS)
+#define BLT_RING		(1<<BCS)
+#define VEBOX_RING		(1<<VECS)
+#define HAS_BSD(dev)		(INTEL_INFO(dev)->ring_mask & BSD_RING)
+#define HAS_BLT(dev)		(INTEL_INFO(dev)->ring_mask & BLT_RING)
+#define HAS_VEBOX(dev)		(INTEL_INFO(dev)->ring_mask & VEBOX_RING)
 #define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
 #define HAS_WT(dev)		(IS_HASWELL(dev) && to_i915(dev)->ellc_size)
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
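Replacing one has_*_ring flag per engine with a single ring_mask turns every HAS_*() check into a bit test and makes "which engines exist" a one-word device descriptor. The pattern, sketched standalone (RCS/VCS/BCS/VECS values as in the kernel's ring enum):

#include <stdio.h>

enum ring_id { RCS = 0, VCS, BCS, VECS };	/* render, video, blitter, video enhance */

#define RENDER_RING	(1 << RCS)
#define BSD_RING	(1 << VCS)
#define BLT_RING	(1 << BCS)
#define VEBOX_RING	(1 << VECS)

int main(void)
{
	/* e.g. a part with all four engines present */
	unsigned ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING;

	printf("has BSD:   %d\n", !!(ring_mask & BSD_RING));
	printf("has VEBOX: %d\n", !!(ring_mask & VEBOX_RING));
	return 0;
}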
@@ -1640,7 +1778,6 @@ struct drm_i915_file_private {
 #define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
 #define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
 #define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
 #define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
 #define I915_HAS_HOTPLUG(dev)		(INTEL_INFO(dev)->has_hotplug)
 
@@ -1653,6 +1790,7 @@ struct drm_i915_file_private {
 #define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
 #define HAS_POWER_WELL(dev)	(IS_HASWELL(dev))
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
+#define HAS_PSR(dev)		(IS_HASWELL(dev))
 
 #define INTEL_PCH_DEVICE_ID_MASK	0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE	0x3b00
@@ -1668,35 +1806,14 @@ struct drm_i915_file_private {
 #define HAS_PCH_NOP(dev)	(INTEL_PCH_TYPE(dev) == PCH_NOP)
 #define HAS_PCH_SPLIT(dev)	(INTEL_PCH_TYPE(dev) != PCH_NONE)
 
-#define HAS_FORCE_WAKE(dev)	(INTEL_INFO(dev)->has_force_wake)
-
-#define HAS_L3_GPU_CACHE(dev)	(IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+/* DPF == dynamic parity feature */
+#define HAS_L3_DPF(dev)		(IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+#define NUM_L3_SLICES(dev)	(IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
 
 #define GT_FREQUENCY_MULTIPLIER 50
 
 #include "i915_trace.h"
 
-/**
- * RC6 is a special power stage which allows the GPU to enter an very
- * low-voltage mode when idle, using down to 0V while at this stage. This
- * stage is entered automatically when the GPU is idle when RC6 support is
- * enabled, and as soon as new workload arises GPU wakes up automatically as well.
- *
- * There are different RC6 modes available in Intel GPU, which differentiate
- * among each other with the latency required to enter and leave RC6 and
- * voltage consumed by the GPU in different states.
- *
- * The combination of the following flags define which states GPU is allowed
- * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
- * RC6pp is deepest RC6. Their support by hardware varies according to the
- * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
- * which brings the most power savings; deeper states save more power, but
- * require higher latency to switch to and wake up.
- */
-#define INTEL_RC6_ENABLE		(1<<0)
-#define INTEL_RC6p_ENABLE		(1<<1)
-#define INTEL_RC6pp_ENABLE		(1<<2)
-
 extern const struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc __always_unused;
@@ -1767,12 +1884,13 @@ extern void intel_uncore_early_sanitize(struct drm_device *dev);
 extern void intel_uncore_init(struct drm_device *dev);
 extern void intel_uncore_clear_errors(struct drm_device *dev);
 extern void intel_uncore_check_errors(struct drm_device *dev);
+extern void intel_uncore_fini(struct drm_device *dev);
 
 void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
 
 void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
 
 /* i915_gem.c */
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
@@ -1824,14 +1942,11 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 void i915_gem_load(struct drm_device *dev);
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
-int i915_gem_init_object(struct drm_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm);
 void i915_gem_vma_destroy(struct i915_vma *vma);
 
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
@@ -1870,9 +1985,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_ring_buffer *to);
-void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-				    struct intel_ring_buffer *ring);
-
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct intel_ring_buffer *ring);
 int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
@@ -1913,7 +2027,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
	}
 }
 
-void i915_gem_retire_requests(struct drm_device *dev);
+bool i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);
@@ -1933,11 +2047,11 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
-void i915_gem_l3_remap(struct drm_device *dev);
+int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
-int __must_check i915_gem_idle(struct drm_device *dev);
+int __must_check i915_gem_suspend(struct drm_device *dev);
 int __i915_add_request(struct intel_ring_buffer *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *batch_obj,
@@ -1964,6 +2078,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
 void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
 uint32_t
@@ -1995,6 +2110,9 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 struct i915_vma *
 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm);
+
+struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
+
 /* Some GGTT VM helpers */
 #define obj_to_ggtt(obj) \
	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
@@ -2031,7 +2149,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
				   map_and_fenceable, nonblocking);
 }
-#undef obj_to_ggtt
 
 /* i915_gem_context.c */
 void i915_gem_context_init(struct drm_device *dev);
@@ -2094,6 +2211,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
					  unsigned cache_level,
					  bool mappable,
					  bool nonblock);
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
 
 /* i915_gem_stolen.c */
@@ -2133,6 +2251,11 @@ int i915_verify_lists(struct drm_device *dev);
 /* i915_debugfs.c */
 int i915_debugfs_init(struct drm_minor *minor);
 void i915_debugfs_cleanup(struct drm_minor *minor);
+#ifdef CONFIG_DEBUG_FS
+void intel_display_crc_init(struct drm_device *dev);
+#else
+static inline void intel_display_crc_init(struct drm_device *dev) {}
+#endif
 
 /* i915_gpu_error.c */
 __printf(2, 3)
@@ -2186,15 +2309,30 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
 extern void intel_i2c_reset(struct drm_device *dev);
 
 /* intel_opregion.c */
+struct intel_encoder;
 extern int intel_opregion_setup(struct drm_device *dev);
 #ifdef CONFIG_ACPI
 extern void intel_opregion_init(struct drm_device *dev);
 extern void intel_opregion_fini(struct drm_device *dev);
 extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+					 bool enable);
+extern int intel_opregion_notify_adapter(struct drm_device *dev,
+					  pci_power_t state);
 #else
 static inline void intel_opregion_init(struct drm_device *dev) { return; }
 static inline void intel_opregion_fini(struct drm_device *dev) { return; }
 static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline int
+intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+{
+	return 0;
+}
+static inline int
+intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+{
+	return 0;
+}
 #endif
 
 /* intel_acpi.c */
@@ -2256,8 +2394,16 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
 void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
-void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
@@ -2266,37 +2412,21 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 int vlv_gpu_freq(int ddr_freq, int val);
 int vlv_freq_opcode(int ddr_freq, int val);
 
-#define __i915_read(x) \
-	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
-__i915_read(8)
-__i915_read(16)
-__i915_read(32)
-__i915_read(64)
-#undef __i915_read
-
-#define __i915_write(x) \
-	void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
-__i915_write(8)
-__i915_write(16)
-__i915_write(32)
-__i915_write(64)
-#undef __i915_write
-
-#define I915_READ8(reg)		i915_read8(dev_priv, (reg), true)
-#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val), true)
-
-#define I915_READ16(reg)	i915_read16(dev_priv, (reg), true)
-#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val), true)
-#define I915_READ16_NOTRACE(reg)	i915_read16(dev_priv, (reg), false)
-#define I915_WRITE16_NOTRACE(reg, val)	i915_write16(dev_priv, (reg), (val), false)
-
-#define I915_READ(reg)		i915_read32(dev_priv, (reg), true)
-#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val), true)
-#define I915_READ_NOTRACE(reg)	i915_read32(dev_priv, (reg), false)
-#define I915_WRITE_NOTRACE(reg, val)	i915_write32(dev_priv, (reg), (val), false)
-
-#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val), true)
-#define I915_READ64(reg)	i915_read64(dev_priv, (reg), true)
+#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
+#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
+
+#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
+#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
+#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
+#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
+
+#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
+#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
+#define I915_READ_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
+#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
+
+#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
+#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
 
 #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
 #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
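Routing I915_READ/I915_WRITE through dev_priv->uncore.funcs replaces the generated i915_readN()/i915_writeN() calls with per-platform function pointers, so forcewake handling can be chosen once at init instead of branched on every access. A sketch of the indirection (struct and field names here are simplified stand-ins, not the driver's exact layout):

#include <stdint.h>
#include <stdio.h>

struct uncore_funcs {
	uint32_t (*mmio_readl)(void *priv, uint32_t reg, int trace);
	void (*mmio_writel)(void *priv, uint32_t reg, uint32_t val, int trace);
};

struct dev_priv { struct uncore_funcs funcs; uint32_t regs[64]; };

/* One possible backend; a forcewake-aware variant would wrap these. */
static uint32_t plain_readl(void *p, uint32_t reg, int trace)
{
	return ((struct dev_priv *)p)->regs[reg];
}

static void plain_writel(void *p, uint32_t reg, uint32_t val, int trace)
{
	((struct dev_priv *)p)->regs[reg] = val;
}

/* The macros expand against a dev_priv in scope, as in the header above. */
#define I915_READ(reg)       dev_priv->funcs.mmio_readl(dev_priv, (reg), 1)
#define I915_WRITE(reg, val) dev_priv->funcs.mmio_writel(dev_priv, (reg), (val), 1)

int main(void)
{
	struct dev_priv dp = { .funcs = { plain_readl, plain_writel } };
	struct dev_priv *dev_priv = &dp;	/* platform picks the vtable at init */

	I915_WRITE(3, 0xcafe);
	printf("reg3 = 0x%x\n", I915_READ(3));
	return 0;
}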
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cdfb9da0e4ce..e7b39d731db6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -41,6 +41,9 @@ static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *o
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
						   bool force);
 static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+			       bool readonly);
+static __must_check int
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   unsigned alignment,
@@ -61,8 +64,8 @@ static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
					     struct shrink_control *sc);
 static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
					    struct shrink_control *sc);
-static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
-static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -258,7 +261,7 @@ i915_gem_dumb_create(struct drm_file *file,
		     struct drm_mode_create_dumb *args)
 {
	/* have to work out size/pitch and return them */
-	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
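The old ((bpp + 7) / 8) and DIV_ROUND_UP(bpp, 8) compute the same bytes-per-pixel; the new spelling just states the intent. For a 1920-wide, 24 bpp dumb buffer: 24 bpp rounds to 3 bytes, 1920 * 3 = 5760, which is already 64-byte aligned, so pitch = 5760. Checked standalone:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))	/* a must be a power of 2 */

int main(void)
{
	unsigned width = 1920, bpp = 24;
	unsigned pitch = ALIGN(width * DIV_ROUND_UP(bpp, 8), 64);

	printf("pitch = %u\n", pitch);				/* 5760 */
	printf("old == new: %d\n",
	       ((bpp + 7) / 8) == DIV_ROUND_UP(bpp, 8));	/* 1 */
	return 0;
}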
@@ -432,11 +435,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
-		if (i915_gem_obj_bound_any(obj)) {
-			ret = i915_gem_object_set_to_gtt_domain(obj, false);
-			if (ret)
-				return ret;
-		}
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
	}
 
	ret = i915_gem_object_get_pages(obj);
@@ -748,11 +749,9 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
-		if (i915_gem_obj_bound_any(obj)) {
-			ret = i915_gem_object_set_to_gtt_domain(obj, true);
-			if (ret)
-				return ret;
-		}
+		ret = i915_gem_object_wait_rendering(obj, false);
+		if (ret)
+			return ret;
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
@@ -966,12 +965,31 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
 
	ret = 0;
-	if (seqno == ring->outstanding_lazy_request)
+	if (seqno == ring->outstanding_lazy_seqno)
		ret = i915_add_request(ring, NULL);
 
	return ret;
 }
 
+static void fake_irq(unsigned long data)
+{
+	wake_up_process((struct task_struct *)data);
+}
+
+static bool missed_irq(struct drm_i915_private *dev_priv,
+		       struct intel_ring_buffer *ring)
+{
+	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
+}
+
+static bool can_wait_boost(struct drm_i915_file_private *file_priv)
+{
+	if (file_priv == NULL)
+		return true;
+
+	return !atomic_xchg(&file_priv->rps_wait_boost, true);
+}
+
 /**
  * __wait_seqno - wait until execution of seqno has finished
  * @ring: the ring expected to report seqno
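can_wait_boost() uses atomic_xchg() as a one-shot latch: the first waiter per file flips rps_wait_boost from false to true and gets the frequency boost; everyone after sees true and is refused until the flag is cleared again. The same latch in portable C11 (a model of the idea, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool rps_wait_boost;

/* First caller wins; later callers see the old 'true' and back off. */
static bool can_wait_boost(void)
{
	return !atomic_exchange(&rps_wait_boost, true);
}

int main(void)
{
	printf("first:  %d\n", can_wait_boost());	/* 1 */
	printf("second: %d\n", can_wait_boost());	/* 0 */
	atomic_store(&rps_wait_boost, false);		/* e.g. when the client idles */
	printf("again:  %d\n", can_wait_boost());	/* 1 */
	return 0;
}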
@@ -992,13 +1010,14 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
  */
 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			unsigned reset_counter,
-			bool interruptible, struct timespec *timeout)
+			bool interruptible,
+			struct timespec *timeout,
+			struct drm_i915_file_private *file_priv)
 {
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	struct timespec before, now, wait_time={1,0};
-	unsigned long timeout_jiffies;
-	long end;
-	bool wait_forever = true;
+	struct timespec before, now;
+	DEFINE_WAIT(wait);
+	long timeout_jiffies;
	int ret;
 
	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
@@ -1006,51 +1025,79 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;
 
-	trace_i915_gem_request_wait_begin(ring, seqno);
+	timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
 
-	if (timeout != NULL) {
-		wait_time = *timeout;
-		wait_forever = false;
+	if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
+		gen6_rps_boost(dev_priv);
+		if (file_priv)
+			mod_delayed_work(dev_priv->wq,
+					 &file_priv->mm.idle_work,
+					 msecs_to_jiffies(100));
	}
 
-	timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
-
-	if (WARN_ON(!ring->irq_get(ring)))
+	if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
+	    WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;
 
-	/* Record current time in case interrupted by signal, or wedged * */
+	/* Record current time in case interrupted by signal, or wedged */
+	trace_i915_gem_request_wait_begin(ring, seqno);
	getrawmonotonic(&before);
+	for (;;) {
+		struct timer_list timer;
+		unsigned long expire;
 
-#define EXIT_COND \
-	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
-	 i915_reset_in_progress(&dev_priv->gpu_error) || \
-	 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
-	do {
-		if (interruptible)
-			end = wait_event_interruptible_timeout(ring->irq_queue,
-							       EXIT_COND,
-							       timeout_jiffies);
-		else
-			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
-						 timeout_jiffies);
+		prepare_to_wait(&ring->irq_queue, &wait,
+				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
 
		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
-		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
-			end = -EAGAIN;
+		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
+			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
+			 * is truely gone. */
+			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+			if (ret == 0)
+				ret = -EAGAIN;
+			break;
+		}
 
-		/* ... but upgrade the -EGAIN to an -EIO if the gpu is truely
-		 * gone. */
-		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
-		if (ret)
-			end = ret;
-	} while (end == 0 && wait_forever);
+		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+			ret = 0;
+			break;
+		}
 
+		if (interruptible && signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		if (timeout_jiffies <= 0) {
+			ret = -ETIME;
+			break;
+		}
+
+		timer.function = NULL;
+		if (timeout || missed_irq(dev_priv, ring)) {
+			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
+			expire = jiffies + (missed_irq(dev_priv, ring) ? 1: timeout_jiffies);
+			mod_timer(&timer, expire);
+		}
+
+		io_schedule();
+
+		if (timeout)
+			timeout_jiffies = expire - jiffies;
+
+		if (timer.function) {
+			del_singleshot_timer_sync(&timer);
+			destroy_timer_on_stack(&timer);
+		}
+	}
	getrawmonotonic(&now);
+	trace_i915_gem_request_wait_end(ring, seqno);
 
	ring->irq_put(ring);
-	trace_i915_gem_request_wait_end(ring, seqno);
-#undef EXIT_COND
+
+	finish_wait(&ring->irq_queue, &wait);
 
	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
@@ -1059,17 +1106,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
		set_normalized_timespec(timeout, 0, 0);
	}
 
-	switch (end) {
-	case -EIO:
-	case -EAGAIN: /* Wedged */
-	case -ERESTARTSYS: /* Signal */
-		return (int)end;
-	case 0: /* Timeout */
-		return -ETIME;
-	default: /* Completed */
-		WARN_ON(end < 0); /* We're not aware of other errors */
-		return 0;
-	}
+	return ret;
 }
 
 /**
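The rewritten __wait_seqno drops the wait_event_*_timeout() pair for an open-coded prepare_to_wait()/io_schedule()/finish_wait() loop, so one loop body can interleave four exit conditions (reset, completion, signal, timeout) in a fixed priority order and arm a backup timer when an irq is known to go missing. The exit ladder, reduced to a standalone model (the condition plumbing here is illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* One pass of the loop's exit ladder, in the order the kernel checks it. */
static int wait_step(bool reset, bool done, bool signalled, long jiffies_left)
{
	if (reset)
		return -11;	/* -EAGAIN: a reset raced with us, caller retries */
	if (done)
		return 0;	/* seqno passed */
	if (signalled)
		return -4;	/* interruptible wait hit a signal */
	if (jiffies_left <= 0)
		return -62;	/* -ETIME */
	return 1;		/* keep waiting: arm backup timer, io_schedule() */
}

int main(void)
{
	long left = 2;
	int ret;

	/* Nothing ever completes: two more sleeps, then the wait times out. */
	while ((ret = wait_step(false, false, false, left)) == 1)
		left--;	/* in the kernel: expire - jiffies after io_schedule() */
	printf("ret = %d\n", ret);	/* -62 */
	return 0;
}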
@@ -1097,7 +1134,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
 
	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
-			    interruptible, NULL);
+			    interruptible, NULL, NULL);
 }
 
 static int
@@ -1147,6 +1184,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
  */
 static __must_check int
 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+					    struct drm_file *file,
					    bool readonly)
 {
	struct drm_device *dev = obj->base.dev;
@@ -1173,7 +1211,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);
-	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
	mutex_lock(&dev->struct_mutex);
	if (ret)
		return ret;
@@ -1222,7 +1260,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
-	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+	ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
	if (ret)
		goto unref;
 
@@ -1690,13 +1728,13 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
	return 0;
 }
 
-static long
+static unsigned long
 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
		  bool purgeable_only)
 {
	struct list_head still_bound_list;
	struct drm_i915_gem_object *obj, *next;
-	long count = 0;
+	unsigned long count = 0;
 
	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.unbound_list,
@@ -1762,13 +1800,13 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
	return count;
 }
 
-static long
+static unsigned long
 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
 {
	return __i915_gem_shrink(dev_priv, target, true);
 }
 
-static long
+static unsigned long
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
	struct drm_i915_gem_object *obj, *next;
@@ -1778,9 +1816,8 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 
	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
				 global_list) {
-		if (obj->pages_pin_count == 0)
+		if (i915_gem_object_put_pages(obj) == 0)
			freed += obj->base.size >> PAGE_SHIFT;
-		i915_gem_object_put_pages(obj);
	}
	return freed;
 }
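The reordered shrink_all loop counts pages only after i915_gem_object_put_pages() reports success, instead of estimating first and freeing second; with the new unsigned long return type the tally also can't go negative. The count-on-success shape in isolation (error value chosen arbitrarily for the sketch):

#include <stdio.h>

/* Stand-in for i915_gem_object_put_pages(): 0 on success. */
static int put_pages(int pinned) { return pinned ? -16 /* busy */ : 0; }

int main(void)
{
	int pinned[] = { 0, 1, 0 };	/* middle object still pinned */
	unsigned long freed = 0, npages = 4;

	for (unsigned i = 0; i < 3; i++)
		if (put_pages(pinned[i]) == 0)	/* count only what was freed */
			freed += npages;
	printf("freed %lu pages\n", freed);	/* 8, not 12 */
	return 0;
}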
@@ -1865,6 +1902,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);
+
+		/* Check that the i965g/gm workaround works. */
+		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
 #ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
@@ -1918,7 +1958,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
	return 0;
 }
 
-void
+static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *ring)
 {
@@ -1957,6 +1997,13 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
	}
 }
 
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct intel_ring_buffer *ring)
+{
+	list_move_tail(&vma->mm_list, &vma->vm->active_list);
+	return i915_gem_object_move_to_active(vma->obj, ring);
+}
+
 static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
@@ -2078,11 +2125,10 @@ int __i915_add_request(struct intel_ring_buffer *ring,
	if (ret)
		return ret;
 
-	request = kmalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL)
+	request = ring->preallocated_lazy_request;
+	if (WARN_ON(request == NULL))
		return -ENOMEM;
 
-
	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
@@ -2091,17 +2137,13 @@ int __i915_add_request(struct intel_ring_buffer *ring,
	request_ring_position = intel_ring_get_tail(ring);
 
	ret = ring->add_request(ring);
-	if (ret) {
-		kfree(request);
+	if (ret)
		return ret;
-	}
 
	request->seqno = intel_ring_get_seqno(ring);
	request->ring = ring;
	request->head = request_start;
	request->tail = request_ring_position;
-	request->ctx = ring->last_context;
-	request->batch_obj = obj;
 
	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
@@ -2109,7 +2151,12 @@ int __i915_add_request(struct intel_ring_buffer *ring,
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
+	request->batch_obj = obj;
 
+	/* Hold a reference to the current context so that we can inspect
+	 * it later in case a hangcheck error event fires.
+	 */
+	request->ctx = ring->last_context;
	if (request->ctx)
		i915_gem_context_reference(request->ctx);
 
@@ -2129,12 +2176,14 @@ int __i915_add_request(struct intel_ring_buffer *ring,
	}
 
	trace_i915_gem_request_add(ring, request->seqno);
-	ring->outstanding_lazy_request = 0;
+	ring->outstanding_lazy_seqno = 0;
+	ring->preallocated_lazy_request = NULL;
 
	if (!dev_priv->ums.mm_suspended) {
		i915_queue_hangcheck(ring->dev);
 
		if (was_empty) {
+			cancel_delayed_work_sync(&dev_priv->mm.idle_work);
			queue_delayed_work(dev_priv->wq,
					   &dev_priv->mm.retire_work,
					   round_jiffies_up_relative(HZ));
@@ -2156,10 +2205,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
		return;
 
	spin_lock(&file_priv->mm.lock);
-	if (request->file_priv) {
-		list_del(&request->client_list);
-		request->file_priv = NULL;
-	}
+	list_del(&request->client_list);
+	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
 }
 
@@ -2224,6 +2271,21 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request,
	return false;
 }
 
+static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
+{
+	const unsigned long elapsed = get_seconds() - hs->guilty_ts;
+
+	if (hs->banned)
+		return true;
+
+	if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
+		DRM_ERROR("context hanging too fast, declaring banned!\n");
+		return true;
+	}
+
+	return false;
+}
+
 static void i915_set_reset_status(struct intel_ring_buffer *ring,
				  struct drm_i915_gem_request *request,
				  u32 acthd)
@@ -2260,10 +2322,13 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
	hs = &request->file_priv->hang_stats;
 
	if (hs) {
-		if (guilty)
+		if (guilty) {
+			hs->banned = i915_context_is_banned(hs);
			hs->batch_active++;
-		else
+			hs->guilty_ts = get_seconds();
+		} else {
			hs->batch_pending++;
+		}
	}
 }
 
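Putting i915_context_is_banned() together with the guilty path above: a context is banned outright if it was already banned, or if its previous guilty hang (guilty_ts) was at most DRM_I915_CTX_BAN_PERIOD (12 s) ago; each new guilty verdict refreshes the timestamp. A compressed model of that policy:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define BAN_PERIOD 12	/* seconds, DIV_ROUND_UP(8 * 1500, 1000) */

struct hang_stats { unsigned batch_active; time_t guilty_ts; bool banned; };

static bool is_banned(const struct hang_stats *hs, time_t now)
{
	if (hs->banned)
		return true;
	/* second guilty hang inside the window => banned */
	return (now - hs->guilty_ts) <= BAN_PERIOD;
}

static void mark_guilty(struct hang_stats *hs, time_t now)
{
	hs->banned = is_banned(hs, now);
	hs->batch_active++;
	hs->guilty_ts = now;
}

int main(void)
{
	struct hang_stats hs = { 0 };

	mark_guilty(&hs, 100);	/* first hang, long after t=0: not banned */
	mark_guilty(&hs, 105);	/* 5 s later: hanging too fast */
	printf("banned: %d\n", hs.banned);	/* 1 */
	return 0;
}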
@@ -2341,6 +2406,8 @@ void i915_gem_reset(struct drm_device *dev)
	for_each_ring(ring, dev_priv, i)
		i915_gem_reset_ring_lists(dev_priv, ring);
 
+	i915_gem_cleanup_ringbuffer(dev);
+
	i915_gem_restore_fences(dev);
 }
 
@@ -2405,57 +2472,53 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
	WARN_ON(i915_verify_lists(ring->dev));
 }
 
-void
+bool
 i915_gem_retire_requests(struct drm_device *dev)
 {
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
+	bool idle = true;
	int i;
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_ring(ring, dev_priv, i) {
		i915_gem_retire_requests_ring(ring);
+		idle &= list_empty(&ring->request_list);
+	}
+
+	if (idle)
+		mod_delayed_work(dev_priv->wq,
+				 &dev_priv->mm.idle_work,
+				 msecs_to_jiffies(100));
+
+	return idle;
 }
 
 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
-	drm_i915_private_t *dev_priv;
-	struct drm_device *dev;
-	struct intel_ring_buffer *ring;
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), mm.retire_work.work);
+	struct drm_device *dev = dev_priv->dev;
	bool idle;
-	int i;
-
-	dev_priv = container_of(work, drm_i915_private_t,
-				mm.retire_work.work);
-	dev = dev_priv->dev;
 
	/* Come back later if the device is busy... */
-	if (!mutex_trylock(&dev->struct_mutex)) {
-		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
-				   round_jiffies_up_relative(HZ));
-		return;
-	}
-
-	i915_gem_retire_requests(dev);
-
-	/* Send a periodic flush down the ring so we don't hold onto GEM
-	 * objects indefinitely.
-	 */
-	idle = true;
-	for_each_ring(ring, dev_priv, i) {
-		if (ring->gpu_caches_dirty)
-			i915_add_request(ring, NULL);
-
-		idle &= list_empty(&ring->request_list);
+	idle = false;
+	if (mutex_trylock(&dev->struct_mutex)) {
+		idle = i915_gem_retire_requests(dev);
+		mutex_unlock(&dev->struct_mutex);
	}
-
-	if (!dev_priv->ums.mm_suspended && !idle)
+	if (!idle)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));
-	if (idle)
-		intel_mark_idle(dev);
-
-	mutex_unlock(&dev->struct_mutex);
+}
+
+static void
+i915_gem_idle_work_handler(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), mm.idle_work.work);
+
+	intel_mark_idle(dev_priv->dev);
 }
 
 /**
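The split leaves two delayed works with opposite polarities: retire_work requeues itself every second while requests are outstanding, and idle_work is (re)armed with a 100 ms fuse each time the rings drain, so intel_mark_idle() fires only after the GPU has stayed quiet. A toy scheduler showing the hand-off (plain function calls stand in for a real workqueue):

#include <stdbool.h>
#include <stdio.h>

static int pending_requests = 2;
static bool idle_armed;

static void idle_work(void) { printf("mark_idle()\n"); }

static void retire_work(void)
{
	if (pending_requests > 0)
		pending_requests--;	/* i915_gem_retire_requests() */
	if (pending_requests == 0)
		idle_armed = true;	/* mod_delayed_work(idle_work, 100 ms) */
	else
		printf("requeue retire_work in 1s\n");
}

int main(void)
{
	for (int tick = 0; tick < 3; tick++)
		retire_work();
	if (idle_armed)
		idle_work();	/* fires only once the rings stayed empty */
	return 0;
}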
@@ -2553,7 +2616,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	mutex_unlock(&dev->struct_mutex);
 
-	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
 	if (timeout)
 		args->timeout_ns = timespec_to_ns(timeout);
 	return ret;
@@ -2600,6 +2663,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	trace_i915_gem_ring_sync_to(from, to, seqno);
 	ret = to->sync_to(to, from, seqno);
 	if (!ret)
 		/* We use last_read_seqno because sync_to()
@@ -2641,11 +2705,17 @@ int i915_vma_unbind(struct i915_vma *vma)
 	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
 	int ret;
 
+	/* For now we only ever use 1 vma per object */
+	WARN_ON(!list_is_singular(&obj->vma_list));
+
 	if (list_empty(&vma->vma_link))
 		return 0;
 
-	if (!drm_mm_node_allocated(&vma->node))
-		goto destroy;
+	if (!drm_mm_node_allocated(&vma->node)) {
+		i915_gem_vma_destroy(vma);
+
+		return 0;
+	}
 
 	if (obj->pin_count)
 		return -EBUSY;
@@ -2685,13 +2755,10 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	drm_mm_remove_node(&vma->node);
 
-destroy:
 	i915_gem_vma_destroy(vma);
 
 	/* Since the unbound list is global, only move to that list if
-	 * no more VMAs exist.
-	 * NB: Until we have real VMAs there will only ever be one */
-	WARN_ON(!list_empty(&obj->vma_list));
+	 * no more VMAs exist. */
 	if (list_empty(&obj->vma_list))
 		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
@@ -3389,8 +3456,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 
 	/* And bump the LRU for this access */
 	if (i915_gem_object_is_inactive(obj)) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
-							   &dev_priv->gtt.base);
+		struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
 		if (vma)
 			list_move_tail(&vma->mm_list,
 				       &dev_priv->gtt.base.inactive_list);
@@ -3761,7 +3827,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (seqno == 0)
 		return 0;
 
-	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
@@ -3865,6 +3931,11 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
+	if (obj->user_pin_count == ULONG_MAX) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	if (obj->user_pin_count == 0) {
 		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
 		if (ret)
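The new ULONG_MAX test above is a saturation guard: user_pin_count is incremented on behalf of userspace, and without the check repeated pin ioctls could wrap the counter back to zero and unbalance the pin accounting. The guard pattern in isolation, with hypothetical demo_* names:

	#include <linux/kernel.h>	/* ULONG_MAX */
	#include <linux/errno.h>

	struct demo_obj {
		unsigned long user_pin_count;
	};

	static int demo_user_pin(struct demo_obj *obj)
	{
		/* refuse rather than let the counter wrap to zero on ++ */
		if (obj->user_pin_count == ULONG_MAX)
			return -EBUSY;

		obj->user_pin_count++;	/* the first pin would also bind here */
		return 0;
	}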
@@ -4015,7 +4086,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -4015,7 +4086,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 {
 	INIT_LIST_HEAD(&obj->global_list);
 	INIT_LIST_HEAD(&obj->ring_list);
-	INIT_LIST_HEAD(&obj->exec_list);
 	INIT_LIST_HEAD(&obj->obj_exec_link);
 	INIT_LIST_HEAD(&obj->vma_list);
 
@@ -4087,13 +4157,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	return obj;
 }
 
-int i915_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-
-	return 0;
-}
-
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
@@ -4147,9 +4210,20 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	i915_gem_object_free(obj);
 }
 
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm)
 {
+	struct i915_vma *vma;
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->vm == vm)
+			return vma;
+
+	return NULL;
+}
+
+static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+					      struct i915_address_space *vm)
+{
 	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
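i915_gem_obj_to_vma() above is a plain linear walk of the object's per-address-space mappings, and creation is made static so that the lookup-or-create wrapper added in the next hunk is the only way callers can mint a new vma, ruling out duplicates by construction. The lookup reduced to a sketch with hypothetical demo_* types:

	#include <linux/list.h>

	struct demo_vma {
		struct list_head vma_link;
		void *vm;	/* the address space this mapping belongs to */
	};

	static struct demo_vma *demo_obj_to_vma(struct list_head *vma_list,
						void *vm)
	{
		struct demo_vma *vma;

		/* O(n), but an object has very few mappings in practice */
		list_for_each_entry(vma, vma_list, vma_link)
			if (vma->vm == vm)
				return vma;
		return NULL;
	}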
@@ -4169,76 +4243,103 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	return vma;
 }
 
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+				  struct i915_address_space *vm)
+{
+	struct i915_vma *vma;
+
+	vma = i915_gem_obj_to_vma(obj, vm);
+	if (!vma)
+		vma = __i915_gem_vma_create(obj, vm);
+
+	return vma;
+}
+
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
 	WARN_ON(vma->node.allocated);
+
+	/* Keep the vma as a placeholder in the execbuffer reservation lists */
+	if (!list_empty(&vma->exec_list))
+		return;
+
 	list_del(&vma->vma_link);
+
 	kfree(vma);
 }
 
 int
-i915_gem_idle(struct drm_device *dev)
+i915_gem_suspend(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret = 0;
 
-	if (dev_priv->ums.mm_suspended) {
-		mutex_unlock(&dev->struct_mutex);
-		return 0;
-	}
+	mutex_lock(&dev->struct_mutex);
+	if (dev_priv->ums.mm_suspended)
+		goto err;
 
 	ret = i915_gpu_idle(dev);
-	if (ret) {
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
+	if (ret)
+		goto err;
+
 	i915_gem_retire_requests(dev);
 
 	/* Under UMS, be paranoid and evict. */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_gem_evict_everything(dev);
 
-	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
-
 	i915_kernel_lost_context(dev);
 	i915_gem_cleanup_ringbuffer(dev);
 
-	/* Cancel the retire work handler, which should be idle now. */
+	/* Hack! Don't let anybody do execbuf while we don't control the chip.
+	 * We need to replace this with a semaphore, or something.
+	 * And not confound ums.mm_suspended!
+	 */
+	dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
+							     DRIVER_MODESET);
+	mutex_unlock(&dev->struct_mutex);
+
+	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+	cancel_delayed_work_sync(&dev_priv->mm.idle_work);
 
 	return 0;
+
+err:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
 }
 
-void i915_gem_l3_remap(struct drm_device *dev)
+int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 misccpctl;
-	int i;
-
-	if (!HAS_L3_GPU_CACHE(dev))
-		return;
+	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
+	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
+	int i, ret;
 
-	if (!dev_priv->l3_parity.remap_info)
-		return;
+	if (!HAS_L3_DPF(dev) || !remap_info)
+		return 0;
 
-	misccpctl = I915_READ(GEN7_MISCCPCTL);
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
-	POSTING_READ(GEN7_MISCCPCTL);
+	ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
+	if (ret)
+		return ret;
 
+	/*
+	 * Note: We do not worry about the concurrent register cacheline hang
+	 * here because no other code should access these registers other than
+	 * at initialization time.
+	 */
 	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
-		u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
-		if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
-			DRM_DEBUG("0x%x was already programmed to %x\n",
-				  GEN7_L3LOG_BASE + i, remap);
-		if (remap && !dev_priv->l3_parity.remap_info[i/4])
-			DRM_DEBUG_DRIVER("Clearing remapped register\n");
-		I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, reg_base + i);
+		intel_ring_emit(ring, remap_info[i/4]);
 	}
 
-	/* Make sure all the writes land before disabling dop clock gating */
-	POSTING_READ(GEN7_L3LOG_BASE);
+	intel_ring_advance(ring);
 
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+	return ret;
 }
 
 void i915_gem_init_swizzling(struct drm_device *dev)
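A note on the ring-space arithmetic in the rewritten remap function: one register write via MI_LOAD_REGISTER_IMM costs three dwords (command header, register offset, value), and the loop programs one 32-bit register per four bytes of log, which is exactly the GEN7_L3LOG_SIZE / 4 * 3 dwords reserved by intel_ring_begin(). The emission shape, sketched against a plain dword buffer (the header value is passed in rather than guessed at):

	#include <linux/types.h>

	/* Emit (log_size / 4) register writes, three dwords apiece. */
	static u32 *demo_emit_remap(u32 *cs, u32 lri_header, u32 reg_base,
				    const u32 *remap_info, u32 log_size)
	{
		u32 i;

		for (i = 0; i < log_size; i += 4) {
			*cs++ = lri_header;		/* MI_LOAD_REGISTER_IMM(1) */
			*cs++ = reg_base + i;		/* register offset */
			*cs++ = remap_info[i / 4];	/* value to load */
		}
		return cs;
	}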
@@ -4330,7 +4431,7 @@ int
 i915_gem_init_hw(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;
 
 	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
 		return -EIO;
@@ -4338,20 +4439,26 @@ i915_gem_init_hw(struct drm_device *dev)
 	if (dev_priv->ellc_size)
 		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
+	if (IS_HSW_GT3(dev))
+		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
+	else
+		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
+
 	if (HAS_PCH_NOP(dev)) {
 		u32 temp = I915_READ(GEN7_MSG_CTL);
 		temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
 		I915_WRITE(GEN7_MSG_CTL, temp);
 	}
 
-	i915_gem_l3_remap(dev);
-
 	i915_gem_init_swizzling(dev);
 
 	ret = i915_gem_init_rings(dev);
 	if (ret)
 		return ret;
 
+	for (i = 0; i < NUM_L3_SLICES(dev); i++)
+		i915_gem_l3_remap(&dev_priv->ring[RCS], i);
+
 	/*
 	 * XXX: There was some w/a described somewhere suggesting loading
 	 * contexts before PPGTT.
@@ -4454,26 +4561,12 @@ int
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return 0;
 
 	drm_irq_uninstall(dev);
 
-	mutex_lock(&dev->struct_mutex);
-	ret = i915_gem_idle(dev);
-
-	/* Hack! Don't let anybody do execbuf while we don't control the chip.
-	 * We need to replace this with a semaphore, or something.
-	 * And not confound ums.mm_suspended!
-	 */
-	if (ret != 0)
-		dev_priv->ums.mm_suspended = 1;
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
+	return i915_gem_suspend(dev);
 }
 
 void
@@ -4484,11 +4577,9 @@ i915_gem_lastclose(struct drm_device *dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	mutex_lock(&dev->struct_mutex);
-	ret = i915_gem_idle(dev);
+	ret = i915_gem_suspend(dev);
 	if (ret)
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 static void
@@ -4523,6 +4614,7 @@ i915_gem_load(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->vm_list);
 	i915_init_vm(dev_priv, &dev_priv->gtt.base);
 
+	INIT_LIST_HEAD(&dev_priv->context_list);
 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4532,6 +4624,8 @@ i915_gem_load(struct drm_device *dev)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
+	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
+			  i915_gem_idle_work_handler);
 	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
 	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
@@ -4582,7 +4676,7 @@ static int i915_gem_init_phys_object(struct drm_device *dev,
 	if (dev_priv->mm.phys_objs[id - 1] || !size)
 		return 0;
 
-	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
+	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
 	if (!phys_obj)
 		return -ENOMEM;
 
@@ -4756,6 +4850,8 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
+	cancel_delayed_work_sync(&file_priv->mm.idle_work);
+
 	/* Clean up our request list when the client is going away, so that
 	 * later retire_requests won't dereference our soon-to-be-gone
 	 * file_priv.
@@ -4773,6 +4869,38 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 	spin_unlock(&file_priv->mm.lock);
 }
 
+static void
+i915_gem_file_idle_work_handler(struct work_struct *work)
+{
+	struct drm_i915_file_private *file_priv =
+		container_of(work, typeof(*file_priv), mm.idle_work.work);
+
+	atomic_set(&file_priv->rps_wait_boost, false);
+}
+
+int i915_gem_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+	if (!file_priv)
+		return -ENOMEM;
+
+	file->driver_priv = file_priv;
+	file_priv->dev_priv = dev->dev_private;
+
+	spin_lock_init(&file_priv->mm.lock);
+	INIT_LIST_HEAD(&file_priv->mm.request_list);
+	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
+			  i915_gem_file_idle_work_handler);
+
+	idr_init(&file_priv->context_idr);
+
+	return 0;
+}
+
 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 {
 	if (!mutex_is_locked(mutex))
@@ -4823,6 +4951,7 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
 
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
+
 	return count;
 }
 
@@ -4859,11 +4988,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 
 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
 {
-	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
-	struct i915_address_space *vm;
+	struct i915_vma *vma;
 
-	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
-		if (i915_gem_obj_bound(o, vm))
+	list_for_each_entry(vma, &o->vma_list, vma_link)
+		if (drm_mm_node_allocated(&vma->node))
 			return true;
 
 	return false;
@@ -4895,7 +5023,6 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
 					     struct drm_i915_private,
 					     mm.inactive_shrinker);
 	struct drm_device *dev = dev_priv->dev;
-	int nr_to_scan = sc->nr_to_scan;
 	unsigned long freed;
 	bool unlock = true;
 
@@ -4909,38 +5036,30 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
 		unlock = false;
 	}
 
-	freed = i915_gem_purge(dev_priv, nr_to_scan);
-	if (freed < nr_to_scan)
-		freed += __i915_gem_shrink(dev_priv, nr_to_scan,
-					   false);
-	if (freed < nr_to_scan)
+	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
+	if (freed < sc->nr_to_scan)
+		freed += __i915_gem_shrink(dev_priv,
+					   sc->nr_to_scan - freed,
+					   false);
+	if (freed < sc->nr_to_scan)
 		freed += i915_gem_shrink_all(dev_priv);
 
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
+
 	return freed;
 }
 
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm)
+struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
-		if (vma->vm == vm)
-			return vma;
 
-	return NULL;
-}
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-				  struct i915_address_space *vm)
-{
-	struct i915_vma *vma;
+	if (WARN_ON(list_empty(&obj->vma_list)))
+		return NULL;
 
-	vma = i915_gem_obj_to_vma(obj, vm);
-	if (!vma)
-		vma = i915_gem_vma_create(obj, vm);
+	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
+	if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
+		return NULL;
 
 	return vma;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 403309c2a7d6..cc619c138777 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -73,7 +73,7 @@
  *
  * There are two confusing terms used above:
  * The "current context" means the context which is currently running on the
- * GPU. The GPU has loaded it's state already and has stored away the gtt
+ * GPU. The GPU has loaded its state already and has stored away the gtt
  * offset of the BO. The GPU is not actively referencing the data at this
  * offset, but it will on the next context switch. The only way to avoid this
  * is to do a GPU reset.
@@ -129,6 +129,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
 	struct i915_hw_context *ctx = container_of(ctx_ref,
 						   typeof(*ctx), ref);
 
+	list_del(&ctx->link);
 	drm_gem_object_unreference(&ctx->obj->base);
 	kfree(ctx);
 }
@@ -147,6 +148,7 @@ create_hw_context(struct drm_device *dev,
 
 	kref_init(&ctx->ref);
 	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+	INIT_LIST_HEAD(&ctx->link);
 	if (ctx->obj == NULL) {
 		kfree(ctx);
 		DRM_DEBUG_DRIVER("Context object allocated failed\n");
@@ -166,6 +168,7 @@ create_hw_context(struct drm_device *dev,
 	 * assertion in the context switch code.
 	 */
 	ctx->ring = &dev_priv->ring[RCS];
+	list_add_tail(&ctx->link, &dev_priv->context_list);
 
 	/* Default context will never have a file_priv */
 	if (file_priv == NULL)
@@ -178,6 +181,10 @@ create_hw_context(struct drm_device *dev,
 
 	ctx->file_priv = file_priv;
 	ctx->id = ret;
+	/* NB: Mark all slices as needing a remap so that when the context first
+	 * loads it will restore whatever remap state already exists. If there
+	 * is no remap info, it will be a NOP. */
+	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
 
 	return ctx;
 
@@ -213,7 +220,6 @@ static int create_default_context(struct drm_i915_private *dev_priv)
 	 * may not be available. To avoid this we always pin the
 	 * default context.
 	 */
-	dev_priv->ring[RCS].default_context = ctx;
 	ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -226,6 +232,8 @@ static int create_default_context(struct drm_i915_private *dev_priv)
 		goto err_unpin;
 	}
 
+	dev_priv->ring[RCS].default_context = ctx;
+
 	DRM_DEBUG_DRIVER("Default HW context loaded\n");
 	return 0;
 
@@ -281,16 +289,24 @@ void i915_gem_context_fini(struct drm_device *dev)
 	 * other code, leading to spurious errors. */
 	intel_gpu_reset(dev);
 
-	i915_gem_object_unpin(dctx->obj);
-
 	/* When default context is created and switched to, base object refcount
 	 * will be 2 (+1 from object creation and +1 from do_switch()).
 	 * i915_gem_context_fini() will be called after gpu_idle() has switched
 	 * to default context. So we need to unreference the base object once
 	 * to offset the do_switch part, so that i915_gem_context_unreference()
 	 * can then free the base object correctly. */
-	drm_gem_object_unreference(&dctx->obj->base);
+	WARN_ON(!dev_priv->ring[RCS].last_context);
+	if (dev_priv->ring[RCS].last_context == dctx) {
+		/* Fake switch to NULL context */
+		WARN_ON(dctx->obj->active);
+		i915_gem_object_unpin(dctx->obj);
+		i915_gem_context_unreference(dctx);
+	}
+
+	i915_gem_object_unpin(dctx->obj);
 	i915_gem_context_unreference(dctx);
+	dev_priv->ring[RCS].default_context = NULL;
+	dev_priv->ring[RCS].last_context = NULL;
 }
 
 static int context_idr_cleanup(int id, void *p, void *data)
@@ -393,11 +409,11 @@ static int do_switch(struct i915_hw_context *to)
 	struct intel_ring_buffer *ring = to->ring;
 	struct i915_hw_context *from = ring->last_context;
 	u32 hw_flags = 0;
-	int ret;
+	int ret, i;
 
 	BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
 
-	if (from == to)
+	if (from == to && !to->remap_slice)
 		return 0;
 
 	ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
@@ -420,8 +436,6 @@ static int do_switch(struct i915_hw_context *to)
 
 	if (!to->is_initialized || is_default_context(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
-	else if (WARN_ON_ONCE(from == to)) /* not yet expected */
-		hw_flags |= MI_FORCE_RESTORE;
 
 	ret = mi_set_context(ring, to, hw_flags);
 	if (ret) {
@@ -429,6 +443,18 @@ static int do_switch(struct i915_hw_context *to)
 		return ret;
 	}
 
+	for (i = 0; i < MAX_L3_SLICES; i++) {
+		if (!(to->remap_slice & (1<<i)))
+			continue;
+
+		ret = i915_gem_l3_remap(ring, i);
+		/* If it failed, try again next round */
+		if (ret)
+			DRM_DEBUG_DRIVER("L3 remapping failed\n");
+		else
+			to->remap_slice &= ~(1<<i);
+	}
+
 	/* The backing object for the context is done after switching to the
 	 * *next* context. Therefore we cannot retire the previous context until
 	 * the next context has already started running. In fact, the below code
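The loop above consumes the remap_slice bitmask set up in create_hw_context() earlier in this patch: every context starts with one bit per L3 slice set, a successful remap clears that slice's bit, and a failure leaves it set so the next context switch retries. The bit protocol on its own, with an illustrative slice count:

	#include <linux/types.h>

	#define DEMO_MAX_SLICES 2	/* illustrative; the driver uses NUM_L3_SLICES(dev) */

	struct demo_ctx {
		u8 remap_slice;		/* bit i set => slice i still needs a remap */
	};

	static void demo_ctx_init(struct demo_ctx *ctx)
	{
		/* mark every slice as pending, as create_hw_context() does */
		ctx->remap_slice = (1 << DEMO_MAX_SLICES) - 1;
	}

	static void demo_slice_done(struct demo_ctx *ctx, int slice, int err)
	{
		if (!err)
			ctx->remap_slice &= ~(1 << slice);	/* failures retry later */
	}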
@@ -436,11 +462,8 @@ static int do_switch(struct i915_hw_context *to)
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
-		struct i915_address_space *ggtt = &dev_priv->gtt.base;
 		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
-		i915_gem_object_move_to_active(from->obj, ring);
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -451,17 +474,7 @@ static int do_switch(struct i915_hw_context *to)
 		from->obj->dirty = 1;
 		BUG_ON(from->obj->ring != ring);
 
-		ret = i915_add_request(ring, NULL);
-		if (ret) {
-			/* Too late, we've already scheduled a context switch.
-			 * Try to undo the change so that the hw state is
-			 * consistent with out tracking. In case of emergency,
-			 * scream.
-			 */
-			WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
-			return ret;
-		}
-
+		/* obj is kept alive until the next request by its active ref */
 		i915_gem_object_unpin(from->obj);
 		i915_gem_context_unreference(from);
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 91b700155850..b7376533633d 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -37,6 +37,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
 	if (vma->obj->pin_count)
 		return false;
 
+	if (WARN_ON(!list_empty(&vma->exec_list)))
+		return false;
+
 	list_add(&vma->exec_list, unwind);
 	return drm_mm_scan_add_block(&vma->node);
 }
@@ -113,7 +116,7 @@ none:
 	}
 
 	/* We expect the caller to unpin, evict all and try again, or give up.
-	 * So calling i915_gem_evict_everything() is unnecessary.
+	 * So calling i915_gem_evict_vm() is unnecessary.
 	 */
 	return -ENOSPC;
 
@@ -152,12 +155,48 @@ found:
 	return ret;
 }
 
+/**
+ * i915_gem_evict_vm - Try to free up VM space
+ *
+ * @vm: Address space to evict from
+ * @do_idle: Boolean directing whether to idle first.
+ *
+ * VM eviction is about freeing up virtual address space. If one wants fine
+ * grained eviction, they should see evict something for more details. In terms
+ * of freeing up actual system memory, this function may not accomplish the
+ * desired result. An object may be shared in multiple address space, and this
+ * function will not assert those objects be freed.
+ *
+ * Using do_idle will result in a more complete eviction because it retires, and
+ * inactivates current BOs.
+ */
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
+{
+	struct i915_vma *vma, *next;
+	int ret;
+
+	trace_i915_gem_evict_vm(vm);
+
+	if (do_idle) {
+		ret = i915_gpu_idle(vm->dev);
+		if (ret)
+			return ret;
+
+		i915_gem_retire_requests(vm->dev);
+	}
+
+	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+		if (vma->obj->pin_count == 0)
+			WARN_ON(i915_vma_unbind(vma));
+
+	return 0;
+}
+
 int
 i915_gem_evict_everything(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct i915_address_space *vm;
-	struct i915_vma *vma, *next;
 	bool lists_empty = true;
 	int ret;
 
@@ -184,11 +223,8 @@ i915_gem_evict_everything(struct drm_device *dev)
 	i915_gem_retire_requests(dev);
 
 	/* Having flushed everything, unbind() should never raise an error */
-	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
-		list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
-			if (vma->obj->pin_count == 0)
-				WARN_ON(i915_vma_unbind(vma));
-	}
+	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+		WARN_ON(i915_gem_evict_vm(vm, false));
 
 	return 0;
 }
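With i915_gem_evict_vm() factored out, evict-everything reduces to a loop over the address spaces. One detail worth noting: unbinding removes the vma from the very inactive list being walked, so the iteration must be the _safe variant that caches the next node first. A reduced sketch with hypothetical demo_* types:

	#include <linux/list.h>

	struct demo_vma {
		struct list_head mm_list;
		int pin_count;
	};

	static void demo_unbind(struct demo_vma *vma)
	{
		list_del(&vma->mm_list);	/* drops the node we are standing on */
	}

	static void demo_evict_inactive(struct list_head *inactive_list)
	{
		struct demo_vma *vma, *next;

		/* _safe: demo_unbind() unlinks vma while we iterate */
		list_for_each_entry_safe(vma, next, inactive_list, mm_list)
			if (vma->pin_count == 0)
				demo_unbind(vma);
	}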
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index bf345777ae9f..0ce0d47e4b0f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,35 +33,35 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
-struct eb_objects {
-	struct list_head objects;
+struct eb_vmas {
+	struct list_head vmas;
 	int and;
 	union {
-		struct drm_i915_gem_object *lut[0];
+		struct i915_vma *lut[0];
 		struct hlist_head buckets[0];
 	};
 };
 
-static struct eb_objects *
-eb_create(struct drm_i915_gem_execbuffer2 *args)
+static struct eb_vmas *
+eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
 {
-	struct eb_objects *eb = NULL;
+	struct eb_vmas *eb = NULL;
 
 	if (args->flags & I915_EXEC_HANDLE_LUT) {
-		int size = args->buffer_count;
-		size *= sizeof(struct drm_i915_gem_object *);
-		size += sizeof(struct eb_objects);
+		unsigned size = args->buffer_count;
+		size *= sizeof(struct i915_vma *);
+		size += sizeof(struct eb_vmas);
 		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 	}
 
 	if (eb == NULL) {
-		int size = args->buffer_count;
-		int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+		unsigned size = args->buffer_count;
+		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
 		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
 		while (count > 2*size)
 			count >>= 1;
 		eb = kzalloc(count*sizeof(struct hlist_head) +
-			     sizeof(struct eb_objects),
+			     sizeof(struct eb_vmas),
 			     GFP_TEMPORARY);
 		if (eb == NULL)
 			return eb;
@@ -70,64 +70,102 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
 	} else
 		eb->and = -args->buffer_count;
 
-	INIT_LIST_HEAD(&eb->objects);
+	INIT_LIST_HEAD(&eb->vmas);
 	return eb;
 }
 
 static void
-eb_reset(struct eb_objects *eb)
+eb_reset(struct eb_vmas *eb)
 {
 	if (eb->and >= 0)
 		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
 }
 
 static int
-eb_lookup_objects(struct eb_objects *eb,
-		  struct drm_i915_gem_exec_object2 *exec,
-		  const struct drm_i915_gem_execbuffer2 *args,
-		  struct drm_file *file)
+eb_lookup_vmas(struct eb_vmas *eb,
+	       struct drm_i915_gem_exec_object2 *exec,
+	       const struct drm_i915_gem_execbuffer2 *args,
+	       struct i915_address_space *vm,
+	       struct drm_file *file)
 {
-	int i;
+	struct drm_i915_gem_object *obj;
+	struct list_head objects;
+	int i, ret = 0;
 
+	INIT_LIST_HEAD(&objects);
 	spin_lock(&file->table_lock);
+	/* Grab a reference to the object and release the lock so we can lookup
+	 * or create the VMA without using GFP_ATOMIC */
 	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_i915_gem_object *obj;
-
 		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
 		if (obj == NULL) {
 			spin_unlock(&file->table_lock);
 			DRM_DEBUG("Invalid object handle %d at index %d\n",
 				   exec[i].handle, i);
-			return -ENOENT;
+			ret = -ENOENT;
+			goto out;
 		}
 
-		if (!list_empty(&obj->exec_list)) {
+		if (!list_empty(&obj->obj_exec_link)) {
 			spin_unlock(&file->table_lock);
 			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
 				   obj, exec[i].handle, i);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto out;
 		}
 
 		drm_gem_object_reference(&obj->base);
-		list_add_tail(&obj->exec_list, &eb->objects);
+		list_add_tail(&obj->obj_exec_link, &objects);
+	}
+	spin_unlock(&file->table_lock);
+
+	i = 0;
+	list_for_each_entry(obj, &objects, obj_exec_link) {
+		struct i915_vma *vma;
 
-		obj->exec_entry = &exec[i];
+		/*
+		 * NOTE: We can leak any vmas created here when something fails
+		 * later on. But that's no issue since vma_unbind can deal with
+		 * vmas which are not actually bound. And since only
+		 * lookup_or_create exists as an interface to get at the vma
+		 * from the (obj, vm) we don't run the risk of creating
+		 * duplicated vmas for the same vm.
+		 */
+		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+		if (IS_ERR(vma)) {
+			DRM_DEBUG("Failed to lookup VMA\n");
+			ret = PTR_ERR(vma);
+			goto out;
+		}
+
+		list_add_tail(&vma->exec_list, &eb->vmas);
+
+		vma->exec_entry = &exec[i];
 		if (eb->and < 0) {
-			eb->lut[i] = obj;
+			eb->lut[i] = vma;
 		} else {
 			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
-			obj->exec_handle = handle;
-			hlist_add_head(&obj->exec_node,
+			vma->exec_handle = handle;
+			hlist_add_head(&vma->exec_node,
 				       &eb->buckets[handle & eb->and]);
 		}
+		++i;
 	}
-	spin_unlock(&file->table_lock);
 
-	return 0;
+
+out:
+	while (!list_empty(&objects)) {
+		obj = list_first_entry(&objects,
+				       struct drm_i915_gem_object,
+				       obj_exec_link);
+		list_del_init(&obj->obj_exec_link);
+		if (ret)
+			drm_gem_object_unreference(&obj->base);
+	}
+	return ret;
 }
 
-static struct drm_i915_gem_object *
-eb_get_object(struct eb_objects *eb, unsigned long handle)
+static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 {
 	if (eb->and < 0) {
 		if (handle >= -eb->and)
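The rewritten lookup is a two-phase pattern dictated by allocation context: idr_find() must run under the file's table spinlock, while vma creation can allocate and so must not (short of GFP_ATOMIC, which the comment in the hunk explicitly avoids). Phase one takes references under the lock onto a private list; phase two, unlocked, may sleep. A skeleton of that pattern, with hypothetical demo_* names:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct demo_obj {
		struct list_head link;
		int refs;
	};

	static void demo_collect(spinlock_t *lock, struct list_head *table,
				 struct list_head *grabbed)
	{
		struct demo_obj *obj;

		/* Phase 1: lock held, no allocation -- only take references */
		spin_lock(lock);
		list_for_each_entry(obj, table, link)
			obj->refs++;
		list_splice_init(table, grabbed);
		spin_unlock(lock);

		/* Phase 2 then runs over &grabbed with the lock dropped, so
		 * per-object GFP_KERNEL allocations (vma creation) are safe. */
	}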
@@ -139,34 +177,33 @@ eb_get_object(struct eb_objects *eb, unsigned long handle)
 
 		head = &eb->buckets[handle & eb->and];
 		hlist_for_each(node, head) {
-			struct drm_i915_gem_object *obj;
+			struct i915_vma *vma;
 
-			obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
-			if (obj->exec_handle == handle)
-				return obj;
+			vma = hlist_entry(node, struct i915_vma, exec_node);
+			if (vma->exec_handle == handle)
+				return vma;
 		}
 		return NULL;
 	}
 }
 
-static void
-eb_destroy(struct eb_objects *eb)
-{
-	while (!list_empty(&eb->objects)) {
-		struct drm_i915_gem_object *obj;
+static void eb_destroy(struct eb_vmas *eb) {
+	while (!list_empty(&eb->vmas)) {
+		struct i915_vma *vma;
 
-		obj = list_first_entry(&eb->objects,
-				       struct drm_i915_gem_object,
-				       exec_list);
-		list_del_init(&obj->exec_list);
-		drm_gem_object_unreference(&obj->base);
+		vma = list_first_entry(&eb->vmas,
+				       struct i915_vma,
+				       exec_list);
+		list_del_init(&vma->exec_list);
+		drm_gem_object_unreference(&vma->obj->base);
 	}
 	kfree(eb);
 }
 
 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 {
-	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+	return (HAS_LLC(obj->base.dev) ||
+		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
 		!obj->map_and_fenceable ||
 		obj->cache_level != I915_CACHE_NONE);
 }
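eb_get_vma() keeps both lookup strategies from the object-based version: a direct array when I915_EXEC_HANDLE_LUT guarantees dense indices, and hashed hlist buckets otherwise. The bucket probe reduces to a standard hlist walk; a sketch with hypothetical demo_* types:

	#include <linux/list.h>

	struct demo_vma {
		struct hlist_node exec_node;
		unsigned long exec_handle;
	};

	static struct demo_vma *demo_get(struct hlist_head *buckets,
					 unsigned int mask, unsigned long handle)
	{
		struct demo_vma *vma;

		/* mask is bucket_count - 1, hence the power-of-two sizing */
		hlist_for_each_entry(vma, &buckets[handle & mask], exec_node)
			if (vma->exec_handle == handle)
				return vma;
		return NULL;
	}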
@@ -179,7 +216,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
 	char *vaddr;
 	int ret = -EINVAL;
 
-	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
 	if (ret)
 		return ret;
 
@@ -223,22 +260,24 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
-				   struct eb_objects *eb,
+				   struct eb_vmas *eb,
 				   struct drm_i915_gem_relocation_entry *reloc,
 				   struct i915_address_space *vm)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_gem_object *target_obj;
 	struct drm_i915_gem_object *target_i915_obj;
+	struct i915_vma *target_vma;
 	uint32_t target_offset;
 	int ret = -EINVAL;
 
 	/* we've already hold a reference to all valid objects */
-	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
-	if (unlikely(target_obj == NULL))
+	target_vma = eb_get_vma(eb, reloc->target_handle);
+	if (unlikely(target_vma == NULL))
 		return -ENOENT;
+	target_i915_obj = target_vma->obj;
+	target_obj = &target_vma->obj->base;
 
-	target_i915_obj = to_intel_bo(target_obj);
 	target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
 
 	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
@@ -320,14 +359,13 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
-				    struct eb_objects *eb,
-				    struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
+				 struct eb_vmas *eb)
 {
 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
 	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
 	struct drm_i915_gem_relocation_entry __user *user_relocs;
-	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	int remain, ret;
 
 	user_relocs = to_user_ptr(entry->relocs_ptr);
@@ -346,8 +384,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
 	do {
 		u64 offset = r->presumed_offset;
 
-		ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
-							 vm);
+		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
+							 vma->vm);
 		if (ret)
 			return ret;
 
@@ -368,17 +406,16 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
-					 struct eb_objects *eb,
-					 struct drm_i915_gem_relocation_entry *relocs,
-					 struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
+				      struct eb_vmas *eb,
+				      struct drm_i915_gem_relocation_entry *relocs)
 {
-	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	int i, ret;
 
 	for (i = 0; i < entry->relocation_count; i++) {
-		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
-							 vm);
+		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
+							 vma->vm);
 		if (ret)
 			return ret;
 	}
@@ -387,10 +424,10 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate(struct eb_objects *eb,
+i915_gem_execbuffer_relocate(struct eb_vmas *eb,
 			     struct i915_address_space *vm)
 {
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	int ret = 0;
 
 	/* This is the fast path and we cannot handle a pagefault whilst
@@ -401,8 +438,8 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
 	 * lockdep complains vehemently.
 	 */
 	pagefault_disable();
-	list_for_each_entry(obj, &eb->objects, exec_list) {
-		ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
+	list_for_each_entry(vma, &eb->vmas, exec_list) {
+		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
 		if (ret)
 			break;
 	}
@@ -415,31 +452,32 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
 
 static int
-need_reloc_mappable(struct drm_i915_gem_object *obj)
+need_reloc_mappable(struct i915_vma *vma)
 {
-	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
-	return entry->relocation_count && !use_cpu_reloc(obj);
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
+		i915_is_ggtt(vma->vm);
 }
 
 static int
-i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
-				   struct intel_ring_buffer *ring,
-				   struct i915_address_space *vm,
-				   bool *need_reloc)
+i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
+				struct intel_ring_buffer *ring,
+				bool *need_reloc)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence, need_mappable;
+	struct drm_i915_gem_object *obj = vma->obj;
 	int ret;
 
 	need_fence =
 		has_fenced_gpu_access &&
 		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 		obj->tiling_mode != I915_TILING_NONE;
-	need_mappable = need_fence || need_reloc_mappable(obj);
+	need_mappable = need_fence || need_reloc_mappable(vma);
 
-	ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
+	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
 				  false);
 	if (ret)
 		return ret;
@@ -467,8 +505,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
 		obj->has_aliasing_ppgtt_mapping = 1;
 	}
 
-	if (entry->offset != i915_gem_obj_offset(obj, vm)) {
-		entry->offset = i915_gem_obj_offset(obj, vm);
+	if (entry->offset != vma->node.start) {
+		entry->offset = vma->node.start;
 		*need_reloc = true;
 	}
 
@@ -485,14 +523,15 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
 }
 
 static void
-i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 {
 	struct drm_i915_gem_exec_object2 *entry;
+	struct drm_i915_gem_object *obj = vma->obj;
 
-	if (!i915_gem_obj_bound_any(obj))
+	if (!drm_mm_node_allocated(&vma->node))
 		return;
 
-	entry = obj->exec_entry;
+	entry = vma->exec_entry;
 
 	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
 		i915_gem_object_unpin_fence(obj);
@@ -505,41 +544,46 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
 
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
-			    struct list_head *objects,
-			    struct i915_address_space *vm,
+			    struct list_head *vmas,
 			    bool *need_relocs)
 {
 	struct drm_i915_gem_object *obj;
-	struct list_head ordered_objects;
+	struct i915_vma *vma;
+	struct i915_address_space *vm;
+	struct list_head ordered_vmas;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	int retry;
 
-	INIT_LIST_HEAD(&ordered_objects);
-	while (!list_empty(objects)) {
+	if (list_empty(vmas))
+		return 0;
+
+	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
+
+	INIT_LIST_HEAD(&ordered_vmas);
+	while (!list_empty(vmas)) {
 		struct drm_i915_gem_exec_object2 *entry;
 		bool need_fence, need_mappable;
 
-		obj = list_first_entry(objects,
-				       struct drm_i915_gem_object,
-				       exec_list);
-		entry = obj->exec_entry;
+		vma = list_first_entry(vmas, struct i915_vma, exec_list);
+		obj = vma->obj;
+		entry = vma->exec_entry;
 
 		need_fence =
 			has_fenced_gpu_access &&
 			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 			obj->tiling_mode != I915_TILING_NONE;
-		need_mappable = need_fence || need_reloc_mappable(obj);
+		need_mappable = need_fence || need_reloc_mappable(vma);
 
 		if (need_mappable)
-			list_move(&obj->exec_list, &ordered_objects);
+			list_move(&vma->exec_list, &ordered_vmas);
 		else
-			list_move_tail(&obj->exec_list, &ordered_objects);
+			list_move_tail(&vma->exec_list, &ordered_vmas);
 
 		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
 		obj->base.pending_write_domain = 0;
 		obj->pending_fenced_gpu_access = false;
 	}
-	list_splice(&ordered_objects, objects);
+	list_splice(&ordered_vmas, vmas);
 
 	/* Attempt to pin all of the buffers into the GTT.
 	 * This is done in 3 phases:
@@ -558,52 +602,52 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 		int ret = 0;
 
 		/* Unbind any ill-fitting objects or pin. */
-		list_for_each_entry(obj, objects, exec_list) {
-			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+		list_for_each_entry(vma, vmas, exec_list) {
+			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 			bool need_fence, need_mappable;
-			u32 obj_offset;
 
-			if (!i915_gem_obj_bound(obj, vm))
+			obj = vma->obj;
+
+			if (!drm_mm_node_allocated(&vma->node))
 				continue;
 
-			obj_offset = i915_gem_obj_offset(obj, vm);
 			need_fence =
 				has_fenced_gpu_access &&
 				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 				obj->tiling_mode != I915_TILING_NONE;
-			need_mappable = need_fence || need_reloc_mappable(obj);
+			need_mappable = need_fence || need_reloc_mappable(vma);
 
 			WARN_ON((need_mappable || need_fence) &&
-			       !i915_is_ggtt(vm));
+			       !i915_is_ggtt(vma->vm));
 
 			if ((entry->alignment &&
-			     obj_offset & (entry->alignment - 1)) ||
+			     vma->node.start & (entry->alignment - 1)) ||
 			    (need_mappable && !obj->map_and_fenceable))
-				ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
+				ret = i915_vma_unbind(vma);
 			else
-				ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
 			if (ret)
 				goto err;
 		}
 
 		/* Bind fresh objects */
-		list_for_each_entry(obj, objects, exec_list) {
-			if (i915_gem_obj_bound(obj, vm))
+		list_for_each_entry(vma, vmas, exec_list) {
+			if (drm_mm_node_allocated(&vma->node))
 				continue;
 
-			ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
 			if (ret)
 				goto err;
 		}
 
 err:		/* Decrement pin count for bound objects */
-		list_for_each_entry(obj, objects, exec_list)
-			i915_gem_execbuffer_unreserve_object(obj);
+		list_for_each_entry(vma, vmas, exec_list)
+			i915_gem_execbuffer_unreserve_vma(vma);
 
 		if (ret != -ENOSPC || retry++)
 			return ret;
 
-		ret = i915_gem_evict_everything(ring->dev);
+		ret = i915_gem_evict_vm(vm, true);
 		if (ret)
 			return ret;
 	} while (1);
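The conversion leaves the reservation strategy intact but narrows the eviction hammer: instead of evicting every address space on a second -ENOSPC, the loop now evicts only the vm being reserved into (i915_gem_evict_vm(vm, true)). The control flow reduced to its skeleton, with stub functions standing in for the pin and evict passes:

	#include <linux/errno.h>

	static int demo_try_pin_all(void) { return -ENOSPC; }	/* stub */
	static int demo_evict_vm(void) { return 0; }		/* stub */

	static int demo_reserve(void)
	{
		int retry = 0;

		do {
			int ret = demo_try_pin_all();	/* unpins again on failure */

			/* return on success, hard error, or a second ENOSPC */
			if (ret != -ENOSPC || retry++)
				return ret;

			ret = demo_evict_vm();
			if (ret)
				return ret;
		} while (1);
	}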
@@ -614,24 +658,27 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 				  struct drm_i915_gem_execbuffer2 *args,
 				  struct drm_file *file,
 				  struct intel_ring_buffer *ring,
-				  struct eb_objects *eb,
-				  struct drm_i915_gem_exec_object2 *exec,
-				  struct i915_address_space *vm)
+				  struct eb_vmas *eb,
+				  struct drm_i915_gem_exec_object2 *exec)
 {
 	struct drm_i915_gem_relocation_entry *reloc;
-	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm;
+	struct i915_vma *vma;
 	bool need_relocs;
 	int *reloc_offset;
 	int i, total, ret;
-	int count = args->buffer_count;
+	unsigned count = args->buffer_count;
+
+	if (WARN_ON(list_empty(&eb->vmas)))
+		return 0;
+
+	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
 
 	/* We may process another execbuffer during the unlock... */
-	while (!list_empty(&eb->objects)) {
-		obj = list_first_entry(&eb->objects,
-				       struct drm_i915_gem_object,
-				       exec_list);
-		list_del_init(&obj->exec_list);
-		drm_gem_object_unreference(&obj->base);
+	while (!list_empty(&eb->vmas)) {
+		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
+		list_del_init(&vma->exec_list);
+		drm_gem_object_unreference(&vma->obj->base);
 	}
 
 	mutex_unlock(&dev->struct_mutex);
@@ -695,20 +742,19 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 
 	/* reacquire the objects */
 	eb_reset(eb);
-	ret = eb_lookup_objects(eb, exec, args, file);
+	ret = eb_lookup_vmas(eb, exec, args, vm, file);
 	if (ret)
 		goto err;
 
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
 	if (ret)
 		goto err;
 
-	list_for_each_entry(obj, &eb->objects, exec_list) {
-		int offset = obj->exec_entry - exec;
-		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
-							       reloc + reloc_offset[offset],
-							       vm);
+	list_for_each_entry(vma, &eb->vmas, exec_list) {
+		int offset = vma->exec_entry - exec;
+		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
+							    reloc + reloc_offset[offset]);
 		if (ret)
 			goto err;
 	}
@@ -727,14 +773,15 @@ err:
 
 static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
-				struct list_head *objects)
+				struct list_head *vmas)
 {
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
 	int ret;
 
-	list_for_each_entry(obj, objects, exec_list) {
+	list_for_each_entry(vma, vmas, exec_list) {
+		struct drm_i915_gem_object *obj = vma->obj;
 		ret = i915_gem_object_sync(obj, ring);
 		if (ret)
 			return ret;
@@ -771,8 +818,8 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
771 int count) 818 int count)
772{ 819{
773 int i; 820 int i;
774 int relocs_total = 0; 821 unsigned relocs_total = 0;
775 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry); 822 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
776 823
777 for (i = 0; i < count; i++) { 824 for (i = 0; i < count; i++) {
778 char __user *ptr = to_user_ptr(exec[i].relocs_ptr); 825 char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
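[Annotation] Switching relocs_total and relocs_max to unsigned, with the cap computed as UINT_MAX / sizeof(entry), makes the per-buffer relocation sum overflow-proof. A sketch of the same guard in plain C, with struct reloc_entry standing in for drm_i915_gem_relocation_entry:

	#include <limits.h>

	struct reloc_entry { unsigned long long offset, delta; }; /* stand-in */

	int validate_counts(const unsigned int *counts, int n)
	{
		unsigned int total = 0;
		const unsigned int max = UINT_MAX / sizeof(struct reloc_entry);
		int i;

		for (i = 0; i < n; i++) {
			/* reject before adding, so total can never wrap */
			if (counts[i] > max - total)
				return -1;
			total += counts[i];
		}
		return 0;
	}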
@@ -809,13 +856,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
809} 856}
810 857
811static void 858static void
812i915_gem_execbuffer_move_to_active(struct list_head *objects, 859i915_gem_execbuffer_move_to_active(struct list_head *vmas,
813 struct i915_address_space *vm,
814 struct intel_ring_buffer *ring) 860 struct intel_ring_buffer *ring)
815{ 861{
816 struct drm_i915_gem_object *obj; 862 struct i915_vma *vma;
817 863
818 list_for_each_entry(obj, objects, exec_list) { 864 list_for_each_entry(vma, vmas, exec_list) {
865 struct drm_i915_gem_object *obj = vma->obj;
819 u32 old_read = obj->base.read_domains; 866 u32 old_read = obj->base.read_domains;
820 u32 old_write = obj->base.write_domain; 867 u32 old_write = obj->base.write_domain;
821 868
@@ -825,9 +872,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
825 obj->base.read_domains = obj->base.pending_read_domains; 872 obj->base.read_domains = obj->base.pending_read_domains;
826 obj->fenced_gpu_access = obj->pending_fenced_gpu_access; 873 obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
827 874
828 /* FIXME: This lookup gets fixed later <-- danvet */ 875 i915_vma_move_to_active(vma, ring);
829 list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
830 i915_gem_object_move_to_active(obj, ring);
831 if (obj->base.write_domain) { 876 if (obj->base.write_domain) {
832 obj->dirty = 1; 877 obj->dirty = 1;
833 obj->last_write_seqno = intel_ring_get_seqno(ring); 878 obj->last_write_seqno = intel_ring_get_seqno(ring);
@@ -885,10 +930,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
885 struct i915_address_space *vm) 930 struct i915_address_space *vm)
886{ 931{
887 drm_i915_private_t *dev_priv = dev->dev_private; 932 drm_i915_private_t *dev_priv = dev->dev_private;
888 struct eb_objects *eb; 933 struct eb_vmas *eb;
889 struct drm_i915_gem_object *batch_obj; 934 struct drm_i915_gem_object *batch_obj;
890 struct drm_clip_rect *cliprects = NULL; 935 struct drm_clip_rect *cliprects = NULL;
891 struct intel_ring_buffer *ring; 936 struct intel_ring_buffer *ring;
937 struct i915_ctx_hang_stats *hs;
892 u32 ctx_id = i915_execbuffer2_get_context_id(*args); 938 u32 ctx_id = i915_execbuffer2_get_context_id(*args);
893 u32 exec_start, exec_len; 939 u32 exec_start, exec_len;
894 u32 mask, flags; 940 u32 mask, flags;
@@ -1000,7 +1046,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1000 return -EINVAL; 1046 return -EINVAL;
1001 } 1047 }
1002 1048
1003 cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), 1049 cliprects = kcalloc(args->num_cliprects,
1050 sizeof(*cliprects),
1004 GFP_KERNEL); 1051 GFP_KERNEL);
1005 if (cliprects == NULL) { 1052 if (cliprects == NULL) {
1006 ret = -ENOMEM; 1053 ret = -ENOMEM;
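[Annotation] This kmalloc(n * size) to kcalloc(n, size) conversion, repeated throughout the series, moves the multiplication inside an allocator that checks it for overflow and zeroes the result. A user-space analogue of that check:

	#include <stdint.h>
	#include <stdlib.h>

	/* Like kcalloc(): fail instead of wrapping when n * size overflows. */
	void *xcalloc(size_t n, size_t size)
	{
		if (size && n > SIZE_MAX / size)
			return NULL;
		return calloc(n, size); /* calloc zeroes the block too */
	}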
@@ -1025,7 +1072,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1025 goto pre_mutex_err; 1072 goto pre_mutex_err;
1026 } 1073 }
1027 1074
1028 eb = eb_create(args); 1075 eb = eb_create(args, vm);
1029 if (eb == NULL) { 1076 if (eb == NULL) {
1030 mutex_unlock(&dev->struct_mutex); 1077 mutex_unlock(&dev->struct_mutex);
1031 ret = -ENOMEM; 1078 ret = -ENOMEM;
@@ -1033,18 +1080,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1033 } 1080 }
1034 1081
1035 /* Look up object handles */ 1082 /* Look up object handles */
1036 ret = eb_lookup_objects(eb, exec, args, file); 1083 ret = eb_lookup_vmas(eb, exec, args, vm, file);
1037 if (ret) 1084 if (ret)
1038 goto err; 1085 goto err;
1039 1086
1040 /* take note of the batch buffer before we might reorder the lists */ 1087 /* take note of the batch buffer before we might reorder the lists */
1041 batch_obj = list_entry(eb->objects.prev, 1088 batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
1042 struct drm_i915_gem_object,
1043 exec_list);
1044 1089
1045 /* Move the objects en-masse into the GTT, evicting if necessary. */ 1090 /* Move the objects en-masse into the GTT, evicting if necessary. */
1046 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; 1091 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1047 ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs); 1092 ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
1048 if (ret) 1093 if (ret)
1049 goto err; 1094 goto err;
1050 1095
@@ -1054,7 +1099,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1054 if (ret) { 1099 if (ret) {
1055 if (ret == -EFAULT) { 1100 if (ret == -EFAULT) {
1056 ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, 1101 ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
1057 eb, exec, vm); 1102 eb, exec);
1058 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 1103 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1059 } 1104 }
1060 if (ret) 1105 if (ret)
@@ -1076,10 +1121,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1076 if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) 1121 if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
1077 i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); 1122 i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
1078 1123
1079 ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects); 1124 ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
1080 if (ret) 1125 if (ret)
1081 goto err; 1126 goto err;
1082 1127
1128 hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
1129 if (IS_ERR(hs)) {
1130 ret = PTR_ERR(hs);
1131 goto err;
1132 }
1133
1134 if (hs->banned) {
1135 ret = -EIO;
1136 goto err;
1137 }
1138
1083 ret = i915_switch_context(ring, file, ctx_id); 1139 ret = i915_switch_context(ring, file, ctx_id);
1084 if (ret) 1140 if (ret)
1085 goto err; 1141 goto err;
@@ -1131,7 +1187,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1131 1187
1132 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); 1188 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
1133 1189
1134 i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring); 1190 i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
1135 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); 1191 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
1136 1192
1137err: 1193err:
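[Annotation] Taken together, the execbuffer changes above replace per-object tracking (eb_objects) with per-binding tracking (eb_vmas), since one object may be bound into several address spaces. A toy model of the split, with field names assumed for illustration only:

	struct gem_object { int dirty; };
	struct address_space { int id; };

	struct vma {
		struct gem_object *obj;     /* the backing buffer */
		struct address_space *vm;   /* the address space this binding lives in */
		struct vma *exec_next;      /* stand-in for the exec_list linkage */
	};

	/* Walk the exec list by vma and reach object state through vma->obj,
	 * as i915_gem_execbuffer_move_to_active() now does. */
	void mark_dirty(struct vma *vmas)
	{
		struct vma *v;

		for (v = vmas; v; v = v->exec_next)
			v->obj->dirty = 1;
	}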
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 1f7b4caefb6e..c4c42e7cbd7b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -342,7 +342,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
342 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; 342 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
343 ppgtt->base.cleanup = gen6_ppgtt_cleanup; 343 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
344 ppgtt->base.scratch = dev_priv->gtt.base.scratch; 344 ppgtt->base.scratch = dev_priv->gtt.base.scratch;
345 ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, 345 ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
346 GFP_KERNEL); 346 GFP_KERNEL);
347 if (!ppgtt->pt_pages) 347 if (!ppgtt->pt_pages)
348 return -ENOMEM; 348 return -ENOMEM;
@@ -353,7 +353,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
353 goto err_pt_alloc; 353 goto err_pt_alloc;
354 } 354 }
355 355
356 ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries, 356 ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
357 GFP_KERNEL); 357 GFP_KERNEL);
358 if (!ppgtt->pt_dma_addr) 358 if (!ppgtt->pt_dma_addr)
359 goto err_pt_alloc; 359 goto err_pt_alloc;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index e15a1d90037d..d284d892ed94 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -395,7 +395,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
395 if (gtt_offset == I915_GTT_OFFSET_NONE) 395 if (gtt_offset == I915_GTT_OFFSET_NONE)
396 return obj; 396 return obj;
397 397
398 vma = i915_gem_vma_create(obj, ggtt); 398 vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
399 if (IS_ERR(vma)) { 399 if (IS_ERR(vma)) {
400 ret = PTR_ERR(vma); 400 ret = PTR_ERR(vma);
401 goto err_out; 401 goto err_out;
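[Annotation] i915_gem_obj_lookup_or_create_vma() makes VMA creation idempotent: binding the same object into the same VM twice reuses the existing binding instead of allocating a duplicate. A sketch of the lookup-or-create pattern on a singly linked list (layout assumed for illustration):

	#include <stdlib.h>

	struct vma { struct vma *next; void *vm; };

	struct vma *lookup_or_create_vma(struct vma **head, void *vm)
	{
		struct vma *v;

		for (v = *head; v; v = v->next)
			if (v->vm == vm)
				return v;   /* reuse the existing binding */

		v = calloc(1, sizeof(*v));
		if (!v)
			return NULL;
		v->vm = vm;
		v->next = *head;
		*head = v;
		return v;
	}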
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 032e9ef9c896..b13905348048 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -308,7 +308,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
308 return -EINVAL; 308 return -EINVAL;
309 } 309 }
310 310
311 if (obj->pin_count) { 311 if (obj->pin_count || obj->framebuffer_references) {
312 drm_gem_object_unreference_unlocked(&obj->base); 312 drm_gem_object_unreference_unlocked(&obj->base);
313 return -EBUSY; 313 return -EBUSY;
314 } 314 }
@@ -393,7 +393,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
393 /* Try to preallocate memory required to save swizzling on put-pages */ 393 /* Try to preallocate memory required to save swizzling on put-pages */
394 if (i915_gem_object_needs_bit17_swizzle(obj)) { 394 if (i915_gem_object_needs_bit17_swizzle(obj)) {
395 if (obj->bit_17 == NULL) { 395 if (obj->bit_17 == NULL) {
396 obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) * 396 obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
397 sizeof(long), GFP_KERNEL); 397 sizeof(long), GFP_KERNEL);
398 } 398 }
399 } else { 399 } else {
@@ -504,8 +504,8 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
504 int i; 504 int i;
505 505
506 if (obj->bit_17 == NULL) { 506 if (obj->bit_17 == NULL) {
507 obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * 507 obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
508 sizeof(long), GFP_KERNEL); 508 sizeof(long), GFP_KERNEL);
509 if (obj->bit_17 == NULL) { 509 if (obj->bit_17 == NULL) {
510 DRM_ERROR("Failed to allocate memory for bit 17 " 510 DRM_ERROR("Failed to allocate memory for bit 17 "
511 "record\n"); 511 "record\n");
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index dae364f0028c..a8bb213da79f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -215,6 +215,24 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
215 } 215 }
216} 216}
217 217
218static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
219{
220 switch (a) {
221 case HANGCHECK_IDLE:
222 return "idle";
223 case HANGCHECK_WAIT:
224 return "wait";
225 case HANGCHECK_ACTIVE:
226 return "active";
227 case HANGCHECK_KICK:
228 return "kick";
229 case HANGCHECK_HUNG:
230 return "hung";
231 }
232
233 return "unknown";
234}
235
218static void i915_ring_error_state(struct drm_i915_error_state_buf *m, 236static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
219 struct drm_device *dev, 237 struct drm_device *dev,
220 struct drm_i915_error_state *error, 238 struct drm_i915_error_state *error,
@@ -231,7 +249,8 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
231 err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); 249 err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
232 if (ring == RCS && INTEL_INFO(dev)->gen >= 4) 250 if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
233 err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); 251 err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
234 252 if (INTEL_INFO(dev)->gen >= 4)
253 err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]);
235 if (INTEL_INFO(dev)->gen >= 4) 254 if (INTEL_INFO(dev)->gen >= 4)
236 err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); 255 err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
237 err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); 256 err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
@@ -255,6 +274,9 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
255 err_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); 274 err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
256 err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); 275 err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
257 err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); 276 err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
277 err_printf(m, " hangcheck: %s [%d]\n",
278 hangcheck_action_to_str(error->hangcheck_action[ring]),
279 error->hangcheck_score[ring]);
258} 280}
259 281
260void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) 282void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -283,13 +305,14 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
283 err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 305 err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
284 error->time.tv_usec); 306 error->time.tv_usec);
285 err_printf(m, "Kernel: " UTS_RELEASE "\n"); 307 err_printf(m, "Kernel: " UTS_RELEASE "\n");
286 err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 308 err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
287 err_printf(m, "EIR: 0x%08x\n", error->eir); 309 err_printf(m, "EIR: 0x%08x\n", error->eir);
288 err_printf(m, "IER: 0x%08x\n", error->ier); 310 err_printf(m, "IER: 0x%08x\n", error->ier);
289 err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 311 err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
290 err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); 312 err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
291 err_printf(m, "DERRMR: 0x%08x\n", error->derrmr); 313 err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
292 err_printf(m, "CCID: 0x%08x\n", error->ccid); 314 err_printf(m, "CCID: 0x%08x\n", error->ccid);
315 err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
293 316
294 for (i = 0; i < dev_priv->num_fence_regs; i++) 317 for (i = 0; i < dev_priv->num_fence_regs; i++)
295 err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 318 err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
@@ -703,6 +726,7 @@ static void i915_record_ring_state(struct drm_device *dev,
703 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); 726 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
704 if (ring->id == RCS) 727 if (ring->id == RCS)
705 error->bbaddr = I915_READ64(BB_ADDR); 728 error->bbaddr = I915_READ64(BB_ADDR);
729 error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
706 } else { 730 } else {
707 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); 731 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
708 error->ipeir[ring->id] = I915_READ(IPEIR); 732 error->ipeir[ring->id] = I915_READ(IPEIR);
@@ -720,6 +744,9 @@ static void i915_record_ring_state(struct drm_device *dev,
720 744
721 error->cpu_ring_head[ring->id] = ring->head; 745 error->cpu_ring_head[ring->id] = ring->head;
722 error->cpu_ring_tail[ring->id] = ring->tail; 746 error->cpu_ring_tail[ring->id] = ring->tail;
747
748 error->hangcheck_score[ring->id] = ring->hangcheck.score;
749 error->hangcheck_action[ring->id] = ring->hangcheck.action;
723} 750}
724 751
725 752
@@ -769,7 +796,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
769 796
770 error->ring[i].num_requests = count; 797 error->ring[i].num_requests = count;
771 error->ring[i].requests = 798 error->ring[i].requests =
772 kmalloc(count*sizeof(struct drm_i915_error_request), 799 kcalloc(count, sizeof(*error->ring[i].requests),
773 GFP_ATOMIC); 800 GFP_ATOMIC);
774 if (error->ring[i].requests == NULL) { 801 if (error->ring[i].requests == NULL) {
775 error->ring[i].num_requests = 0; 802 error->ring[i].num_requests = 0;
@@ -811,7 +838,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
811 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx]; 838 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
812 839
813 if (i) { 840 if (i) {
814 active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC); 841 active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
815 if (active_bo) 842 if (active_bo)
816 pinned_bo = active_bo + error->active_bo_count[ndx]; 843 pinned_bo = active_bo + error->active_bo_count[ndx];
817 } 844 }
@@ -885,8 +912,12 @@ void i915_capture_error_state(struct drm_device *dev)
885 return; 912 return;
886 } 913 }
887 914
888 DRM_INFO("capturing error event; look for more information in " 915 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
889 "/sys/class/drm/card%d/error\n", dev->primary->index); 916 dev->primary->index);
917 DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
918 DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
919 DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
920 DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
890 921
891 kref_init(&error->ref); 922 kref_init(&error->ref);
892 error->eir = I915_READ(EIR); 923 error->eir = I915_READ(EIR);
@@ -988,6 +1019,7 @@ const char *i915_cache_level_str(int type)
988 case I915_CACHE_NONE: return " uncached"; 1019 case I915_CACHE_NONE: return " uncached";
989 case I915_CACHE_LLC: return " snooped or LLC"; 1020 case I915_CACHE_LLC: return " snooped or LLC";
990 case I915_CACHE_L3_LLC: return " L3+LLC"; 1021 case I915_CACHE_L3_LLC: return " L3+LLC";
1022 case I915_CACHE_WT: return " WT";
991 default: return ""; 1023 default: return "";
992 } 1024 }
993} 1025}
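[Annotation] The error-state additions above capture per-ring hangcheck score and action, and decode the action with an exhaustive switch that deliberately has no default case: adding an enum value provokes a compiler warning (-Wswitch) while the function still falls back to "unknown". The same pattern in miniature:

	enum hang_action { ACT_IDLE, ACT_WAIT, ACT_ACTIVE, ACT_KICK, ACT_HUNG };

	const char *hang_action_str(enum hang_action a)
	{
		switch (a) { /* no default: the compiler flags unhandled values */
		case ACT_IDLE:   return "idle";
		case ACT_WAIT:   return "wait";
		case ACT_ACTIVE: return "active";
		case ACT_KICK:   return "kick";
		case ACT_HUNG:   return "hung";
		}
		return "unknown";
	}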
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4b91228fd9bd..a228176676b2 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -30,6 +30,7 @@
30 30
31#include <linux/sysrq.h> 31#include <linux/sysrq.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/circ_buf.h>
33#include <drm/drmP.h> 34#include <drm/drmP.h>
34#include <drm/i915_drm.h> 35#include <drm/i915_drm.h>
35#include "i915_drv.h" 36#include "i915_drv.h"
@@ -441,7 +442,7 @@ done:
441 442
442 443
443void 444void
444i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 445i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
445{ 446{
446 u32 reg = PIPESTAT(pipe); 447 u32 reg = PIPESTAT(pipe);
447 u32 pipestat = I915_READ(reg) & 0x7fff0000; 448 u32 pipestat = I915_READ(reg) & 0x7fff0000;
@@ -458,7 +459,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
458} 459}
459 460
460void 461void
461i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 462i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
462{ 463{
463 u32 reg = PIPESTAT(pipe); 464 u32 reg = PIPESTAT(pipe);
464 u32 pipestat = I915_READ(reg) & 0x7fff0000; 465 u32 pipestat = I915_READ(reg) & 0x7fff0000;
@@ -486,9 +487,10 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
486 487
487 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 488 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
488 489
489 i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE); 490 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
490 if (INTEL_INFO(dev)->gen >= 4) 491 if (INTEL_INFO(dev)->gen >= 4)
491 i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE); 492 i915_enable_pipestat(dev_priv, PIPE_A,
493 PIPE_LEGACY_BLC_EVENT_ENABLE);
492 494
493 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 495 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
494} 496}
@@ -518,6 +520,12 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
518 } 520 }
519} 521}
520 522
523static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
524{
525 /* Gen2 doesn't have a hardware frame counter */
526 return 0;
527}
528
521/* Called from drm generic code, passed a 'crtc', which 529/* Called from drm generic code, passed a 'crtc', which
522 * we use as a pipe index 530 * we use as a pipe index
523 */ 531 */
@@ -526,7 +534,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
526 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 534 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
527 unsigned long high_frame; 535 unsigned long high_frame;
528 unsigned long low_frame; 536 unsigned long low_frame;
529 u32 high1, high2, low; 537 u32 high1, high2, low, pixel, vbl_start;
530 538
531 if (!i915_pipe_enabled(dev, pipe)) { 539 if (!i915_pipe_enabled(dev, pipe)) {
532 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 540 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -534,6 +542,24 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
534 return 0; 542 return 0;
535 } 543 }
536 544
545 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
546 struct intel_crtc *intel_crtc =
547 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
548 const struct drm_display_mode *mode =
549 &intel_crtc->config.adjusted_mode;
550
551 vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
552 } else {
553 enum transcoder cpu_transcoder =
554 intel_pipe_to_cpu_transcoder(dev_priv, pipe);
555 u32 htotal;
556
557 htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
558 vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
559
560 vbl_start *= htotal;
561 }
562
537 high_frame = PIPEFRAME(pipe); 563 high_frame = PIPEFRAME(pipe);
538 low_frame = PIPEFRAMEPIXEL(pipe); 564 low_frame = PIPEFRAMEPIXEL(pipe);
539 565
@@ -544,13 +570,20 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
544 */ 570 */
545 do { 571 do {
546 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 572 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
547 low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; 573 low = I915_READ(low_frame);
548 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 574 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
549 } while (high1 != high2); 575 } while (high1 != high2);
550 576
551 high1 >>= PIPE_FRAME_HIGH_SHIFT; 577 high1 >>= PIPE_FRAME_HIGH_SHIFT;
578 pixel = low & PIPE_PIXEL_MASK;
552 low >>= PIPE_FRAME_LOW_SHIFT; 579 low >>= PIPE_FRAME_LOW_SHIFT;
553 return (high1 << 8) | low; 580
581 /*
582 * The frame counter increments at beginning of active.
583 * Cook up a vblank counter by also checking the pixel
584 * counter against vblank start.
585 */
586 return ((high1 << 8) | low) + (pixel >= vbl_start);
554} 587}
555 588
556static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 589static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
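[Annotation] The gen3/4 frame counter steps at the start of active video, so the hunk above derives a vblank-aligned count by adding one once the pixel counter has passed vblank start (vbl_start having been pre-multiplied by htotal). A worked sketch, with the timing numbers assumed purely for illustration:

	/* frame: raw hw frame counter; pixel: pixels into the current frame;
	 * vbl_start_px: vblank start in pixels (line * htotal).
	 * E.g. htotal = 2200, vblank starting at line 1080 gives
	 * vbl_start_px = 1080 * 2200 = 2376000, so any position at or past
	 * that reports frame + 1. */
	unsigned int cooked_vblank_count(unsigned int frame, unsigned int pixel,
					 unsigned int vbl_start_px)
	{
		return frame + (pixel >= vbl_start_px);
	}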
@@ -567,37 +600,98 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
567 return I915_READ(reg); 600 return I915_READ(reg);
568} 601}
569 602
603static bool intel_pipe_in_vblank(struct drm_device *dev, enum pipe pipe)
604{
605 struct drm_i915_private *dev_priv = dev->dev_private;
606 uint32_t status;
607
608 if (IS_VALLEYVIEW(dev)) {
609 status = pipe == PIPE_A ?
610 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
611 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
612
613 return I915_READ(VLV_ISR) & status;
614 } else if (IS_GEN2(dev)) {
615 status = pipe == PIPE_A ?
616 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
617 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
618
619 return I915_READ16(ISR) & status;
620 } else if (INTEL_INFO(dev)->gen < 5) {
621 status = pipe == PIPE_A ?
622 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
623 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
624
625 return I915_READ(ISR) & status;
626 } else if (INTEL_INFO(dev)->gen < 7) {
627 status = pipe == PIPE_A ?
628 DE_PIPEA_VBLANK :
629 DE_PIPEB_VBLANK;
630
631 return I915_READ(DEISR) & status;
632 } else {
633 switch (pipe) {
634 default:
635 case PIPE_A:
636 status = DE_PIPEA_VBLANK_IVB;
637 break;
638 case PIPE_B:
639 status = DE_PIPEB_VBLANK_IVB;
640 break;
641 case PIPE_C:
642 status = DE_PIPEC_VBLANK_IVB;
643 break;
644 }
645
646 return I915_READ(DEISR) & status;
647 }
648}
649
570static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 650static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
571 int *vpos, int *hpos) 651 int *vpos, int *hpos)
572{ 652{
573 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 653 struct drm_i915_private *dev_priv = dev->dev_private;
574 u32 vbl = 0, position = 0; 654 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
655 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
656 const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
657 int position;
575 int vbl_start, vbl_end, htotal, vtotal; 658 int vbl_start, vbl_end, htotal, vtotal;
576 bool in_vbl = true; 659 bool in_vbl = true;
577 int ret = 0; 660 int ret = 0;
578 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
579 pipe);
580 661
581 if (!i915_pipe_enabled(dev, pipe)) { 662 if (!intel_crtc->active) {
582 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 663 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
583 "pipe %c\n", pipe_name(pipe)); 664 "pipe %c\n", pipe_name(pipe));
584 return 0; 665 return 0;
585 } 666 }
586 667
587 /* Get vtotal. */ 668 htotal = mode->crtc_htotal;
588 vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 669 vtotal = mode->crtc_vtotal;
670 vbl_start = mode->crtc_vblank_start;
671 vbl_end = mode->crtc_vblank_end;
589 672
590 if (INTEL_INFO(dev)->gen >= 4) { 673 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
674
675 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
591 /* No obvious pixelcount register. Only query vertical 676 /* No obvious pixelcount register. Only query vertical
592 * scanout position from Display scan line register. 677 * scanout position from Display scan line register.
593 */ 678 */
594 position = I915_READ(PIPEDSL(pipe)); 679 if (IS_GEN2(dev))
680 position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
681 else
682 position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
595 683
596 /* Decode into vertical scanout position. Don't have 684 /*
597 * horizontal scanout position. 685 * The scanline counter increments at the leading edge
686 * of hsync, ie. it completely misses the active portion
687 * of the line. Fix up the counter at both edges of vblank
688 * to get a more accurate picture whether we're in vblank
689 * or not.
598 */ 690 */
599 *vpos = position & 0x1fff; 691 in_vbl = intel_pipe_in_vblank(dev, pipe);
600 *hpos = 0; 692 if ((in_vbl && position == vbl_start - 1) ||
693 (!in_vbl && position == vbl_end - 1))
694 position = (position + 1) % vtotal;
601 } else { 695 } else {
602 /* Have access to pixelcount since start of frame. 696 /* Have access to pixelcount since start of frame.
603 * We can split this into vertical and horizontal 697 * We can split this into vertical and horizontal
@@ -605,28 +699,32 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
605 */ 699 */
606 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 700 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
607 701
608 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 702 /* convert to pixel counts */
609 *vpos = position / htotal; 703 vbl_start *= htotal;
610 *hpos = position - (*vpos * htotal); 704 vbl_end *= htotal;
705 vtotal *= htotal;
611 } 706 }
612 707
613 /* Query vblank area. */ 708 in_vbl = position >= vbl_start && position < vbl_end;
614 vbl = I915_READ(VBLANK(cpu_transcoder));
615
616 /* Test position against vblank region. */
617 vbl_start = vbl & 0x1fff;
618 vbl_end = (vbl >> 16) & 0x1fff;
619
620 if ((*vpos < vbl_start) || (*vpos > vbl_end))
621 in_vbl = false;
622 709
623 /* Inside "upper part" of vblank area? Apply corrective offset: */ 710 /*
624 if (in_vbl && (*vpos >= vbl_start)) 711 * While in vblank, position will be negative
625 *vpos = *vpos - vtotal; 712 * counting up towards 0 at vbl_end. And outside
713 * vblank, position will be positive counting
714 * up since vbl_end.
715 */
716 if (position >= vbl_start)
717 position -= vbl_end;
718 else
719 position += vtotal - vbl_end;
626 720
627 /* Readouts valid? */ 721 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
628 if (vbl > 0) 722 *vpos = position;
629 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 723 *hpos = 0;
724 } else {
725 *vpos = position / htotal;
726 *hpos = position - (*vpos * htotal);
727 }
630 728
631 /* In vblank? */ 729 /* In vblank? */
632 if (in_vbl) 730 if (in_vbl)
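[Annotation] The rewritten scanout-position code reports vblank as a negative offset counting up to zero at vbl_end, and active scanout as a positive count from vbl_end. A sketch with assumed 1080p-style timings:

	/* All values in lines. With vtotal = 1125, vbl_start = 1084,
	 * vbl_end = 1125: line 1100 -> -25 (in vblank), line 0 -> 0,
	 * line 500 -> 500 (active scanout). */
	int normalize_scanline(int position, int vbl_start, int vbl_end, int vtotal)
	{
		if (position >= vbl_start)
			return position - vbl_end;
		return position + vtotal - vbl_end;
	}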
@@ -665,7 +763,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
665 crtc); 763 crtc);
666} 764}
667 765
668static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector) 766static bool intel_hpd_irq_event(struct drm_device *dev,
767 struct drm_connector *connector)
669{ 768{
670 enum drm_connector_status old_status; 769 enum drm_connector_status old_status;
671 770
@@ -673,11 +772,16 @@ static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *con
673 old_status = connector->status; 772 old_status = connector->status;
674 773
675 connector->status = connector->funcs->detect(connector, false); 774 connector->status = connector->funcs->detect(connector, false);
676 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", 775 if (old_status == connector->status)
776 return false;
777
778 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
677 connector->base.id, 779 connector->base.id,
678 drm_get_connector_name(connector), 780 drm_get_connector_name(connector),
679 old_status, connector->status); 781 drm_get_connector_status_name(old_status),
680 return (old_status != connector->status); 782 drm_get_connector_status_name(connector->status));
783
784 return true;
681} 785}
682 786
683/* 787/*
@@ -801,7 +905,7 @@ static void notify_ring(struct drm_device *dev,
801 if (ring->obj == NULL) 905 if (ring->obj == NULL)
802 return; 906 return;
803 907
804 trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); 908 trace_i915_gem_request_complete(ring);
805 909
806 wake_up_all(&ring->irq_queue); 910 wake_up_all(&ring->irq_queue);
807 i915_queue_hangcheck(dev); 911 i915_queue_hangcheck(dev);
@@ -812,7 +916,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
812 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 916 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
813 rps.work); 917 rps.work);
814 u32 pm_iir; 918 u32 pm_iir;
815 u8 new_delay; 919 int new_delay, adj;
816 920
817 spin_lock_irq(&dev_priv->irq_lock); 921 spin_lock_irq(&dev_priv->irq_lock);
818 pm_iir = dev_priv->rps.pm_iir; 922 pm_iir = dev_priv->rps.pm_iir;
@@ -829,40 +933,49 @@ static void gen6_pm_rps_work(struct work_struct *work)
829 933
830 mutex_lock(&dev_priv->rps.hw_lock); 934 mutex_lock(&dev_priv->rps.hw_lock);
831 935
936 adj = dev_priv->rps.last_adj;
832 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 937 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
833 new_delay = dev_priv->rps.cur_delay + 1; 938 if (adj > 0)
939 adj *= 2;
940 else
941 adj = 1;
942 new_delay = dev_priv->rps.cur_delay + adj;
834 943
835 /* 944 /*
836 * For better performance, jump directly 945 * For better performance, jump directly
837 * to RPe if we're below it. 946 * to RPe if we're below it.
838 */ 947 */
839 if (IS_VALLEYVIEW(dev_priv->dev) && 948 if (new_delay < dev_priv->rps.rpe_delay)
840 dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay) 949 new_delay = dev_priv->rps.rpe_delay;
950 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
951 if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
841 new_delay = dev_priv->rps.rpe_delay; 952 new_delay = dev_priv->rps.rpe_delay;
842 } else 953 else
843 new_delay = dev_priv->rps.cur_delay - 1; 954 new_delay = dev_priv->rps.min_delay;
955 adj = 0;
956 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
957 if (adj < 0)
958 adj *= 2;
959 else
960 adj = -1;
961 new_delay = dev_priv->rps.cur_delay + adj;
962 } else { /* unknown event */
963 new_delay = dev_priv->rps.cur_delay;
964 }
844 965
845 /* sysfs frequency interfaces may have snuck in while servicing the 966 /* sysfs frequency interfaces may have snuck in while servicing the
846 * interrupt 967 * interrupt
847 */ 968 */
848 if (new_delay >= dev_priv->rps.min_delay && 969 if (new_delay < (int)dev_priv->rps.min_delay)
849 new_delay <= dev_priv->rps.max_delay) { 970 new_delay = dev_priv->rps.min_delay;
850 if (IS_VALLEYVIEW(dev_priv->dev)) 971 if (new_delay > (int)dev_priv->rps.max_delay)
851 valleyview_set_rps(dev_priv->dev, new_delay); 972 new_delay = dev_priv->rps.max_delay;
852 else 973 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
853 gen6_set_rps(dev_priv->dev, new_delay); 974
854 } 975 if (IS_VALLEYVIEW(dev_priv->dev))
855 976 valleyview_set_rps(dev_priv->dev, new_delay);
856 if (IS_VALLEYVIEW(dev_priv->dev)) { 977 else
857 /* 978 gen6_set_rps(dev_priv->dev, new_delay);
858 * On VLV, when we enter RC6 we may not be at the minimum
859 * voltage level, so arm a timer to check. It should only
860 * fire when there's activity or once after we've entered
861 * RC6, and then won't be re-armed until the next RPS interrupt.
862 */
863 mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
864 msecs_to_jiffies(100));
865 }
866 979
867 mutex_unlock(&dev_priv->rps.hw_lock); 980 mutex_unlock(&dev_priv->rps.hw_lock);
868} 981}
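[Annotation] The reworked RPS worker turns the fixed +/-1 frequency step into an adaptive one: consecutive up or down events double the step carried in last_adj, a direction change resets it, and the signed result is clamped to [min, max] (hence new_delay becoming int). The core of that policy as a sketch, slightly simplified from the hunk above:

	int next_delay(int cur, int *last_adj, int up_event, int min, int max)
	{
		int adj = *last_adj;

		if (up_event)
			adj = adj > 0 ? adj * 2 : 1;   /* keep accelerating upward */
		else
			adj = adj < 0 ? adj * 2 : -1;  /* or downward */

		cur += adj;
		if (cur < min)
			cur = min;
		if (cur > max)
			cur = max;

		*last_adj = adj;
		return cur;
	}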
@@ -882,9 +995,10 @@ static void ivybridge_parity_work(struct work_struct *work)
882 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 995 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
883 l3_parity.error_work); 996 l3_parity.error_work);
884 u32 error_status, row, bank, subbank; 997 u32 error_status, row, bank, subbank;
885 char *parity_event[5]; 998 char *parity_event[6];
886 uint32_t misccpctl; 999 uint32_t misccpctl;
887 unsigned long flags; 1000 unsigned long flags;
1001 uint8_t slice = 0;
888 1002
889 /* We must turn off DOP level clock gating to access the L3 registers. 1003 /* We must turn off DOP level clock gating to access the L3 registers.
890 * In order to prevent a get/put style interface, acquire struct mutex 1004 * In order to prevent a get/put style interface, acquire struct mutex
@@ -892,55 +1006,81 @@ static void ivybridge_parity_work(struct work_struct *work)
892 */ 1006 */
893 mutex_lock(&dev_priv->dev->struct_mutex); 1007 mutex_lock(&dev_priv->dev->struct_mutex);
894 1008
1009 /* If we've screwed up tracking, just let the interrupt fire again */
1010 if (WARN_ON(!dev_priv->l3_parity.which_slice))
1011 goto out;
1012
895 misccpctl = I915_READ(GEN7_MISCCPCTL); 1013 misccpctl = I915_READ(GEN7_MISCCPCTL);
896 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1014 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
897 POSTING_READ(GEN7_MISCCPCTL); 1015 POSTING_READ(GEN7_MISCCPCTL);
898 1016
899 error_status = I915_READ(GEN7_L3CDERRST1); 1017 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
900 row = GEN7_PARITY_ERROR_ROW(error_status); 1018 u32 reg;
901 bank = GEN7_PARITY_ERROR_BANK(error_status);
902 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
903 1019
904 I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | 1020 slice--;
905 GEN7_L3CDERRST1_ENABLE); 1021 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
906 POSTING_READ(GEN7_L3CDERRST1); 1022 break;
907 1023
908 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1024 dev_priv->l3_parity.which_slice &= ~(1<<slice);
909 1025
910 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1026 reg = GEN7_L3CDERRST1 + (slice * 0x200);
911 ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
912 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
913 1027
914 mutex_unlock(&dev_priv->dev->struct_mutex); 1028 error_status = I915_READ(reg);
1029 row = GEN7_PARITY_ERROR_ROW(error_status);
1030 bank = GEN7_PARITY_ERROR_BANK(error_status);
1031 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1032
1033 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1034 POSTING_READ(reg);
915 1035
916 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1036 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
917 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1037 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
918 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1038 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
919 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1039 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
920 parity_event[4] = NULL; 1040 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1041 parity_event[5] = NULL;
921 1042
922 kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, 1043 kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
923 KOBJ_CHANGE, parity_event); 1044 KOBJ_CHANGE, parity_event);
924 1045
925 DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", 1046 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
926 row, bank, subbank); 1047 slice, row, bank, subbank);
927 1048
928 kfree(parity_event[3]); 1049 kfree(parity_event[4]);
929 kfree(parity_event[2]); 1050 kfree(parity_event[3]);
930 kfree(parity_event[1]); 1051 kfree(parity_event[2]);
1052 kfree(parity_event[1]);
1053 }
1054
1055 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1056
1057out:
1058 WARN_ON(dev_priv->l3_parity.which_slice);
1059 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1060 ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1061 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1062
1063 mutex_unlock(&dev_priv->dev->struct_mutex);
931} 1064}
932 1065
933static void ivybridge_parity_error_irq_handler(struct drm_device *dev) 1066static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
934{ 1067{
935 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1068 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
936 1069
937 if (!HAS_L3_GPU_CACHE(dev)) 1070 if (!HAS_L3_DPF(dev))
938 return; 1071 return;
939 1072
940 spin_lock(&dev_priv->irq_lock); 1073 spin_lock(&dev_priv->irq_lock);
941 ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT); 1074 ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
942 spin_unlock(&dev_priv->irq_lock); 1075 spin_unlock(&dev_priv->irq_lock);
943 1076
1077 iir &= GT_PARITY_ERROR(dev);
1078 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1079 dev_priv->l3_parity.which_slice |= 1 << 1;
1080
1081 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1082 dev_priv->l3_parity.which_slice |= 1 << 0;
1083
944 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1084 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
945} 1085}
946 1086
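[Annotation] With multiple L3 slices on HSW, the IRQ handler latches pending slices into the which_slice bitmask and the worker drains them with ffs(). The drain loop in user-space form:

	#include <strings.h> /* ffs() */

	void drain_slices(unsigned int *pending, void (*handle)(int slice))
	{
		int slice;

		while ((slice = ffs(*pending)) != 0) {
			slice--;                    /* ffs() is 1-based */
			*pending &= ~(1u << slice);
			handle(slice);              /* read and clear that slice's status */
		}
	}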
@@ -975,8 +1115,8 @@ static void snb_gt_irq_handler(struct drm_device *dev,
975 i915_handle_error(dev, false); 1115 i915_handle_error(dev, false);
976 } 1116 }
977 1117
978 if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1118 if (gt_iir & GT_PARITY_ERROR(dev))
979 ivybridge_parity_error_irq_handler(dev); 1119 ivybridge_parity_error_irq_handler(dev, gt_iir);
980} 1120}
981 1121
982#define HPD_STORM_DETECT_PERIOD 1000 1122#define HPD_STORM_DETECT_PERIOD 1000
@@ -1050,6 +1190,102 @@ static void dp_aux_irq_handler(struct drm_device *dev)
1050 wake_up_all(&dev_priv->gmbus_wait_queue); 1190 wake_up_all(&dev_priv->gmbus_wait_queue);
1051} 1191}
1052 1192
1193#if defined(CONFIG_DEBUG_FS)
1194static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1195 uint32_t crc0, uint32_t crc1,
1196 uint32_t crc2, uint32_t crc3,
1197 uint32_t crc4)
1198{
1199 struct drm_i915_private *dev_priv = dev->dev_private;
1200 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1201 struct intel_pipe_crc_entry *entry;
1202 int head, tail;
1203
1204 spin_lock(&pipe_crc->lock);
1205
1206 if (!pipe_crc->entries) {
1207 spin_unlock(&pipe_crc->lock);
1208 DRM_ERROR("spurious interrupt\n");
1209 return;
1210 }
1211
1212 head = pipe_crc->head;
1213 tail = pipe_crc->tail;
1214
1215 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1216 spin_unlock(&pipe_crc->lock);
1217 DRM_ERROR("CRC buffer overflowing\n");
1218 return;
1219 }
1220
1221 entry = &pipe_crc->entries[head];
1222
1223 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1224 entry->crc[0] = crc0;
1225 entry->crc[1] = crc1;
1226 entry->crc[2] = crc2;
1227 entry->crc[3] = crc3;
1228 entry->crc[4] = crc4;
1229
1230 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1231 pipe_crc->head = head;
1232
1233 spin_unlock(&pipe_crc->lock);
1234
1235 wake_up_interruptible(&pipe_crc->wq);
1236}
1237#else
1238static inline void
1239display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1240 uint32_t crc0, uint32_t crc1,
1241 uint32_t crc2, uint32_t crc3,
1242 uint32_t crc4) {}
1243#endif
1244
1245
1246static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1247{
1248 struct drm_i915_private *dev_priv = dev->dev_private;
1249
1250 display_pipe_crc_irq_handler(dev, pipe,
1251 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1252 0, 0, 0, 0);
1253}
1254
1255static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1256{
1257 struct drm_i915_private *dev_priv = dev->dev_private;
1258
1259 display_pipe_crc_irq_handler(dev, pipe,
1260 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1261 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1262 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1263 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1264 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1265}
1266
1267static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1268{
1269 struct drm_i915_private *dev_priv = dev->dev_private;
1270 uint32_t res1, res2;
1271
1272 if (INTEL_INFO(dev)->gen >= 3)
1273 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1274 else
1275 res1 = 0;
1276
1277 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1278 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1279 else
1280 res2 = 0;
1281
1282 display_pipe_crc_irq_handler(dev, pipe,
1283 I915_READ(PIPE_CRC_RES_RED(pipe)),
1284 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1285 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1286 res1, res2);
1287}
1288
1053/* The RPS events need forcewake, so we add them to a work queue and mask their 1289/* The RPS events need forcewake, so we add them to a work queue and mask their
1054 * IMR bits until the work is done. Other interrupts can be processed without 1290 * IMR bits until the work is done. Other interrupts can be processed without
1055 * the work queue. */ 1291 * the work queue. */
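[Annotation] display_pipe_crc_irq_handler() above feeds a single-producer ring buffer guarded by CIRC_SPACE(), which is why linux/circ_buf.h is now included; a full ring drops the sample rather than overwriting. A user-space reduction with the same power-of-two indexing:

	#define NR_ENTRIES 128 /* must be a power of two */
	#define CIRC_SPACE(head, tail, size) (((tail) - ((head) + 1)) & ((size) - 1))

	struct crc_ring {
		unsigned int entries[NR_ENTRIES];
		int head, tail; /* producer / consumer indices */
	};

	int crc_push(struct crc_ring *r, unsigned int crc)
	{
		if (CIRC_SPACE(r->head, r->tail, NR_ENTRIES) < 1)
			return -1; /* full: drop, don't overwrite */

		r->entries[r->head] = crc;
		r->head = (r->head + 1) & (NR_ENTRIES - 1);
		return 0;
	}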
@@ -1124,6 +1360,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1124 intel_prepare_page_flip(dev, pipe); 1360 intel_prepare_page_flip(dev, pipe);
1125 intel_finish_page_flip(dev, pipe); 1361 intel_finish_page_flip(dev, pipe);
1126 } 1362 }
1363
1364 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1365 i9xx_pipe_crc_irq_handler(dev, pipe);
1127 } 1366 }
1128 1367
1129 /* Consume port. Then clear IIR or we'll miss events */ 1368 /* Consume port. Then clear IIR or we'll miss events */
@@ -1212,21 +1451,26 @@ static void ivb_err_int_handler(struct drm_device *dev)
1212{ 1451{
1213 struct drm_i915_private *dev_priv = dev->dev_private; 1452 struct drm_i915_private *dev_priv = dev->dev_private;
1214 u32 err_int = I915_READ(GEN7_ERR_INT); 1453 u32 err_int = I915_READ(GEN7_ERR_INT);
1454 enum pipe pipe;
1215 1455
1216 if (err_int & ERR_INT_POISON) 1456 if (err_int & ERR_INT_POISON)
1217 DRM_ERROR("Poison interrupt\n"); 1457 DRM_ERROR("Poison interrupt\n");
1218 1458
1219 if (err_int & ERR_INT_FIFO_UNDERRUN_A) 1459 for_each_pipe(pipe) {
1220 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 1460 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
1221 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 1461 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1222 1462 false))
1223 if (err_int & ERR_INT_FIFO_UNDERRUN_B) 1463 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1224 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 1464 pipe_name(pipe));
1225 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 1465 }
1226 1466
1227 if (err_int & ERR_INT_FIFO_UNDERRUN_C) 1467 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1228 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false)) 1468 if (IS_IVYBRIDGE(dev))
1229 DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n"); 1469 ivb_pipe_crc_irq_handler(dev, pipe);
1470 else
1471 hsw_pipe_crc_irq_handler(dev, pipe);
1472 }
1473 }
1230 1474
1231 I915_WRITE(GEN7_ERR_INT, err_int); 1475 I915_WRITE(GEN7_ERR_INT, err_int);
1232} 1476}
@@ -1297,6 +1541,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1297static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 1541static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1298{ 1542{
1299 struct drm_i915_private *dev_priv = dev->dev_private; 1543 struct drm_i915_private *dev_priv = dev->dev_private;
1544 enum pipe pipe;
1300 1545
1301 if (de_iir & DE_AUX_CHANNEL_A) 1546 if (de_iir & DE_AUX_CHANNEL_A)
1302 dp_aux_irq_handler(dev); 1547 dp_aux_irq_handler(dev);
@@ -1304,31 +1549,26 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1304 if (de_iir & DE_GSE) 1549 if (de_iir & DE_GSE)
1305 intel_opregion_asle_intr(dev); 1550 intel_opregion_asle_intr(dev);
1306 1551
1307 if (de_iir & DE_PIPEA_VBLANK)
1308 drm_handle_vblank(dev, 0);
1309
1310 if (de_iir & DE_PIPEB_VBLANK)
1311 drm_handle_vblank(dev, 1);
1312
1313 if (de_iir & DE_POISON) 1552 if (de_iir & DE_POISON)
1314 DRM_ERROR("Poison interrupt\n"); 1553 DRM_ERROR("Poison interrupt\n");
1315 1554
1316 if (de_iir & DE_PIPEA_FIFO_UNDERRUN) 1555 for_each_pipe(pipe) {
1317 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 1556 if (de_iir & DE_PIPE_VBLANK(pipe))
1318 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 1557 drm_handle_vblank(dev, pipe);
1319 1558
1320 if (de_iir & DE_PIPEB_FIFO_UNDERRUN) 1559 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1321 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 1560 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
1322 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 1561 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1562 pipe_name(pipe));
1323 1563
1324 if (de_iir & DE_PLANEA_FLIP_DONE) { 1564 if (de_iir & DE_PIPE_CRC_DONE(pipe))
1325 intel_prepare_page_flip(dev, 0); 1565 i9xx_pipe_crc_irq_handler(dev, pipe);
1326 intel_finish_page_flip_plane(dev, 0);
1327 }
1328 1566
1329 if (de_iir & DE_PLANEB_FLIP_DONE) { 1567 /* plane/pipes map 1:1 on ilk+ */
1330 intel_prepare_page_flip(dev, 1); 1568 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
1331 intel_finish_page_flip_plane(dev, 1); 1569 intel_prepare_page_flip(dev, pipe);
1570 intel_finish_page_flip_plane(dev, pipe);
1571 }
1332 } 1572 }
1333 1573
1334 /* check event from PCH */ 1574 /* check event from PCH */
@@ -1351,7 +1591,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1351static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 1591static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1352{ 1592{
1353 struct drm_i915_private *dev_priv = dev->dev_private; 1593 struct drm_i915_private *dev_priv = dev->dev_private;
1354 int i; 1594 enum pipe i;
1355 1595
1356 if (de_iir & DE_ERR_INT_IVB) 1596 if (de_iir & DE_ERR_INT_IVB)
1357 ivb_err_int_handler(dev); 1597 ivb_err_int_handler(dev);
@@ -1362,10 +1602,12 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1362 if (de_iir & DE_GSE_IVB) 1602 if (de_iir & DE_GSE_IVB)
1363 intel_opregion_asle_intr(dev); 1603 intel_opregion_asle_intr(dev);
1364 1604
1365 for (i = 0; i < 3; i++) { 1605 for_each_pipe(i) {
1366 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 1606 if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
1367 drm_handle_vblank(dev, i); 1607 drm_handle_vblank(dev, i);
1368 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { 1608
1609 /* plane/pipes map 1:1 on ilk+ */
1610 if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
1369 intel_prepare_page_flip(dev, i); 1611 intel_prepare_page_flip(dev, i);
1370 intel_finish_page_flip_plane(dev, i); 1612 intel_finish_page_flip_plane(dev, i);
1371 } 1613 }
@@ -1388,7 +1630,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1388 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1630 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1389 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 1631 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1390 irqreturn_t ret = IRQ_NONE; 1632 irqreturn_t ret = IRQ_NONE;
1391 bool err_int_reenable = false;
1392 1633
1393 atomic_inc(&dev_priv->irq_received); 1634 atomic_inc(&dev_priv->irq_received);
1394 1635
@@ -1412,17 +1653,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1412 POSTING_READ(SDEIER); 1653 POSTING_READ(SDEIER);
1413 } 1654 }
1414 1655
1415 /* On Haswell, also mask ERR_INT because we don't want to risk
1416 * generating "unclaimed register" interrupts from inside the interrupt
1417 * handler. */
1418 if (IS_HASWELL(dev)) {
1419 spin_lock(&dev_priv->irq_lock);
1420 err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
1421 if (err_int_reenable)
1422 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
1423 spin_unlock(&dev_priv->irq_lock);
1424 }
1425
1426 gt_iir = I915_READ(GTIIR); 1656 gt_iir = I915_READ(GTIIR);
1427 if (gt_iir) { 1657 if (gt_iir) {
1428 if (INTEL_INFO(dev)->gen >= 6) 1658 if (INTEL_INFO(dev)->gen >= 6)
@@ -1452,13 +1682,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1452 } 1682 }
1453 } 1683 }
1454 1684
1455 if (err_int_reenable) {
1456 spin_lock(&dev_priv->irq_lock);
1457 if (ivb_can_enable_err_int(dev))
1458 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
1459 spin_unlock(&dev_priv->irq_lock);
1460 }
1461
1462 I915_WRITE(DEIER, de_ier); 1685 I915_WRITE(DEIER, de_ier);
1463 POSTING_READ(DEIER); 1686 POSTING_READ(DEIER);
1464 if (!HAS_PCH_NOP(dev)) { 1687 if (!HAS_PCH_NOP(dev)) {
@@ -1787,7 +2010,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1787 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2010 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1788 unsigned long irqflags; 2011 unsigned long irqflags;
1789 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2012 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
1790 DE_PIPE_VBLANK_ILK(pipe); 2013 DE_PIPE_VBLANK(pipe);
1791 2014
1792 if (!i915_pipe_enabled(dev, pipe)) 2015 if (!i915_pipe_enabled(dev, pipe))
1793 return -EINVAL; 2016 return -EINVAL;
@@ -1810,7 +2033,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1810 2033
1811 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2034 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1812 imr = I915_READ(VLV_IMR); 2035 imr = I915_READ(VLV_IMR);
1813 if (pipe == 0) 2036 if (pipe == PIPE_A)
1814 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2037 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1815 else 2038 else
1816 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2039 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
@@ -1845,7 +2068,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1845 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2068 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1846 unsigned long irqflags; 2069 unsigned long irqflags;
1847 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2070 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
1848 DE_PIPE_VBLANK_ILK(pipe); 2071 DE_PIPE_VBLANK(pipe);
1849 2072
1850 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2073 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1851 ironlake_disable_display_irq(dev_priv, bit); 2074 ironlake_disable_display_irq(dev_priv, bit);
@@ -1862,7 +2085,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1862 i915_disable_pipestat(dev_priv, pipe, 2085 i915_disable_pipestat(dev_priv, pipe,
1863 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2086 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1864 imr = I915_READ(VLV_IMR); 2087 imr = I915_READ(VLV_IMR);
1865 if (pipe == 0) 2088 if (pipe == PIPE_A)
1866 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2089 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1867 else 2090 else
1868 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2091 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
@@ -1965,6 +2188,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
1965 if (tmp & RING_WAIT) { 2188 if (tmp & RING_WAIT) {
1966 DRM_ERROR("Kicking stuck wait on %s\n", 2189 DRM_ERROR("Kicking stuck wait on %s\n",
1967 ring->name); 2190 ring->name);
2191 i915_handle_error(dev, false);
1968 I915_WRITE_CTL(ring, tmp); 2192 I915_WRITE_CTL(ring, tmp);
1969 return HANGCHECK_KICK; 2193 return HANGCHECK_KICK;
1970 } 2194 }
@@ -1976,6 +2200,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
1976 case 1: 2200 case 1:
1977 DRM_ERROR("Kicking stuck semaphore on %s\n", 2201 DRM_ERROR("Kicking stuck semaphore on %s\n",
1978 ring->name); 2202 ring->name);
2203 i915_handle_error(dev, false);
1979 I915_WRITE_CTL(ring, tmp); 2204 I915_WRITE_CTL(ring, tmp);
1980 return HANGCHECK_KICK; 2205 return HANGCHECK_KICK;
1981 case 0: 2206 case 0:
@@ -2021,12 +2246,21 @@ static void i915_hangcheck_elapsed(unsigned long data)
2021 2246
2022 if (ring->hangcheck.seqno == seqno) { 2247 if (ring->hangcheck.seqno == seqno) {
2023 if (ring_idle(ring, seqno)) { 2248 if (ring_idle(ring, seqno)) {
2249 ring->hangcheck.action = HANGCHECK_IDLE;
2250
2024 if (waitqueue_active(&ring->irq_queue)) { 2251 if (waitqueue_active(&ring->irq_queue)) {
2025 /* Issue a wake-up to catch stuck h/w. */ 2252 /* Issue a wake-up to catch stuck h/w. */
2026 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2253 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2027 ring->name); 2254 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2028 wake_up_all(&ring->irq_queue); 2255 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2029 ring->hangcheck.score += HUNG; 2256 ring->name);
2257 else
2258 DRM_INFO("Fake missed irq on %s\n",
2259 ring->name);
2260 wake_up_all(&ring->irq_queue);
2261 }
2262 /* Safeguard against driver failure */
2263 ring->hangcheck.score += BUSY;
2030 } else 2264 } else
2031 busy = false; 2265 busy = false;
2032 } else { 2266 } else {
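[Annotation] The hangcheck path now records a missed interrupt per ring with test_and_set_bit(), so only the first occurrence logs, and test_irq_rings lets debugfs provoke a fake one for testing. A once-per-ring logging gate sketched with C11 atomics:

	#include <stdatomic.h>

	/* Returns nonzero only the first time a ring's bit becomes set,
	 * mirroring the test_and_set_bit() gate above. */
	int note_missed_irq(atomic_ulong *missed, int ring)
	{
		unsigned long bit = 1ul << ring;

		return !(atomic_fetch_or(missed, bit) & bit);
	}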
@@ -2049,6 +2283,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
2049 acthd); 2283 acthd);
2050 2284
2051 switch (ring->hangcheck.action) { 2285 switch (ring->hangcheck.action) {
2286 case HANGCHECK_IDLE:
2052 case HANGCHECK_WAIT: 2287 case HANGCHECK_WAIT:
2053 break; 2288 break;
2054 case HANGCHECK_ACTIVE: 2289 case HANGCHECK_ACTIVE:
@@ -2064,6 +2299,8 @@ static void i915_hangcheck_elapsed(unsigned long data)
2064 } 2299 }
2065 } 2300 }
2066 } else { 2301 } else {
2302 ring->hangcheck.action = HANGCHECK_ACTIVE;
2303
2067 /* Gradually reduce the count so that we catch DoS 2304 /* Gradually reduce the count so that we catch DoS
2068 * attempts across multiple batches. 2305 * attempts across multiple batches.
2069 */ 2306 */
@@ -2254,10 +2491,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
2254 pm_irqs = gt_irqs = 0; 2491 pm_irqs = gt_irqs = 0;
2255 2492
2256 dev_priv->gt_irq_mask = ~0; 2493 dev_priv->gt_irq_mask = ~0;
2257 if (HAS_L3_GPU_CACHE(dev)) { 2494 if (HAS_L3_DPF(dev)) {
2258 /* L3 parity interrupt is always unmasked. */ 2495 /* L3 parity interrupt is always unmasked. */
2259 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2496 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
2260 gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2497 gt_irqs |= GT_PARITY_ERROR(dev);
2261 } 2498 }
2262 2499
2263 gt_irqs |= GT_RENDER_USER_INTERRUPT; 2500 gt_irqs |= GT_RENDER_USER_INTERRUPT;
@@ -2306,8 +2543,10 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
2306 } else { 2543 } else {
2307 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2544 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2308 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 2545 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2309 DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | 2546 DE_AUX_CHANNEL_A |
2310 DE_PIPEA_FIFO_UNDERRUN | DE_POISON); 2547 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
2548 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
2549 DE_POISON);
2311 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; 2550 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
2312 } 2551 }
2313 2552
@@ -2341,7 +2580,8 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2341{ 2580{
2342 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2581 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2343 u32 enable_mask; 2582 u32 enable_mask;
2344 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2583 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
2584 PIPE_CRC_DONE_ENABLE;
2345 unsigned long irqflags; 2585 unsigned long irqflags;
2346 2586
2347 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 2587 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@@ -2371,9 +2611,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2371 /* Interrupt setup is already guaranteed to be single-threaded, this is 2611 /* Interrupt setup is already guaranteed to be single-threaded, this is
2372 * just to make the assert_spin_locked check happy. */ 2612 * just to make the assert_spin_locked check happy. */
2373 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2613 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2374 i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2614 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
2375 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2615 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
2376 i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2616 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
2377 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2617 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2378 2618
2379 I915_WRITE(VLV_IIR, 0xffffffff); 2619 I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2464,6 +2704,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
2464static int i8xx_irq_postinstall(struct drm_device *dev) 2704static int i8xx_irq_postinstall(struct drm_device *dev)
2465{ 2705{
2466 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2706 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2707 unsigned long irqflags;
2467 2708
2468 I915_WRITE16(EMR, 2709 I915_WRITE16(EMR,
2469 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2710 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -2484,6 +2725,13 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
2484 I915_USER_INTERRUPT); 2725 I915_USER_INTERRUPT);
2485 POSTING_READ16(IER); 2726 POSTING_READ16(IER);
2486 2727
2728 /* Interrupt setup is already guaranteed to be single-threaded, this is
2729 * just to make the assert_spin_locked check happy. */
2730 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2731 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
2732 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
2733 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2734
2487 return 0; 2735 return 0;
2488} 2736}
2489 2737
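
The pipestat hunk above repeats an idiom worth spelling out: i915_enable_pipestat() asserts that irq_lock is held, so even a single-threaded postinstall path takes the lock purely to satisfy the assertion. A sketch of the pattern with pthreads standing in for the kernel spinlock; all names here are illustrative.

	#include <assert.h>
	#include <pthread.h>

	static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
	static int irq_lock_held;
	static unsigned int pipestat[2];

	static void enable_pipestat(int pipe, unsigned int bits)
	{
		assert(irq_lock_held);	/* stand-in for assert_spin_locked() */
		pipestat[pipe] |= bits;
	}

	int main(void)
	{
		pthread_mutex_lock(&irq_lock);
		irq_lock_held = 1;
		enable_pipestat(0, 1u << 9);	/* e.g. a CRC-done enable bit */
		enable_pipestat(1, 1u << 9);
		irq_lock_held = 0;
		pthread_mutex_unlock(&irq_lock);
		return 0;
	}
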
@@ -2570,13 +2818,14 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2570 if (iir & I915_USER_INTERRUPT) 2818 if (iir & I915_USER_INTERRUPT)
2571 notify_ring(dev, &dev_priv->ring[RCS]); 2819 notify_ring(dev, &dev_priv->ring[RCS]);
2572 2820
2573 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && 2821 for_each_pipe(pipe) {
2574 i8xx_handle_vblank(dev, 0, iir)) 2822 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2575 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); 2823 i8xx_handle_vblank(dev, pipe, iir))
2824 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
2576 2825
2577 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && 2826 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2578 i8xx_handle_vblank(dev, 1, iir)) 2827 i9xx_pipe_crc_irq_handler(dev, pipe);
2579 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); 2828 }
2580 2829
2581 iir = new_iir; 2830 iir = new_iir;
2582 } 2831 }
@@ -2623,6 +2872,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
2623{ 2872{
2624 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2873 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2625 u32 enable_mask; 2874 u32 enable_mask;
2875 unsigned long irqflags;
2626 2876
2627 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2877 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2628 2878
@@ -2658,6 +2908,13 @@ static int i915_irq_postinstall(struct drm_device *dev)
2658 2908
2659 i915_enable_asle_pipestat(dev); 2909 i915_enable_asle_pipestat(dev);
2660 2910
2911 /* Interrupt setup is already guaranteed to be single-threaded, this is
2912 * just to make the assert_spin_locked check happy. */
2913 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2914 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
2915 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
2916 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2917
2661 return 0; 2918 return 0;
2662} 2919}
2663 2920
@@ -2769,6 +3026,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
2769 3026
2770 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3027 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2771 blc_event = true; 3028 blc_event = true;
3029
3030 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3031 i9xx_pipe_crc_irq_handler(dev, pipe);
2772 } 3032 }
2773 3033
2774 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3034 if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -2867,7 +3127,9 @@ static int i965_irq_postinstall(struct drm_device *dev)
2867 /* Interrupt setup is already guaranteed to be single-threaded, this is 3127 /* Interrupt setup is already guaranteed to be single-threaded, this is
2868 * just to make the assert_spin_locked check happy. */ 3128 * just to make the assert_spin_locked check happy. */
2869 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3129 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2870 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 3130 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
3131 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
3132 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
2871 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3133 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2872 3134
2873 /* 3135 /*
@@ -3013,6 +3275,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
3013 3275
3014 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3276 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3015 blc_event = true; 3277 blc_event = true;
3278
3279 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3280 i9xx_pipe_crc_irq_handler(dev, pipe);
3016 } 3281 }
3017 3282
3018 3283
@@ -3122,18 +3387,21 @@ void intel_irq_init(struct drm_device *dev)
3122 3387
3123 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 3388 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3124 3389
3125 dev->driver->get_vblank_counter = i915_get_vblank_counter; 3390 if (IS_GEN2(dev)) {
3126 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 3391 dev->max_vblank_count = 0;
3127 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 3392 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
3393 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
3128 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 3394 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3129 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 3395 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3396 } else {
3397 dev->driver->get_vblank_counter = i915_get_vblank_counter;
3398 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
3130 } 3399 }
3131 3400
3132 if (drm_core_check_feature(dev, DRIVER_MODESET)) 3401 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
3133 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 3402 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3134 else 3403 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3135 dev->driver->get_vblank_timestamp = NULL; 3404 }
3136 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3137 3405
3138 if (IS_VALLEYVIEW(dev)) { 3406 if (IS_VALLEYVIEW(dev)) {
3139 dev->driver->irq_handler = valleyview_irq_handler; 3407 dev->driver->irq_handler = valleyview_irq_handler;
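
The reordered vblank-counter setup above encodes three hardware classes: gen2 has no usable frame counter (max_vblank_count = 0 tells the DRM core to count interrupts instead), g4x and gen5+ expose a full 32-bit counter, and everything in between provides only 24 bits. The width matters because counter deltas are computed modulo the counter size. A small illustrative sketch of that wrap arithmetic:

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t vblank_diff(uint32_t cur, uint32_t last, uint32_t max_count)
	{
		/* max_count == 0: no hw counter, caller counts interrupts */
		return max_count ? (cur - last) & max_count : 0;
	}

	int main(void)
	{
		/* 24-bit counter wrapping from 0xfffffe to 0x000001: 3 frames */
		printf("24-bit diff: %u\n",
		       (unsigned)vblank_diff(0x000001, 0xfffffe, 0xffffff));
		/* full 32-bit counter, same wrap handled by unsigned math */
		printf("32-bit diff: %u\n",
		       (unsigned)vblank_diff(1, 0xfffffffeu, 0xffffffffu));
		return 0;
	}
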
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ef9b35479f01..3f303ba995c5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,6 +26,7 @@
26#define _I915_REG_H_ 26#define _I915_REG_H_
27 27
28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) 28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
29#define _PIPE_INC(pipe, base, inc) ((base) + (pipe)*(inc))
29#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) 30#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
30 31
31#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) 32#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
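
The new _PIPE_INC() macro complements _PIPE(): the existing form interpolates between a known pipe A/pipe B address pair, while the new one starts from a single base and applies a fixed stride. A quick illustrative check that both forms agree when the stride is known:

	#include <stdio.h>

	#define _PIPE(pipe, a, b)		((a) + (pipe)*((b)-(a)))
	#define _PIPE_INC(pipe, base, inc)	((base) + (pipe)*(inc))

	int main(void)
	{
		/* pipe B CRC control via the stride form: 0x60050 + 1*0x1000 */
		printf("0x%x\n", _PIPE_INC(1, 0x60050, 0x1000));	/* 0x61050 */
		/* the pair form gives the same address when both ends are known */
		printf("0x%x\n", _PIPE(1, 0x60050, 0x61050));		/* 0x61050 */
		return 0;
	}
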
@@ -264,6 +265,11 @@
264#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */ 265#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
265#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */ 266#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
266#define MI_SEMAPHORE_SYNC_INVALID (3<<16) 267#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
268
269#define MI_PREDICATE_RESULT_2 (0x2214)
270#define LOWER_SLICE_ENABLED (1<<0)
271#define LOWER_SLICE_DISABLED (0<<0)
272
267/* 273/*
268 * 3D instructions used by the kernel 274 * 3D instructions used by the kernel
269 */ 275 */
@@ -346,12 +352,25 @@
346#define IOSF_PORT_PUNIT 0x4 352#define IOSF_PORT_PUNIT 0x4
347#define IOSF_PORT_NC 0x11 353#define IOSF_PORT_NC 0x11
348#define IOSF_PORT_DPIO 0x12 354#define IOSF_PORT_DPIO 0x12
355#define IOSF_PORT_GPIO_NC 0x13
356#define IOSF_PORT_CCK 0x14
357#define IOSF_PORT_CCU 0xA9
358#define IOSF_PORT_GPS_CORE 0x48
349#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104) 359#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104)
350#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108) 360#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108)
351 361
352#define PUNIT_OPCODE_REG_READ 6 362#define PUNIT_OPCODE_REG_READ 6
353#define PUNIT_OPCODE_REG_WRITE 7 363#define PUNIT_OPCODE_REG_WRITE 7
354 364
365#define PUNIT_REG_PWRGT_CTRL 0x60
366#define PUNIT_REG_PWRGT_STATUS 0x61
367#define PUNIT_CLK_GATE 1
368#define PUNIT_PWR_RESET 2
369#define PUNIT_PWR_GATE 3
370#define RENDER_PWRGT (PUNIT_PWR_GATE << 0)
371#define MEDIA_PWRGT (PUNIT_PWR_GATE << 2)
372#define DISP2D_PWRGT (PUNIT_PWR_GATE << 6)
373
355#define PUNIT_REG_GPU_LFM 0xd3 374#define PUNIT_REG_GPU_LFM 0xd3
356#define PUNIT_REG_GPU_FREQ_REQ 0xd4 375#define PUNIT_REG_GPU_FREQ_REQ 0xd4
357#define PUNIT_REG_GPU_FREQ_STS 0xd8 376#define PUNIT_REG_GPU_FREQ_STS 0xd8
@@ -372,6 +391,40 @@
372#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27 391#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
373#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000 392#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
374 393
 394/* vlv2 north clock (CCK) registers */
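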
395#define CCK_FUSE_REG 0x8
396#define CCK_FUSE_HPLL_FREQ_MASK 0x3
397#define CCK_REG_DSI_PLL_FUSE 0x44
398#define CCK_REG_DSI_PLL_CONTROL 0x48
399#define DSI_PLL_VCO_EN (1 << 31)
400#define DSI_PLL_LDO_GATE (1 << 30)
401#define DSI_PLL_P1_POST_DIV_SHIFT 17
402#define DSI_PLL_P1_POST_DIV_MASK (0x1ff << 17)
403#define DSI_PLL_P2_MUX_DSI0_DIV2 (1 << 13)
404#define DSI_PLL_P3_MUX_DSI1_DIV2 (1 << 12)
405#define DSI_PLL_MUX_MASK (3 << 9)
406#define DSI_PLL_MUX_DSI0_DSIPLL (0 << 10)
407#define DSI_PLL_MUX_DSI0_CCK (1 << 10)
408#define DSI_PLL_MUX_DSI1_DSIPLL (0 << 9)
409#define DSI_PLL_MUX_DSI1_CCK (1 << 9)
410#define DSI_PLL_CLK_GATE_MASK (0xf << 5)
411#define DSI_PLL_CLK_GATE_DSI0_DSIPLL (1 << 8)
412#define DSI_PLL_CLK_GATE_DSI1_DSIPLL (1 << 7)
413#define DSI_PLL_CLK_GATE_DSI0_CCK (1 << 6)
414#define DSI_PLL_CLK_GATE_DSI1_CCK (1 << 5)
415#define DSI_PLL_LOCK (1 << 0)
416#define CCK_REG_DSI_PLL_DIVIDER 0x4c
417#define DSI_PLL_LFSR (1 << 31)
418#define DSI_PLL_FRACTION_EN (1 << 30)
419#define DSI_PLL_FRAC_COUNTER_SHIFT 27
420#define DSI_PLL_FRAC_COUNTER_MASK (7 << 27)
421#define DSI_PLL_USYNC_CNT_SHIFT 18
422#define DSI_PLL_USYNC_CNT_MASK (0x1ff << 18)
423#define DSI_PLL_N1_DIV_SHIFT 16
424#define DSI_PLL_N1_DIV_MASK (3 << 16)
425#define DSI_PLL_M1_DIV_SHIFT 0
426#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
427
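
The divider register above packs the DSI PLL divider values into bit fields. A sketch of packing and unpacking those fields using the masks as defined; the sample divider values are arbitrary and only demonstrate the shifts.

	#include <stdio.h>
	#include <stdint.h>

	#define DSI_PLL_N1_DIV_SHIFT	16
	#define DSI_PLL_N1_DIV_MASK	(3 << 16)
	#define DSI_PLL_M1_DIV_SHIFT	0
	#define DSI_PLL_M1_DIV_MASK	(0x1ff << 0)

	int main(void)
	{
		uint32_t m1 = 125, n1 = 1;	/* arbitrary example dividers */
		uint32_t val = (m1 << DSI_PLL_M1_DIV_SHIFT) |
			       (n1 << DSI_PLL_N1_DIV_SHIFT);

		printf("divider reg = 0x%08x\n", (unsigned)val);
		printf("m1 = %u, n1 = %u\n",
		       (unsigned)((val & DSI_PLL_M1_DIV_MASK) >> DSI_PLL_M1_DIV_SHIFT),
		       (unsigned)((val & DSI_PLL_N1_DIV_MASK) >> DSI_PLL_N1_DIV_SHIFT));
		return 0;
	}
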
375/* 428/*
376 * DPIO - a special bus for various display related registers to hide behind 429 * DPIO - a special bus for various display related registers to hide behind
377 * 430 *
@@ -387,11 +440,11 @@
387#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */ 440#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
388#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */ 441#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
389#define DPIO_SFR_BYPASS (1<<1) 442#define DPIO_SFR_BYPASS (1<<1)
390#define DPIO_RESET (1<<0) 443#define DPIO_CMNRST (1<<0)
391 444
392#define _DPIO_TX3_SWING_CTL4_A 0x690 445#define _DPIO_TX3_SWING_CTL4_A 0x690
393#define _DPIO_TX3_SWING_CTL4_B 0x2a90 446#define _DPIO_TX3_SWING_CTL4_B 0x2a90
394#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX_SWING_CTL4_A, \ 447#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX3_SWING_CTL4_A, \
395 _DPIO_TX3_SWING_CTL4_B) 448 _DPIO_TX3_SWING_CTL4_B)
396 449
397/* 450/*
@@ -669,13 +722,18 @@
669#define NOPID 0x02094 722#define NOPID 0x02094
670#define HWSTAM 0x02098 723#define HWSTAM 0x02098
671#define DMA_FADD_I8XX 0x020d0 724#define DMA_FADD_I8XX 0x020d0
725#define RING_BBSTATE(base) ((base)+0x110)
672 726
673#define ERROR_GEN6 0x040a0 727#define ERROR_GEN6 0x040a0
674#define GEN7_ERR_INT 0x44040 728#define GEN7_ERR_INT 0x44040
675#define ERR_INT_POISON (1<<31) 729#define ERR_INT_POISON (1<<31)
676#define ERR_INT_MMIO_UNCLAIMED (1<<13) 730#define ERR_INT_MMIO_UNCLAIMED (1<<13)
731#define ERR_INT_PIPE_CRC_DONE_C (1<<8)
677#define ERR_INT_FIFO_UNDERRUN_C (1<<6) 732#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
733#define ERR_INT_PIPE_CRC_DONE_B (1<<5)
678#define ERR_INT_FIFO_UNDERRUN_B (1<<3) 734#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
735#define ERR_INT_PIPE_CRC_DONE_A (1<<2)
736#define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + pipe*3))
679#define ERR_INT_FIFO_UNDERRUN_A (1<<0) 737#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
680#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) 738#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
681 739
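
The new parameterized forms are worth sanity-checking against the named constants above: per pipe, the CRC-done and FIFO-underrun bits repeat every three positions. A compile-and-run check (with the macro argument parenthesized here for hygiene):

	#include <assert.h>

	#define ERR_INT_PIPE_CRC_DONE(pipe)	(1 << (2 + (pipe)*3))
	#define ERR_INT_FIFO_UNDERRUN(pipe)	(1 << ((pipe)*3))

	int main(void)
	{
		assert(ERR_INT_PIPE_CRC_DONE(0) == (1 << 2));	/* pipe A */
		assert(ERR_INT_PIPE_CRC_DONE(1) == (1 << 5));	/* pipe B */
		assert(ERR_INT_PIPE_CRC_DONE(2) == (1 << 8));	/* pipe C */
		assert(ERR_INT_FIFO_UNDERRUN(2) == (1 << 6));	/* pipe C */
		return 0;
	}
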
@@ -890,6 +948,7 @@
890#define GT_BLT_USER_INTERRUPT (1 << 22) 948#define GT_BLT_USER_INTERRUPT (1 << 22)
891#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15) 949#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
892#define GT_BSD_USER_INTERRUPT (1 << 12) 950#define GT_BSD_USER_INTERRUPT (1 << 12)
951#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
893#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */ 952#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
894#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4) 953#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
895#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3) 954#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
@@ -900,6 +959,10 @@
900#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */ 959#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */
901#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */ 960#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */
902 961
962#define GT_PARITY_ERROR(dev) \
963 (GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
964 (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
965
903/* These are all the "old" interrupts */ 966/* These are all the "old" interrupts */
904#define ILK_BSD_USER_INTERRUPT (1<<5) 967#define ILK_BSD_USER_INTERRUPT (1<<5)
905#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) 968#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
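
GT_PARITY_ERROR() above is device-dependent because Haswell adds a second L3 slice with its own parity-error bit. A sketch of the combined mask, with is_haswell as an illustrative stand-in for IS_HASWELL(dev):

	#include <stdio.h>

	#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT	(1 << 5)
	#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1	(1 << 11)	/* hsw+ */

	static unsigned int gt_parity_error(int is_haswell)
	{
		return GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
		       (is_haswell ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0);
	}

	int main(void)
	{
		printf("ivb mask: 0x%x\n", gt_parity_error(0));	/* 0x20 */
		printf("hsw mask: 0x%x\n", gt_parity_error(1));	/* 0x820 */
		return 0;
	}
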
@@ -1048,9 +1111,6 @@
1048 _HSW_PIPE_SLICE_CHICKEN_1_A, + \ 1111 _HSW_PIPE_SLICE_CHICKEN_1_A, + \
1049 _HSW_PIPE_SLICE_CHICKEN_1_B) 1112 _HSW_PIPE_SLICE_CHICKEN_1_B)
1050 1113
1051#define HSW_CLKGATE_DISABLE_PART_1 0x46500
1052#define HSW_DPFC_GATING_DISABLE (1<<23)
1053
1054/* 1114/*
1055 * GPIO regs 1115 * GPIO regs
1056 */ 1116 */
@@ -1387,6 +1447,12 @@
1387 1447
1388#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504) 1448#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504)
1389 1449
1450#define CZCLK_CDCLK_FREQ_RATIO (VLV_DISPLAY_BASE + 0x6508)
1451#define CDCLK_FREQ_SHIFT 4
1452#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
1453#define CZCLK_FREQ_MASK 0xf
1454#define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510)
1455
1390/* 1456/*
1391 * Palette regs 1457 * Palette regs
1392 */ 1458 */
@@ -1404,13 +1470,15 @@
1404 * device 0 function 0's pci config register 0x44 or 0x48 and matches it in 1470 * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
1405 * every way. It is not accessible from the CP register read instructions. 1471 * every way. It is not accessible from the CP register read instructions.
1406 * 1472 *
1473 * Starting from Haswell, you can't write registers using the MCHBAR mirror,
1474 * just read.
1407 */ 1475 */
1408#define MCHBAR_MIRROR_BASE 0x10000 1476#define MCHBAR_MIRROR_BASE 0x10000
1409 1477
1410#define MCHBAR_MIRROR_BASE_SNB 0x140000 1478#define MCHBAR_MIRROR_BASE_SNB 0x140000
1411 1479
1412/* Memory controller frequency in MCHBAR for Haswell (possibly SNB+) */ 1480/* Memory controller frequency in MCHBAR for Haswell (possibly SNB+) */
1413#define DCLK 0x5e04 1481#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
1414 1482
1415/** 915-945 and GM965 MCH register controlling DRAM channel access */ 1483/** 915-945 and GM965 MCH register controlling DRAM channel access */
1416#define DCC 0x10200 1484#define DCC 0x10200
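
Rebasing DCLK (and the GEN6_* registers below) on MCHBAR_MIRROR_BASE_SNB spells out how the MMIO address is derived: mirror base plus MCHBAR offset. A one-line check of the arithmetic:

	#include <assert.h>

	#define MCHBAR_MIRROR_BASE_SNB	0x140000
	#define DCLK			(MCHBAR_MIRROR_BASE_SNB + 0x5e04)

	int main(void)
	{
		assert(DCLK == 0x145e04);	/* snb+ mirror copy of MCHBAR 0x5e04 */
		return 0;
	}
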
@@ -1705,9 +1773,9 @@
1705#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7 1773#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
1706#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16)) 1774#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
1707 1775
1708#define GEN6_GT_PERF_STATUS 0x145948 1776#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948)
1709#define GEN6_RP_STATE_LIMITS 0x145994 1777#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
1710#define GEN6_RP_STATE_CAP 0x145998 1778#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
1711 1779
1712/* 1780/*
1713 * Logical Context regs 1781 * Logical Context regs
@@ -1753,6 +1821,9 @@
1753 */ 1821 */
1754#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE) 1822#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
1755 1823
1824#define VLV_CLK_CTL2 0x101104
1825#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
1826
1756/* 1827/*
1757 * Overlay regs 1828 * Overlay regs
1758 */ 1829 */
@@ -1771,6 +1842,83 @@
1771 * Display engine regs 1842 * Display engine regs
1772 */ 1843 */
1773 1844
1845/* Pipe A CRC regs */
1846#define _PIPE_CRC_CTL_A (dev_priv->info->display_mmio_offset + 0x60050)
1847#define PIPE_CRC_ENABLE (1 << 31)
1848/* ivb+ source selection */
1849#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29)
1850#define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29)
1851#define PIPE_CRC_SOURCE_PF_IVB (2 << 29)
1852/* ilk+ source selection */
1853#define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28)
1854#define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28)
1855#define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28)
1856/* embedded DP port on the north display block, reserved on ivb */
1857#define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28)
1858#define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */
1859/* vlv source selection */
1860#define PIPE_CRC_SOURCE_PIPE_VLV (0 << 27)
1861#define PIPE_CRC_SOURCE_HDMIB_VLV (1 << 27)
1862#define PIPE_CRC_SOURCE_HDMIC_VLV (2 << 27)
1863/* with DP port the pipe source is invalid */
1864#define PIPE_CRC_SOURCE_DP_D_VLV (3 << 27)
1865#define PIPE_CRC_SOURCE_DP_B_VLV (6 << 27)
1866#define PIPE_CRC_SOURCE_DP_C_VLV (7 << 27)
1867/* gen3+ source selection */
1868#define PIPE_CRC_SOURCE_PIPE_I9XX (0 << 28)
1869#define PIPE_CRC_SOURCE_SDVOB_I9XX (1 << 28)
1870#define PIPE_CRC_SOURCE_SDVOC_I9XX (2 << 28)
1871/* with DP/TV port the pipe source is invalid */
1872#define PIPE_CRC_SOURCE_DP_D_G4X (3 << 28)
1873#define PIPE_CRC_SOURCE_TV_PRE (4 << 28)
1874#define PIPE_CRC_SOURCE_TV_POST (5 << 28)
1875#define PIPE_CRC_SOURCE_DP_B_G4X (6 << 28)
1876#define PIPE_CRC_SOURCE_DP_C_G4X (7 << 28)
1877/* gen2 doesn't have source selection bits */
1878#define PIPE_CRC_INCLUDE_BORDER_I8XX (1 << 30)
1879
1880#define _PIPE_CRC_RES_1_A_IVB 0x60064
1881#define _PIPE_CRC_RES_2_A_IVB 0x60068
1882#define _PIPE_CRC_RES_3_A_IVB 0x6006c
1883#define _PIPE_CRC_RES_4_A_IVB 0x60070
1884#define _PIPE_CRC_RES_5_A_IVB 0x60074
1885
1886#define _PIPE_CRC_RES_RED_A (dev_priv->info->display_mmio_offset + 0x60060)
1887#define _PIPE_CRC_RES_GREEN_A (dev_priv->info->display_mmio_offset + 0x60064)
1888#define _PIPE_CRC_RES_BLUE_A (dev_priv->info->display_mmio_offset + 0x60068)
1889#define _PIPE_CRC_RES_RES1_A_I915 (dev_priv->info->display_mmio_offset + 0x6006c)
1890#define _PIPE_CRC_RES_RES2_A_G4X (dev_priv->info->display_mmio_offset + 0x60080)
1891
1892/* Pipe B CRC regs */
1893#define _PIPE_CRC_RES_1_B_IVB 0x61064
1894#define _PIPE_CRC_RES_2_B_IVB 0x61068
1895#define _PIPE_CRC_RES_3_B_IVB 0x6106c
1896#define _PIPE_CRC_RES_4_B_IVB 0x61070
1897#define _PIPE_CRC_RES_5_B_IVB 0x61074
1898
1899#define PIPE_CRC_CTL(pipe) _PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000)
1900#define PIPE_CRC_RES_1_IVB(pipe) \
1901 _PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB)
1902#define PIPE_CRC_RES_2_IVB(pipe) \
1903 _PIPE(pipe, _PIPE_CRC_RES_2_A_IVB, _PIPE_CRC_RES_2_B_IVB)
1904#define PIPE_CRC_RES_3_IVB(pipe) \
1905 _PIPE(pipe, _PIPE_CRC_RES_3_A_IVB, _PIPE_CRC_RES_3_B_IVB)
1906#define PIPE_CRC_RES_4_IVB(pipe) \
1907 _PIPE(pipe, _PIPE_CRC_RES_4_A_IVB, _PIPE_CRC_RES_4_B_IVB)
1908#define PIPE_CRC_RES_5_IVB(pipe) \
1909 _PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB)
1910
1911#define PIPE_CRC_RES_RED(pipe) \
1912 _PIPE_INC(pipe, _PIPE_CRC_RES_RED_A, 0x01000)
1913#define PIPE_CRC_RES_GREEN(pipe) \
1914 _PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A, 0x01000)
1915#define PIPE_CRC_RES_BLUE(pipe) \
1916 _PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A, 0x01000)
1917#define PIPE_CRC_RES_RES1_I915(pipe) \
1918 _PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_I915, 0x01000)
1919#define PIPE_CRC_RES_RES2_G4X(pipe) \
1920 _PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_G4X, 0x01000)
1921
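
With the layout above, each pipe's CRC block repeats at a 0x1000 stride from the pipe A base (the ivb result registers instead come as explicit A/B pairs). A sketch of the resulting addresses, taking display_mmio_offset as 0 for simplicity:

	#include <stdio.h>

	#define _PIPE_INC(pipe, base, inc)	((base) + (pipe)*(inc))
	#define PIPE_CRC_CTL(pipe)		_PIPE_INC(pipe, 0x60050, 0x01000)
	#define PIPE_CRC_RES_RED(pipe)		_PIPE_INC(pipe, 0x60060, 0x01000)

	int main(void)
	{
		int pipe;

		for (pipe = 0; pipe < 2; pipe++)
			printf("pipe %c: ctl 0x%x, red result 0x%x\n",
			       'A' + pipe, PIPE_CRC_CTL(pipe),
			       PIPE_CRC_RES_RED(pipe));
		return 0;
	}
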
1774/* Pipe A timing regs */ 1922/* Pipe A timing regs */
1775#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000) 1923#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000)
1776#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004) 1924#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004)
@@ -1793,7 +1941,6 @@
1793#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020) 1941#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020)
1794#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028) 1942#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028)
1795 1943
1796
1797#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B) 1944#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
1798#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B) 1945#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
1799#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B) 1946#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
@@ -1804,7 +1951,8 @@
1804#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) 1951#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
1805 1952
1806/* HSW eDP PSR registers */ 1953/* HSW eDP PSR registers */
1807#define EDP_PSR_CTL 0x64800 1954#define EDP_PSR_BASE(dev) 0x64800
1955#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
1808#define EDP_PSR_ENABLE (1<<31) 1956#define EDP_PSR_ENABLE (1<<31)
1809#define EDP_PSR_LINK_DISABLE (0<<27) 1957#define EDP_PSR_LINK_DISABLE (0<<27)
1810#define EDP_PSR_LINK_STANDBY (1<<27) 1958#define EDP_PSR_LINK_STANDBY (1<<27)
@@ -1827,16 +1975,16 @@
1827#define EDP_PSR_TP1_TIME_0us (3<<4) 1975#define EDP_PSR_TP1_TIME_0us (3<<4)
1828#define EDP_PSR_IDLE_FRAME_SHIFT 0 1976#define EDP_PSR_IDLE_FRAME_SHIFT 0
1829 1977
1830#define EDP_PSR_AUX_CTL 0x64810 1978#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10)
1831#define EDP_PSR_AUX_DATA1 0x64814 1979#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14)
1832#define EDP_PSR_DPCD_COMMAND 0x80060000 1980#define EDP_PSR_DPCD_COMMAND 0x80060000
1833#define EDP_PSR_AUX_DATA2 0x64818 1981#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18)
1834#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24) 1982#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
1835#define EDP_PSR_AUX_DATA3 0x6481c 1983#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c)
1836#define EDP_PSR_AUX_DATA4 0x64820 1984#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20)
1837#define EDP_PSR_AUX_DATA5 0x64824 1985#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24)
1838 1986
1839#define EDP_PSR_STATUS_CTL 0x64840 1987#define EDP_PSR_STATUS_CTL(dev) (EDP_PSR_BASE(dev) + 0x40)
1840#define EDP_PSR_STATUS_STATE_MASK (7<<29) 1988#define EDP_PSR_STATUS_STATE_MASK (7<<29)
1841#define EDP_PSR_STATUS_STATE_IDLE (0<<29) 1989#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
1842#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29) 1990#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
@@ -1860,10 +2008,10 @@
1860#define EDP_PSR_STATUS_SENDING_TP1 (1<<4) 2008#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
1861#define EDP_PSR_STATUS_IDLE_MASK 0xf 2009#define EDP_PSR_STATUS_IDLE_MASK 0xf
1862 2010
1863#define EDP_PSR_PERF_CNT 0x64844 2011#define EDP_PSR_PERF_CNT(dev) (EDP_PSR_BASE(dev) + 0x44)
1864#define EDP_PSR_PERF_CNT_MASK 0xffffff 2012#define EDP_PSR_PERF_CNT_MASK 0xffffff
1865 2013
1866#define EDP_PSR_DEBUG_CTL 0x64860 2014#define EDP_PSR_DEBUG_CTL(dev) (EDP_PSR_BASE(dev) + 0x60)
1867#define EDP_PSR_DEBUG_MASK_LPSP (1<<27) 2015#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
1868#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26) 2016#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
1869#define EDP_PSR_DEBUG_MASK_HPD (1<<25) 2017#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
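
The PSR hunks above rewrite every register as EDP_PSR_BASE(dev) plus a fixed offset, so a platform that relocates the PSR block only needs a different base. A quick check that the base-plus-offset forms reproduce the old flat constants, using the 0x64800 base shown here:

	#include <assert.h>

	#define EDP_PSR_BASE		0x64800	/* per-device in the driver */
	#define EDP_PSR_CTL		(EDP_PSR_BASE + 0)
	#define EDP_PSR_AUX_CTL		(EDP_PSR_BASE + 0x10)
	#define EDP_PSR_STATUS_CTL	(EDP_PSR_BASE + 0x40)

	int main(void)
	{
		assert(EDP_PSR_CTL == 0x64800);
		assert(EDP_PSR_AUX_CTL == 0x64810);
		assert(EDP_PSR_STATUS_CTL == 0x64840);
		return 0;
	}
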
@@ -2006,6 +2154,14 @@
2006#define PCH_HDMIC 0xe1150 2154#define PCH_HDMIC 0xe1150
2007#define PCH_HDMID 0xe1160 2155#define PCH_HDMID 0xe1160
2008 2156
2157#define PORT_DFT_I9XX 0x61150
2158#define DC_BALANCE_RESET (1 << 25)
2159#define PORT_DFT2_G4X 0x61154
2160#define DC_BALANCE_RESET_VLV (1 << 31)
2161#define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0)
2162#define PIPE_B_SCRAMBLE_RESET (1 << 1)
2163#define PIPE_A_SCRAMBLE_RESET (1 << 0)
2164
2009/* Gen 3 SDVO bits: */ 2165/* Gen 3 SDVO bits: */
2010#define SDVO_ENABLE (1 << 31) 2166#define SDVO_ENABLE (1 << 31)
2011#define SDVO_PIPE_SEL(pipe) ((pipe) << 30) 2167#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
@@ -2034,6 +2190,7 @@
2034 2190
2035/* Gen 4 SDVO/HDMI bits: */ 2191/* Gen 4 SDVO/HDMI bits: */
2036#define SDVO_COLOR_FORMAT_8bpc (0 << 26) 2192#define SDVO_COLOR_FORMAT_8bpc (0 << 26)
2193#define SDVO_COLOR_FORMAT_MASK (7 << 26)
2037#define SDVO_ENCODING_SDVO (0 << 10) 2194#define SDVO_ENCODING_SDVO (0 << 10)
2038#define SDVO_ENCODING_HDMI (2 << 10) 2195#define SDVO_ENCODING_HDMI (2 << 10)
2039#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */ 2196#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
@@ -2986,6 +3143,7 @@
2986#define PIPECONF_DISABLE 0 3143#define PIPECONF_DISABLE 0
2987#define PIPECONF_DOUBLE_WIDE (1<<30) 3144#define PIPECONF_DOUBLE_WIDE (1<<30)
2988#define I965_PIPECONF_ACTIVE (1<<30) 3145#define I965_PIPECONF_ACTIVE (1<<30)
3146#define PIPECONF_DSI_PLL_LOCKED (1<<29) /* vlv & pipe A only */
2989#define PIPECONF_FRAME_START_DELAY_MASK (3<<27) 3147#define PIPECONF_FRAME_START_DELAY_MASK (3<<27)
2990#define PIPECONF_SINGLE_WIDE 0 3148#define PIPECONF_SINGLE_WIDE 0
2991#define PIPECONF_PIPE_UNLOCKED 0 3149#define PIPECONF_PIPE_UNLOCKED 0
@@ -3184,11 +3342,11 @@
3184 3342
3185/* define the Watermark register on Ironlake */ 3343/* define the Watermark register on Ironlake */
3186#define WM0_PIPEA_ILK 0x45100 3344#define WM0_PIPEA_ILK 0x45100
3187#define WM0_PIPE_PLANE_MASK (0x7f<<16) 3345#define WM0_PIPE_PLANE_MASK (0xffff<<16)
3188#define WM0_PIPE_PLANE_SHIFT 16 3346#define WM0_PIPE_PLANE_SHIFT 16
3189#define WM0_PIPE_SPRITE_MASK (0x3f<<8) 3347#define WM0_PIPE_SPRITE_MASK (0xff<<8)
3190#define WM0_PIPE_SPRITE_SHIFT 8 3348#define WM0_PIPE_SPRITE_SHIFT 8
3191#define WM0_PIPE_CURSOR_MASK (0x1f) 3349#define WM0_PIPE_CURSOR_MASK (0xff)
3192 3350
3193#define WM0_PIPEB_ILK 0x45104 3351#define WM0_PIPEB_ILK 0x45104
3194#define WM0_PIPEC_IVB 0x45200 3352#define WM0_PIPEC_IVB 0x45200
@@ -3198,9 +3356,9 @@
3198#define WM1_LP_LATENCY_MASK (0x7f<<24) 3356#define WM1_LP_LATENCY_MASK (0x7f<<24)
3199#define WM1_LP_FBC_MASK (0xf<<20) 3357#define WM1_LP_FBC_MASK (0xf<<20)
3200#define WM1_LP_FBC_SHIFT 20 3358#define WM1_LP_FBC_SHIFT 20
3201#define WM1_LP_SR_MASK (0x1ff<<8) 3359#define WM1_LP_SR_MASK (0x7ff<<8)
3202#define WM1_LP_SR_SHIFT 8 3360#define WM1_LP_SR_SHIFT 8
3203#define WM1_LP_CURSOR_MASK (0x3f) 3361#define WM1_LP_CURSOR_MASK (0xff)
3204#define WM2_LP_ILK 0x4510c 3362#define WM2_LP_ILK 0x4510c
3205#define WM2_LP_EN (1<<31) 3363#define WM2_LP_EN (1<<31)
3206#define WM3_LP_ILK 0x45110 3364#define WM3_LP_ILK 0x45110
@@ -3281,17 +3439,17 @@
3281 * } while (high1 != high2); 3439 * } while (high1 != high2);
3282 * frame = (high1 << 8) | low1; 3440 * frame = (high1 << 8) | low1;
3283 */ 3441 */
3284#define _PIPEAFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x70040) 3442#define _PIPEAFRAMEHIGH 0x70040
3285#define PIPE_FRAME_HIGH_MASK 0x0000ffff 3443#define PIPE_FRAME_HIGH_MASK 0x0000ffff
3286#define PIPE_FRAME_HIGH_SHIFT 0 3444#define PIPE_FRAME_HIGH_SHIFT 0
3287#define _PIPEAFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x70044) 3445#define _PIPEAFRAMEPIXEL 0x70044
3288#define PIPE_FRAME_LOW_MASK 0xff000000 3446#define PIPE_FRAME_LOW_MASK 0xff000000
3289#define PIPE_FRAME_LOW_SHIFT 24 3447#define PIPE_FRAME_LOW_SHIFT 24
3290#define PIPE_PIXEL_MASK 0x00ffffff 3448#define PIPE_PIXEL_MASK 0x00ffffff
3291#define PIPE_PIXEL_SHIFT 0 3449#define PIPE_PIXEL_SHIFT 0
3292/* GM45+ just has to be different */ 3450/* GM45+ just has to be different */
3293#define _PIPEA_FRMCOUNT_GM45 0x70040 3451#define _PIPEA_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70040)
3294#define _PIPEA_FLIPCOUNT_GM45 0x70044 3452#define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70044)
3295#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) 3453#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
3296 3454
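
The comment above documents the race-free way to read the split frame counter: re-read the high half until it is stable across the low-half read, then combine. A user-space sketch of the loop against a fake register file; real hardware additionally requires shifting the low byte out of PIPE_FRAME_LOW_MASK first.

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t fake_high = 0x12, fake_low = 0x34;	/* stand-in regs */

	static uint32_t read_high(void) { return fake_high; }
	static uint32_t read_low(void)  { return fake_low; }

	static uint32_t read_frame_counter(void)
	{
		uint32_t high1, high2, low;

		do {
			high1 = read_high();
			low = read_low();
			high2 = read_high();
		} while (high1 != high2);	/* low wrapped mid-read: retry */

		return (high1 << 8) | low;
	}

	int main(void)
	{
		printf("frame = 0x%x\n", (unsigned)read_frame_counter());	/* 0x1234 */
		return 0;
	}
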
3297/* Cursor A & B regs */ 3455/* Cursor A & B regs */
@@ -3422,10 +3580,10 @@
3422#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000) 3580#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000)
3423#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008) 3581#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008)
3424#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024) 3582#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024)
3425#define _PIPEBFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x71040) 3583#define _PIPEBFRAMEHIGH 0x71040
3426#define _PIPEBFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x71044) 3584#define _PIPEBFRAMEPIXEL 0x71044
3427#define _PIPEB_FRMCOUNT_GM45 0x71040 3585#define _PIPEB_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71040)
3428#define _PIPEB_FLIPCOUNT_GM45 0x71044 3586#define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71044)
3429 3587
3430 3588
3431/* Display B control */ 3589/* Display B control */
@@ -3780,6 +3938,7 @@
3780#define DE_SPRITEA_FLIP_DONE (1 << 28) 3938#define DE_SPRITEA_FLIP_DONE (1 << 28)
3781#define DE_PLANEB_FLIP_DONE (1 << 27) 3939#define DE_PLANEB_FLIP_DONE (1 << 27)
3782#define DE_PLANEA_FLIP_DONE (1 << 26) 3940#define DE_PLANEA_FLIP_DONE (1 << 26)
3941#define DE_PLANE_FLIP_DONE(plane) (1 << (26 + (plane)))
3783#define DE_PCU_EVENT (1 << 25) 3942#define DE_PCU_EVENT (1 << 25)
3784#define DE_GTT_FAULT (1 << 24) 3943#define DE_GTT_FAULT (1 << 24)
3785#define DE_POISON (1 << 23) 3944#define DE_POISON (1 << 23)
@@ -3793,13 +3952,18 @@
3793#define DE_PIPEB_ODD_FIELD (1 << 13) 3952#define DE_PIPEB_ODD_FIELD (1 << 13)
3794#define DE_PIPEB_LINE_COMPARE (1 << 12) 3953#define DE_PIPEB_LINE_COMPARE (1 << 12)
3795#define DE_PIPEB_VSYNC (1 << 11) 3954#define DE_PIPEB_VSYNC (1 << 11)
3955#define DE_PIPEB_CRC_DONE (1 << 10)
3796#define DE_PIPEB_FIFO_UNDERRUN (1 << 8) 3956#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
3797#define DE_PIPEA_VBLANK (1 << 7) 3957#define DE_PIPEA_VBLANK (1 << 7)
3958#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8*(pipe)))
3798#define DE_PIPEA_EVEN_FIELD (1 << 6) 3959#define DE_PIPEA_EVEN_FIELD (1 << 6)
3799#define DE_PIPEA_ODD_FIELD (1 << 5) 3960#define DE_PIPEA_ODD_FIELD (1 << 5)
3800#define DE_PIPEA_LINE_COMPARE (1 << 4) 3961#define DE_PIPEA_LINE_COMPARE (1 << 4)
3801#define DE_PIPEA_VSYNC (1 << 3) 3962#define DE_PIPEA_VSYNC (1 << 3)
3963#define DE_PIPEA_CRC_DONE (1 << 2)
3964#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8*(pipe)))
3802#define DE_PIPEA_FIFO_UNDERRUN (1 << 0) 3965#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
3966#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8*(pipe)))
3803 3967
3804/* More Ivybridge lolz */ 3968/* More Ivybridge lolz */
3805#define DE_ERR_INT_IVB (1<<30) 3969#define DE_ERR_INT_IVB (1<<30)
@@ -3815,9 +3979,8 @@
3815#define DE_PIPEB_VBLANK_IVB (1<<5) 3979#define DE_PIPEB_VBLANK_IVB (1<<5)
3816#define DE_SPRITEA_FLIP_DONE_IVB (1<<4) 3980#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
3817#define DE_PLANEA_FLIP_DONE_IVB (1<<3) 3981#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
3982#define DE_PLANE_FLIP_DONE_IVB(plane) (1<< (3 + 5*(plane)))
3818#define DE_PIPEA_VBLANK_IVB (1<<0) 3983#define DE_PIPEA_VBLANK_IVB (1<<0)
3819
3820#define DE_PIPE_VBLANK_ILK(pipe) (1 << ((pipe * 8) + 7))
3821#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5)) 3984#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5))
3822 3985
3823#define VLV_MASTER_IER 0x4400c /* Gunit master IER */ 3986#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
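
The parameterized ILK display-engine macros above rely on each pipe's status bits sitting eight positions apart. A sanity check that the pipe forms reproduce the named pipe A/B constants:

	#include <assert.h>

	#define DE_PIPE_VBLANK(pipe)		(1 << (7 + 8*(pipe)))
	#define DE_PIPE_CRC_DONE(pipe)		(1 << (2 + 8*(pipe)))
	#define DE_PIPE_FIFO_UNDERRUN(pipe)	(1 << (8*(pipe)))

	int main(void)
	{
		assert(DE_PIPE_VBLANK(0) == (1 << 7));		/* DE_PIPEA_VBLANK */
		assert(DE_PIPE_VBLANK(1) == (1 << 15));		/* DE_PIPEB_VBLANK */
		assert(DE_PIPE_CRC_DONE(1) == (1 << 10));	/* DE_PIPEB_CRC_DONE */
		assert(DE_PIPE_FIFO_UNDERRUN(1) == (1 << 8));	/* DE_PIPEB_FIFO_UNDERRUN */
		return 0;
	}
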
@@ -4416,6 +4579,8 @@
4416#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200) 4579#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
4417#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204) 4580#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
4418#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208) 4581#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
4582#define PANEL_PORT_SELECT_DPB_VLV (1 << 30)
4583#define PANEL_PORT_SELECT_DPC_VLV (2 << 30)
4419#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c) 4584#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
4420#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210) 4585#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
4421 4586
@@ -4447,7 +4612,6 @@
4447#define PANEL_PORT_SELECT_MASK (3 << 30) 4612#define PANEL_PORT_SELECT_MASK (3 << 30)
4448#define PANEL_PORT_SELECT_LVDS (0 << 30) 4613#define PANEL_PORT_SELECT_LVDS (0 << 30)
4449#define PANEL_PORT_SELECT_DPA (1 << 30) 4614#define PANEL_PORT_SELECT_DPA (1 << 30)
4450#define EDP_PANEL (1 << 30)
4451#define PANEL_PORT_SELECT_DPC (2 << 30) 4615#define PANEL_PORT_SELECT_DPC (2 << 30)
4452#define PANEL_PORT_SELECT_DPD (3 << 30) 4616#define PANEL_PORT_SELECT_DPD (3 << 30)
4453#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000) 4617#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)
@@ -4456,11 +4620,6 @@
4456#define PANEL_LIGHT_ON_DELAY_SHIFT 0 4620#define PANEL_LIGHT_ON_DELAY_SHIFT 0
4457 4621
4458#define PCH_PP_OFF_DELAYS 0xc720c 4622#define PCH_PP_OFF_DELAYS 0xc720c
4459#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30)
4460#define PANEL_POWER_PORT_LVDS (0 << 30)
4461#define PANEL_POWER_PORT_DP_A (1 << 30)
4462#define PANEL_POWER_PORT_DP_C (2 << 30)
4463#define PANEL_POWER_PORT_DP_D (3 << 30)
4464#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) 4623#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
4465#define PANEL_POWER_DOWN_DELAY_SHIFT 16 4624#define PANEL_POWER_DOWN_DELAY_SHIFT 16
4466#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) 4625#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
@@ -4638,7 +4797,7 @@
4638#define GEN6_RP_UP_IDLE_MIN (0x1<<3) 4797#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
4639#define GEN6_RP_UP_BUSY_AVG (0x2<<3) 4798#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
4640#define GEN6_RP_UP_BUSY_CONT (0x4<<3) 4799#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
4641#define GEN7_RP_DOWN_IDLE_AVG (0x2<<0) 4800#define GEN6_RP_DOWN_IDLE_AVG (0x2<<0)
4642#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0) 4801#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
4643#define GEN6_RP_UP_THRESHOLD 0xA02C 4802#define GEN6_RP_UP_THRESHOLD 0xA02C
4644#define GEN6_RP_DOWN_THRESHOLD 0xA030 4803#define GEN6_RP_DOWN_THRESHOLD 0xA030
@@ -4683,6 +4842,10 @@
4683 GEN6_PM_RP_DOWN_TIMEOUT) 4842 GEN6_PM_RP_DOWN_TIMEOUT)
4684 4843
4685#define GEN6_GT_GFX_RC6_LOCKED 0x138104 4844#define GEN6_GT_GFX_RC6_LOCKED 0x138104
4845#define VLV_COUNTER_CONTROL 0x138104
4846#define VLV_COUNT_RANGE_HIGH (1<<15)
4847#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
4848#define VLV_RENDER_RC6_COUNT_EN (1<<0)
4686#define GEN6_GT_GFX_RC6 0x138108 4849#define GEN6_GT_GFX_RC6 0x138108
4687#define GEN6_GT_GFX_RC6p 0x13810C 4850#define GEN6_GT_GFX_RC6p 0x13810C
4688#define GEN6_GT_GFX_RC6pp 0x138110 4851#define GEN6_GT_GFX_RC6pp 0x138110
@@ -4694,6 +4857,8 @@
4694#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 4857#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
4695#define GEN6_PCODE_WRITE_RC6VIDS 0x4 4858#define GEN6_PCODE_WRITE_RC6VIDS 0x4
4696#define GEN6_PCODE_READ_RC6VIDS 0x5 4859#define GEN6_PCODE_READ_RC6VIDS 0x5
4860#define GEN6_PCODE_READ_D_COMP 0x10
4861#define GEN6_PCODE_WRITE_D_COMP 0x11
4697#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) 4862#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
4698#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) 4863#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
4699#define GEN6_PCODE_DATA 0x138128 4864#define GEN6_PCODE_DATA 0x138128
@@ -4713,6 +4878,7 @@
4713 4878
4714/* IVYBRIDGE DPF */ 4879/* IVYBRIDGE DPF */
4715#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */ 4880#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */
4881#define HSW_L3CDERRST11 0xB208 /* L3CD Error Status register 1 slice 1 */
4716#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14) 4882#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
4717#define GEN7_PARITY_ERROR_VALID (1<<13) 4883#define GEN7_PARITY_ERROR_VALID (1<<13)
4718#define GEN7_L3CDERRST1_BANK_MASK (3<<11) 4884#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
@@ -4726,6 +4892,7 @@
4726#define GEN7_L3CDERRST1_ENABLE (1<<7) 4892#define GEN7_L3CDERRST1_ENABLE (1<<7)
4727 4893
4728#define GEN7_L3LOG_BASE 0xB070 4894#define GEN7_L3LOG_BASE 0xB070
4895#define HSW_L3LOG_BASE_SLICE1 0xB270
4729#define GEN7_L3LOG_SIZE 0x80 4896#define GEN7_L3LOG_SIZE 0x80
4730 4897
4731#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */ 4898#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
@@ -4804,7 +4971,17 @@
4804#define AUD_CONFIG_LOWER_N_SHIFT 4 4971#define AUD_CONFIG_LOWER_N_SHIFT 4
4805#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4) 4972#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4)
4806#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16 4973#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
4807#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16) 4974#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
4975#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
4976#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 (1 << 16)
4977#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 (2 << 16)
4978#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 (3 << 16)
4979#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 (4 << 16)
4980#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 (5 << 16)
4981#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 (6 << 16)
4982#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16)
4983#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16)
4984#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16)
4808#define AUD_CONFIG_DISABLE_NCTS (1 << 3) 4985#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
4809 4986
4810/* HSW Audio */ 4987/* HSW Audio */
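
The expanded AUD_CONFIG encodings above form a lookup table of standard HDMI pixel clocks: the 4-bit field value is simply the table index. An illustrative mapping from a pixel clock in kHz to the field; the fallback for unlisted rates is arbitrary here.

	#include <stdio.h>

	static unsigned int aud_config_pixel_clock(int clock_khz)
	{
		static const int rates_khz[] = {
			25175, 25200, 27000, 27027, 54000,
			54054, 74176, 74250, 148352, 148500,
		};
		unsigned int i;

		for (i = 0; i < sizeof(rates_khz)/sizeof(rates_khz[0]); i++)
			if (rates_khz[i] == clock_khz)
				return i << 16;	/* AUD_CONFIG_PIXEL_CLOCK_HDMI_* */

		return 0;	/* illustrative fallback: the 25.175 MHz encoding */
	}

	int main(void)
	{
		printf("74250 kHz -> 0x%x\n", aud_config_pixel_clock(74250));
		return 0;
	}
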
@@ -5128,4 +5305,414 @@
5128#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME) 5305#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
5129#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO) 5306#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
5130 5307
5308/* VLV MIPI registers */
5309
5310#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
5311#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
5312#define MIPI_PORT_CTRL(pipe) _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
5313#define DPI_ENABLE (1 << 31) /* A + B */
5314#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
5315#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
5316#define DUAL_LINK_MODE_MASK (1 << 26)
5317#define DUAL_LINK_MODE_FRONT_BACK (0 << 26)
5318#define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26)
5319#define DITHERING_ENABLE (1 << 25) /* A + B */
5320#define FLOPPED_HSTX (1 << 23)
5321#define DE_INVERT (1 << 19) /* XXX */
5322#define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18
5323#define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18)
5324#define AFE_LATCHOUT (1 << 17)
5325#define LP_OUTPUT_HOLD (1 << 16)
5326#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
5327#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
5328#define MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT 11
5329#define MIPIB_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
5330#define CSB_SHIFT 9
5331#define CSB_MASK (3 << 9)
5332#define CSB_20MHZ (0 << 9)
5333#define CSB_10MHZ (1 << 9)
5334#define CSB_40MHZ (2 << 9)
5335#define BANDGAP_MASK (1 << 8)
5336#define BANDGAP_PNW_CIRCUIT (0 << 8)
5337#define BANDGAP_LNC_CIRCUIT (1 << 8)
5338#define MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
5339#define MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
5340#define TEARING_EFFECT_DELAY (1 << 4) /* A + B */
5341#define TEARING_EFFECT_SHIFT 2 /* A + B */
5342#define TEARING_EFFECT_MASK (3 << 2)
5343#define TEARING_EFFECT_OFF (0 << 2)
5344#define TEARING_EFFECT_DSI (1 << 2)
5345#define TEARING_EFFECT_GPIO (2 << 2)
5346#define LANE_CONFIGURATION_SHIFT 0
5347#define LANE_CONFIGURATION_MASK (3 << 0)
5348#define LANE_CONFIGURATION_4LANE (0 << 0)
5349#define LANE_CONFIGURATION_DUAL_LINK_A (1 << 0)
5350#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0)
5351
5352#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
5353#define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
5354#define MIPI_TEARING_CTRL(pipe) _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
5355#define TEARING_EFFECT_DELAY_SHIFT 0
5356#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
5357
5358/* XXX: all bits reserved */
5359#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
5360
5361/* MIPI DSI Controller and D-PHY registers */
5362
5363#define _MIPIA_DEVICE_READY (VLV_DISPLAY_BASE + 0xb000)
5364#define _MIPIB_DEVICE_READY (VLV_DISPLAY_BASE + 0xb800)
5365#define MIPI_DEVICE_READY(pipe) _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
5366#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
5367#define ULPS_STATE_MASK (3 << 1)
5368#define ULPS_STATE_ENTER (2 << 1)
5369#define ULPS_STATE_EXIT (1 << 1)
5370#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
5371#define DEVICE_READY (1 << 0)
5372
5373#define _MIPIA_INTR_STAT (VLV_DISPLAY_BASE + 0xb004)
5374#define _MIPIB_INTR_STAT (VLV_DISPLAY_BASE + 0xb804)
5375#define MIPI_INTR_STAT(pipe) _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
5376#define _MIPIA_INTR_EN (VLV_DISPLAY_BASE + 0xb008)
5377#define _MIPIB_INTR_EN (VLV_DISPLAY_BASE + 0xb808)
5378#define MIPI_INTR_EN(pipe) _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
5379#define TEARING_EFFECT (1 << 31)
5380#define SPL_PKT_SENT_INTERRUPT (1 << 30)
5381#define GEN_READ_DATA_AVAIL (1 << 29)
5382#define LP_GENERIC_WR_FIFO_FULL (1 << 28)
5383#define HS_GENERIC_WR_FIFO_FULL (1 << 27)
5384#define RX_PROT_VIOLATION (1 << 26)
5385#define RX_INVALID_TX_LENGTH (1 << 25)
5386#define ACK_WITH_NO_ERROR (1 << 24)
5387#define TURN_AROUND_ACK_TIMEOUT (1 << 23)
5388#define LP_RX_TIMEOUT (1 << 22)
5389#define HS_TX_TIMEOUT (1 << 21)
5390#define DPI_FIFO_UNDERRUN (1 << 20)
5391#define LOW_CONTENTION (1 << 19)
5392#define HIGH_CONTENTION (1 << 18)
5393#define TXDSI_VC_ID_INVALID (1 << 17)
5394#define TXDSI_DATA_TYPE_NOT_RECOGNISED (1 << 16)
5395#define TXCHECKSUM_ERROR (1 << 15)
5396#define TXECC_MULTIBIT_ERROR (1 << 14)
5397#define TXECC_SINGLE_BIT_ERROR (1 << 13)
5398#define TXFALSE_CONTROL_ERROR (1 << 12)
5399#define RXDSI_VC_ID_INVALID (1 << 11)
5400#define RXDSI_DATA_TYPE_NOT_REGOGNISED (1 << 10)
5401#define RXCHECKSUM_ERROR (1 << 9)
5402#define RXECC_MULTIBIT_ERROR (1 << 8)
5403#define RXECC_SINGLE_BIT_ERROR (1 << 7)
5404#define RXFALSE_CONTROL_ERROR (1 << 6)
5405#define RXHS_RECEIVE_TIMEOUT_ERROR (1 << 5)
5406#define RX_LP_TX_SYNC_ERROR (1 << 4)
5407#define RXEXCAPE_MODE_ENTRY_ERROR (1 << 3)
5408#define RXEOT_SYNC_ERROR (1 << 2)
5409#define RXSOT_SYNC_ERROR (1 << 1)
5410#define RXSOT_ERROR (1 << 0)
5411
5412#define _MIPIA_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb00c)
5413#define _MIPIB_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb80c)
5414#define MIPI_DSI_FUNC_PRG(pipe) _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
5415#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
5416#define CMD_MODE_NOT_SUPPORTED (0 << 13)
5417#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
5418#define CMD_MODE_DATA_WIDTH_9_BIT (2 << 13)
5419#define CMD_MODE_DATA_WIDTH_8_BIT (3 << 13)
5420#define CMD_MODE_DATA_WIDTH_OPTION1 (4 << 13)
5421#define CMD_MODE_DATA_WIDTH_OPTION2 (5 << 13)
5422#define VID_MODE_FORMAT_MASK (0xf << 7)
5423#define VID_MODE_NOT_SUPPORTED (0 << 7)
5424#define VID_MODE_FORMAT_RGB565 (1 << 7)
5425#define VID_MODE_FORMAT_RGB666 (2 << 7)
5426#define VID_MODE_FORMAT_RGB666_LOOSE (3 << 7)
5427#define VID_MODE_FORMAT_RGB888 (4 << 7)
5428#define CMD_MODE_CHANNEL_NUMBER_SHIFT 5
5429#define CMD_MODE_CHANNEL_NUMBER_MASK (3 << 5)
5430#define VID_MODE_CHANNEL_NUMBER_SHIFT 3
5431#define VID_MODE_CHANNEL_NUMBER_MASK (3 << 3)
5432#define DATA_LANES_PRG_REG_SHIFT 0
5433#define DATA_LANES_PRG_REG_MASK (7 << 0)
5434
5435#define _MIPIA_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb010)
5436#define _MIPIB_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb810)
5437#define MIPI_HS_TX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
5438#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
5439
5440#define _MIPIA_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb014)
5441#define _MIPIB_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb814)
5442#define MIPI_LP_RX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
5443#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
5444
5445#define _MIPIA_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb018)
5446#define _MIPIB_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb818)
5447#define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
5448#define TURN_AROUND_TIMEOUT_MASK 0x3f
5449
5450#define _MIPIA_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb01c)
5451#define _MIPIB_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb81c)
5452#define MIPI_DEVICE_RESET_TIMER(pipe) _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
5453#define DEVICE_RESET_TIMER_MASK 0xffff
5454
5455#define _MIPIA_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb020)
5456#define _MIPIB_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb820)
5457#define MIPI_DPI_RESOLUTION(pipe) _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
5458#define VERTICAL_ADDRESS_SHIFT 16
5459#define VERTICAL_ADDRESS_MASK (0xffff << 16)
5460#define HORIZONTAL_ADDRESS_SHIFT 0
5461#define HORIZONTAL_ADDRESS_MASK 0xffff
5462
5463#define _MIPIA_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb024)
5464#define _MIPIB_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb824)
5465#define MIPI_DBI_FIFO_THROTTLE(pipe) _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
5466#define DBI_FIFO_EMPTY_HALF (0 << 0)
5467#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
5468#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
5469
5470/* regs below are bits 15:0 */
5471#define _MIPIA_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb028)
5472#define _MIPIB_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb828)
5473#define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
5474
5475#define _MIPIA_HBP_COUNT (VLV_DISPLAY_BASE + 0xb02c)
5476#define _MIPIB_HBP_COUNT (VLV_DISPLAY_BASE + 0xb82c)
5477#define MIPI_HBP_COUNT(pipe) _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)
5478
5479#define _MIPIA_HFP_COUNT (VLV_DISPLAY_BASE + 0xb030)
5480#define _MIPIB_HFP_COUNT (VLV_DISPLAY_BASE + 0xb830)
5481#define MIPI_HFP_COUNT(pipe) _PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)
5482
5483#define _MIPIA_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb034)
5484#define _MIPIB_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb834)
5485#define MIPI_HACTIVE_AREA_COUNT(pipe) _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
5486
5487#define _MIPIA_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb038)
5488#define _MIPIB_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb838)
5489#define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
5490
5491#define _MIPIA_VBP_COUNT (VLV_DISPLAY_BASE + 0xb03c)
5492#define _MIPIB_VBP_COUNT (VLV_DISPLAY_BASE + 0xb83c)
5493#define MIPI_VBP_COUNT(pipe) _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)
5494
5495#define _MIPIA_VFP_COUNT (VLV_DISPLAY_BASE + 0xb040)
5496#define _MIPIB_VFP_COUNT (VLV_DISPLAY_BASE + 0xb840)
5497#define MIPI_VFP_COUNT(pipe) _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)
5498
5499#define _MIPIA_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb044)
5500#define _MIPIB_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb844)
5501#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe) _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
5502/* regs above are bits 15:0 */
5503
5504#define _MIPIA_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb048)
5505#define _MIPIB_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb848)
5506#define MIPI_DPI_CONTROL(pipe) _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
5507#define DPI_LP_MODE (1 << 6)
5508#define BACKLIGHT_OFF (1 << 5)
5509#define BACKLIGHT_ON (1 << 4)
5510#define COLOR_MODE_OFF (1 << 3)
5511#define COLOR_MODE_ON (1 << 2)
5512#define TURN_ON (1 << 1)
5513#define SHUTDOWN (1 << 0)
5514
5515#define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c)
5516#define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c)
5517#define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
5518#define COMMAND_BYTE_SHIFT 0
5519#define COMMAND_BYTE_MASK (0x3f << 0)
5520
5521#define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050)
5522#define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850)
5523#define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
5524#define MASTER_INIT_TIMER_SHIFT 0
5525#define MASTER_INIT_TIMER_MASK (0xffff << 0)
5526
5527#define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054)
5528#define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854)
5529#define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
5530#define MAX_RETURN_PKT_SIZE_SHIFT 0
5531#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
5532
5533#define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058)
5534#define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858)
5535#define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
5536#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
5537#define DISABLE_VIDEO_BTA (1 << 3)
5538#define IP_TG_CONFIG (1 << 2)
5539#define VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE (1 << 0)
5540#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
5541#define VIDEO_MODE_BURST (3 << 0)
5542
5543#define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c)
5544#define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c)
5545#define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
5546#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
5547#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
5548#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
5549#define HIGH_CONTENTION_RECOVERY_DISABLE (1 << 4)
5550#define TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3)
5551#define TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE (1 << 2)
5552#define CLOCKSTOP (1 << 1)
5553#define EOT_DISABLE (1 << 0)
5554
5555#define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060)
5556#define _MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860)
5557#define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
5558#define LP_BYTECLK_SHIFT 0
5559#define LP_BYTECLK_MASK (0xffff << 0)
5560
5561/* bits 31:0 */
5562#define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064)
5563#define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864)
5564#define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)
5565
5566/* bits 31:0 */
5567#define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068)
5568#define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868)
5569#define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)
5570
5571#define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c)
5572#define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c)
5573#define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
5574#define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070)
5575#define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870)
5576#define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
5577#define LONG_PACKET_WORD_COUNT_SHIFT 8
5578#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
5579#define SHORT_PACKET_PARAM_SHIFT 8
5580#define SHORT_PACKET_PARAM_MASK (0xffff << 8)
5581#define VIRTUAL_CHANNEL_SHIFT 6
5582#define VIRTUAL_CHANNEL_MASK (3 << 6)
5583#define DATA_TYPE_SHIFT 0
5584#define DATA_TYPE_MASK (0x3f << 0)
5585/* data type values, see include/video/mipi_display.h */
5586
5587#define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074)
5588#define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874)
5589#define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
5590#define DPI_FIFO_EMPTY (1 << 28)
5591#define DBI_FIFO_EMPTY (1 << 27)
5592#define LP_CTRL_FIFO_EMPTY (1 << 26)
5593#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25)
5594#define LP_CTRL_FIFO_FULL (1 << 24)
5595#define HS_CTRL_FIFO_EMPTY (1 << 18)
5596#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17)
5597#define HS_CTRL_FIFO_FULL (1 << 16)
5598#define LP_DATA_FIFO_EMPTY (1 << 10)
5599#define LP_DATA_FIFO_HALF_EMPTY (1 << 9)
5600#define LP_DATA_FIFO_FULL (1 << 8)
5601#define HS_DATA_FIFO_EMPTY (1 << 2)
5602#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
5603#define HS_DATA_FIFO_FULL (1 << 0)
5604
5605#define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078)
5606#define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878)
5607#define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
5608#define DBI_HS_LP_MODE_MASK (1 << 0)
5609#define DBI_LP_MODE (1 << 0)
5610#define DBI_HS_MODE (0 << 0)
5611
5612#define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080)
5613#define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880)
5614#define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
5615#define EXIT_ZERO_COUNT_SHIFT 24
5616#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
5617#define TRAIL_COUNT_SHIFT 16
5618#define TRAIL_COUNT_MASK (0x1f << 16)
5619#define CLK_ZERO_COUNT_SHIFT 8
5620#define CLK_ZERO_COUNT_MASK (0xff << 8)
5621#define PREPARE_COUNT_SHIFT 0
5622#define PREPARE_COUNT_MASK (0x3f << 0)
5623
5624/* bits 31:0 */
5625#define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084)
5626#define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884)
5627#define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)
5628
5629#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088)
5630#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888)
5631#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
5632#define LP_HS_SSW_CNT_SHIFT 16
5633#define LP_HS_SSW_CNT_MASK (0xffff << 16)
5634#define HS_LP_PWR_SW_CNT_SHIFT 0
5635#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
5636
5637#define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c)
5638#define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c)
5639#define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
5640#define STOP_STATE_STALL_COUNTER_SHIFT 0
5641#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
5642
5643#define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090)
5644#define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890)
5645#define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
5646#define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb094)
5647#define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894)
5648#define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
5649#define RX_CONTENTION_DETECTED (1 << 0)
5650
5651/* XXX: only pipe A ?!? */
5652#define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100)
5653#define DBI_TYPEC_ENABLE (1 << 31)
5654#define DBI_TYPEC_WIP (1 << 30)
5655#define DBI_TYPEC_OPTION_SHIFT 28
5656#define DBI_TYPEC_OPTION_MASK (3 << 28)
5657#define DBI_TYPEC_FREQ_SHIFT 24
5658#define DBI_TYPEC_FREQ_MASK (0xf << 24)
5659#define DBI_TYPEC_OVERRIDE (1 << 8)
5660#define DBI_TYPEC_OVERRIDE_COUNTER_SHIFT 0
5661#define DBI_TYPEC_OVERRIDE_COUNTER_MASK (0xff << 0)
5662
5663
5664/* MIPI adapter registers */
5665
5666#define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104)
5667#define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904)
5668#define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
5669#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
5670#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
5671#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
5672#define ESCAPE_CLOCK_DIVIDER_2 (1 << 5)
5673#define ESCAPE_CLOCK_DIVIDER_4 (2 << 5)
5674#define READ_REQUEST_PRIORITY_SHIFT 3
5675#define READ_REQUEST_PRIORITY_MASK (3 << 3)
5676#define READ_REQUEST_PRIORITY_LOW (0 << 3)
5677#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
5678#define RGB_FLIP_TO_BGR (1 << 2)
5679
5680#define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108)
5681#define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908)
5682#define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
5683#define DATA_MEM_ADDRESS_SHIFT 5
5684#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
5685#define DATA_VALID (1 << 0)
5686
5687#define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c)
5688#define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c)
5689#define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
5690#define DATA_LENGTH_SHIFT 0
5691#define DATA_LENGTH_MASK (0xfffff << 0)
5692
5693#define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110)
5694#define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910)
5695#define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
5696#define COMMAND_MEM_ADDRESS_SHIFT 5
5697#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
5698#define AUTO_PWG_ENABLE (1 << 2)
5699#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
5700#define COMMAND_VALID (1 << 0)
5701
5702#define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114)
5703#define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914)
5704#define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
5705#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
5706#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
5707
5708#define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118)
5709#define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918)
5710#define MIPI_READ_DATA_RETURN(pipe, n) \
5711 (_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
5712
5713#define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138)
5714#define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938)
5715#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
5716#define READ_DATA_VALID(n) (1 << (n))
5717
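All of the MIPI registers above come in a pipe A block at 0xb0xx and a pipe B block at 0xb8xx, selected by _PIPE(); MIPI_READ_DATA_RETURN() additionally indexes eight consecutive dwords. A self-contained sketch of that address arithmetic, re-declaring the macros locally (the linear-interpolation body of _PIPE() is assumed to match the driver's definition):

#include <stdint.h>
#include <stdio.h>

#define VLV_DISPLAY_BASE 0x180000
#define _PIPE(pipe, a, b) ((a) + (pipe) * ((b) - (a)))

#define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118)
#define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918)
#define MIPI_READ_DATA_RETURN(pipe, n) \
	(_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n))

int main(void)
{
	int pipe, n;

	/* The eight return words sit at consecutive dword offsets per pipe. */
	for (pipe = 0; pipe < 2; pipe++)
		for (n = 0; n < 8; n++)
			printf("pipe %c, slot %d -> 0x%06x\n",
			       'A' + pipe, n, MIPI_READ_DATA_RETURN(pipe, n));
	return 0;
}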
5131#endif /* _I915_REG_H_ */ 5718#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 70db618989c4..a088f1f46bdb 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -340,7 +340,9 @@ int i915_save_state(struct drm_device *dev)
340 struct drm_i915_private *dev_priv = dev->dev_private; 340 struct drm_i915_private *dev_priv = dev->dev_private;
341 int i; 341 int i;
342 342
343 pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB); 343 if (INTEL_INFO(dev)->gen <= 4)
344 pci_read_config_byte(dev->pdev, LBB,
345 &dev_priv->regfile.saveLBB);
344 346
345 mutex_lock(&dev->struct_mutex); 347 mutex_lock(&dev->struct_mutex);
346 348
@@ -367,7 +369,8 @@ int i915_save_state(struct drm_device *dev)
367 intel_disable_gt_powersave(dev); 369 intel_disable_gt_powersave(dev);
368 370
369 /* Cache mode state */ 371 /* Cache mode state */
370 dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 372 if (INTEL_INFO(dev)->gen < 7)
373 dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
371 374
372 /* Memory Arbitration state */ 375 /* Memory Arbitration state */
373 dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); 376 dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
@@ -390,7 +393,9 @@ int i915_restore_state(struct drm_device *dev)
390 struct drm_i915_private *dev_priv = dev->dev_private; 393 struct drm_i915_private *dev_priv = dev->dev_private;
391 int i; 394 int i;
392 395
393 pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB); 396 if (INTEL_INFO(dev)->gen <= 4)
397 pci_write_config_byte(dev->pdev, LBB,
398 dev_priv->regfile.saveLBB);
394 399
395 mutex_lock(&dev->struct_mutex); 400 mutex_lock(&dev->struct_mutex);
396 401
@@ -414,7 +419,9 @@ int i915_restore_state(struct drm_device *dev)
414 } 419 }
415 420
416 /* Cache mode state */ 421 /* Cache mode state */
417 I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000); 422 if (INTEL_INFO(dev)->gen < 7)
423 I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
424 0xffff0000);
418 425
419 /* Memory arbitration state */ 426 /* Memory arbitration state */
420 I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000); 427 I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index c8c4112de110..9ff1e4d96909 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -37,12 +37,30 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
37{ 37{
38 struct drm_i915_private *dev_priv = dev->dev_private; 38 struct drm_i915_private *dev_priv = dev->dev_private;
39 u64 raw_time; /* 32b value may overflow during fixed point math */ 39 u64 raw_time; /* 32b value may overflow during fixed point math */
40 u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
40 41
41 if (!intel_enable_rc6(dev)) 42 if (!intel_enable_rc6(dev))
42 return 0; 43 return 0;
43 44
44 raw_time = I915_READ(reg) * 128ULL; 45 /* On VLV, residency time is in CZ units rather than 1.28us */
45 return DIV_ROUND_UP_ULL(raw_time, 100000); 46 if (IS_VALLEYVIEW(dev)) {
47 u32 clkctl2;
48
49 clkctl2 = I915_READ(VLV_CLK_CTL2) >>
50 CLK_CTL2_CZCOUNT_30NS_SHIFT;
51 if (!clkctl2) {
52 WARN(!clkctl2, "bogus CZ count value");
53 return 0;
54 }
55 units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
56 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
57 units <<= 8;
58
59 div = 1000000ULL * bias;
60 }
61
62 raw_time = I915_READ(reg) * units;
63 return DIV_ROUND_UP_ULL(raw_time, div);
46} 64}
47 65
48static ssize_t 66static ssize_t
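The VLV branch above replaces the fixed 1.28 us tick with units derived from the CZ count field. A minimal user-space mirror of that arithmetic (function and parameter names are invented; czcount_30ns is assumed to be the field already shifted out of VLV_CLK_CTL2):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP_ULL(n, d) (((n) + (d) - 1) / (d))

/*
 * Same math as calc_residency() above: on VLV the counter ticks in CZ
 * clock units derived from the CZ count; elsewhere each tick is 1.28 us.
 * Returns milliseconds.
 */
static uint64_t residency_ms(uint32_t raw, int is_vlv, uint32_t czcount_30ns,
			     int high_range)
{
	uint64_t units = 128, div = 100000, bias = 100;

	if (is_vlv) {
		if (!czcount_30ns)
			return 0; /* bogus CZ count */
		units = DIV_ROUND_UP_ULL(30ULL * bias, czcount_30ns);
		if (high_range)
			units <<= 8;
		div = 1000000ULL * bias;
	}
	return DIV_ROUND_UP_ULL((uint64_t)raw * units, div);
}

int main(void)
{
	/* Example counter snapshots; real values come from GEN6_GT_GFX_RC6. */
	printf("%llu ms\n", (unsigned long long)residency_ms(1562500, 0, 0, 0));
	printf("%llu ms\n", (unsigned long long)residency_ms(1000000, 1, 30, 0));
	return 0;
}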
@@ -65,6 +83,8 @@ show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
65{ 83{
66 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); 84 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
67 u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p); 85 u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
86 if (IS_VALLEYVIEW(dminor->dev))
87 rc6p_residency = 0;
68 return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency); 88 return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
69} 89}
70 90
@@ -73,6 +93,8 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
73{ 93{
74 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); 94 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
75 u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp); 95 u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
96 if (IS_VALLEYVIEW(dminor->dev))
97 rc6pp_residency = 0;
76 return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency); 98 return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
77} 99}
78 100
@@ -97,7 +119,7 @@ static struct attribute_group rc6_attr_group = {
97 119
98static int l3_access_valid(struct drm_device *dev, loff_t offset) 120static int l3_access_valid(struct drm_device *dev, loff_t offset)
99{ 121{
100 if (!HAS_L3_GPU_CACHE(dev)) 122 if (!HAS_L3_DPF(dev))
101 return -EPERM; 123 return -EPERM;
102 124
103 if (offset % 4 != 0) 125 if (offset % 4 != 0)
@@ -118,28 +140,31 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
118 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); 140 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
119 struct drm_device *drm_dev = dminor->dev; 141 struct drm_device *drm_dev = dminor->dev;
120 struct drm_i915_private *dev_priv = drm_dev->dev_private; 142 struct drm_i915_private *dev_priv = drm_dev->dev_private;
121 uint32_t misccpctl; 143 int slice = (int)(uintptr_t)attr->private;
122 int i, ret; 144 int ret;
145
146 count = round_down(count, 4);
123 147
124 ret = l3_access_valid(drm_dev, offset); 148 ret = l3_access_valid(drm_dev, offset);
125 if (ret) 149 if (ret)
126 return ret; 150 return ret;
127 151
152 count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
153
128 ret = i915_mutex_lock_interruptible(drm_dev); 154 ret = i915_mutex_lock_interruptible(drm_dev);
129 if (ret) 155 if (ret)
130 return ret; 156 return ret;
131 157
132 misccpctl = I915_READ(GEN7_MISCCPCTL); 158 if (dev_priv->l3_parity.remap_info[slice])
133 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 159 memcpy(buf,
134 160 dev_priv->l3_parity.remap_info[slice] + (offset/4),
135 for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4) 161 count);
136 *((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i); 162 else
137 163 memset(buf, 0, count);
138 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
139 164
140 mutex_unlock(&drm_dev->struct_mutex); 165 mutex_unlock(&drm_dev->struct_mutex);
141 166
142 return i - offset; 167 return count;
143} 168}
144 169
145static ssize_t 170static ssize_t
@@ -151,18 +176,23 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
151 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); 176 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
152 struct drm_device *drm_dev = dminor->dev; 177 struct drm_device *drm_dev = dminor->dev;
153 struct drm_i915_private *dev_priv = drm_dev->dev_private; 178 struct drm_i915_private *dev_priv = drm_dev->dev_private;
179 struct i915_hw_context *ctx;
154 u32 *temp = NULL; /* Just here to make handling failures easy */ 180 u32 *temp = NULL; /* Just here to make handling failures easy */
181 int slice = (int)(uintptr_t)attr->private;
155 int ret; 182 int ret;
156 183
157 ret = l3_access_valid(drm_dev, offset); 184 ret = l3_access_valid(drm_dev, offset);
158 if (ret) 185 if (ret)
159 return ret; 186 return ret;
160 187
188 if (dev_priv->hw_contexts_disabled)
189 return -ENXIO;
190
161 ret = i915_mutex_lock_interruptible(drm_dev); 191 ret = i915_mutex_lock_interruptible(drm_dev);
162 if (ret) 192 if (ret)
163 return ret; 193 return ret;
164 194
165 if (!dev_priv->l3_parity.remap_info) { 195 if (!dev_priv->l3_parity.remap_info[slice]) {
166 temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL); 196 temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
167 if (!temp) { 197 if (!temp) {
168 mutex_unlock(&drm_dev->struct_mutex); 198 mutex_unlock(&drm_dev->struct_mutex);
@@ -182,13 +212,13 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
182 * at this point it is left as a TODO. 212 * at this point it is left as a TODO.
183 */ 213 */
184 if (temp) 214 if (temp)
185 dev_priv->l3_parity.remap_info = temp; 215 dev_priv->l3_parity.remap_info[slice] = temp;
186 216
187 memcpy(dev_priv->l3_parity.remap_info + (offset/4), 217 memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);
188 buf + (offset/4),
189 count);
190 218
191 i915_gem_l3_remap(drm_dev); 219 /* NB: We defer the remapping until we switch to the context */
220 list_for_each_entry(ctx, &dev_priv->context_list, link)
221 ctx->remap_slice |= (1<<slice);
192 222
193 mutex_unlock(&drm_dev->struct_mutex); 223 mutex_unlock(&drm_dev->struct_mutex);
194 224
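Note the behavioural change in the write path above: instead of calling i915_gem_l3_remap() immediately, each context is marked with a pending-slice bit that the context-switch path consumes later. A standalone sketch of that deferred-work pattern (struct and function names are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the driver's per-context state. */
struct ctx {
	uint32_t remap_slice; /* bitmask of L3 slices needing a remap */
};

static void mark_remap_pending(struct ctx *ctxs, int nctx, int slice)
{
	int i;

	for (i = 0; i < nctx; i++)
		ctxs[i].remap_slice |= 1u << slice;
}

/* Consumed at context-switch time: handle each pending slice once. */
static void switch_to(struct ctx *c)
{
	while (c->remap_slice) {
		int slice = __builtin_ctz(c->remap_slice);

		printf("remapping L3 slice %d\n", slice);
		c->remap_slice &= ~(1u << slice);
	}
}

int main(void)
{
	struct ctx ctxs[2] = { {0}, {0} };

	mark_remap_pending(ctxs, 2, 1); /* new parity data for slice 1 */
	switch_to(&ctxs[0]);
	return 0;
}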
@@ -200,7 +230,17 @@ static struct bin_attribute dpf_attrs = {
200 .size = GEN7_L3LOG_SIZE, 230 .size = GEN7_L3LOG_SIZE,
201 .read = i915_l3_read, 231 .read = i915_l3_read,
202 .write = i915_l3_write, 232 .write = i915_l3_write,
203 .mmap = NULL 233 .mmap = NULL,
234 .private = (void *)0
235};
236
237static struct bin_attribute dpf_attrs_1 = {
238 .attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
239 .size = GEN7_L3LOG_SIZE,
240 .read = i915_l3_read,
241 .write = i915_l3_write,
242 .mmap = NULL,
243 .private = (void *)1
204}; 244};
205 245
206static ssize_t gt_cur_freq_mhz_show(struct device *kdev, 246static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
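The second attribute above reuses the same read/write handlers and distinguishes slices by stashing the slice index in the otherwise-unused .private pointer; the handlers recover it as (int)(uintptr_t)attr->private, and the round trip through uintptr_t keeps the integer/pointer conversion well defined. A tiny standalone illustration (struct bin_attr stands in for the kernel's struct bin_attribute):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's struct bin_attribute. */
struct bin_attr {
	void *private;
};

int main(void)
{
	struct bin_attr dpf_attrs_1 = { .private = (void *)(uintptr_t)1 };

	/* Recover the slice index the same way i915_l3_read() does. */
	int slice = (int)(uintptr_t)dpf_attrs_1.private;

	printf("slice = %d\n", slice);
	return 0;
}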
@@ -211,6 +251,8 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
211 struct drm_i915_private *dev_priv = dev->dev_private; 251 struct drm_i915_private *dev_priv = dev->dev_private;
212 int ret; 252 int ret;
213 253
254 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
255
214 mutex_lock(&dev_priv->rps.hw_lock); 256 mutex_lock(&dev_priv->rps.hw_lock);
215 if (IS_VALLEYVIEW(dev_priv->dev)) { 257 if (IS_VALLEYVIEW(dev_priv->dev)) {
216 u32 freq; 258 u32 freq;
@@ -243,6 +285,8 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
243 struct drm_i915_private *dev_priv = dev->dev_private; 285 struct drm_i915_private *dev_priv = dev->dev_private;
244 int ret; 286 int ret;
245 287
288 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
289
246 mutex_lock(&dev_priv->rps.hw_lock); 290 mutex_lock(&dev_priv->rps.hw_lock);
247 if (IS_VALLEYVIEW(dev_priv->dev)) 291 if (IS_VALLEYVIEW(dev_priv->dev))
248 ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay); 292 ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
@@ -267,6 +311,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
267 if (ret) 311 if (ret)
268 return ret; 312 return ret;
269 313
314 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
315
270 mutex_lock(&dev_priv->rps.hw_lock); 316 mutex_lock(&dev_priv->rps.hw_lock);
271 317
272 if (IS_VALLEYVIEW(dev_priv->dev)) { 318 if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -315,6 +361,8 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
315 struct drm_i915_private *dev_priv = dev->dev_private; 361 struct drm_i915_private *dev_priv = dev->dev_private;
316 int ret; 362 int ret;
317 363
364 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
365
318 mutex_lock(&dev_priv->rps.hw_lock); 366 mutex_lock(&dev_priv->rps.hw_lock);
319 if (IS_VALLEYVIEW(dev_priv->dev)) 367 if (IS_VALLEYVIEW(dev_priv->dev))
320 ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay); 368 ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
@@ -339,6 +387,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
339 if (ret) 387 if (ret)
340 return ret; 388 return ret;
341 389
390 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
391
342 mutex_lock(&dev_priv->rps.hw_lock); 392 mutex_lock(&dev_priv->rps.hw_lock);
343 393
344 if (IS_VALLEYVIEW(dev)) { 394 if (IS_VALLEYVIEW(dev)) {
@@ -507,10 +557,17 @@ void i915_setup_sysfs(struct drm_device *dev)
507 DRM_ERROR("RC6 residency sysfs setup failed\n"); 557 DRM_ERROR("RC6 residency sysfs setup failed\n");
508 } 558 }
509#endif 559#endif
510 if (HAS_L3_GPU_CACHE(dev)) { 560 if (HAS_L3_DPF(dev)) {
511 ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs); 561 ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
512 if (ret) 562 if (ret)
513 DRM_ERROR("l3 parity sysfs setup failed\n"); 563 DRM_ERROR("l3 parity sysfs setup failed\n");
564
565 if (NUM_L3_SLICES(dev) > 1) {
566 ret = device_create_bin_file(&dev->primary->kdev,
567 &dpf_attrs_1);
568 if (ret)
569 DRM_ERROR("l3 parity slice 1 setup failed\n");
570 }
514 } 571 }
515 572
516 ret = 0; 573 ret = 0;
@@ -534,6 +591,7 @@ void i915_teardown_sysfs(struct drm_device *dev)
534 sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs); 591 sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
535 else 592 else
536 sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs); 593 sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
594 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs_1);
537 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); 595 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
538#ifdef CONFIG_PM 596#ifdef CONFIG_PM
539 sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group); 597 sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index e2c5ee6f6194..6e580c98dede 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -233,6 +233,47 @@ TRACE_EVENT(i915_gem_evict_everything,
233 TP_printk("dev=%d", __entry->dev) 233 TP_printk("dev=%d", __entry->dev)
234); 234);
235 235
236TRACE_EVENT(i915_gem_evict_vm,
237 TP_PROTO(struct i915_address_space *vm),
238 TP_ARGS(vm),
239
240 TP_STRUCT__entry(
241 __field(struct i915_address_space *, vm)
242 ),
243
244 TP_fast_assign(
245 __entry->vm = vm;
246 ),
247
248 TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm)
249);
250
251TRACE_EVENT(i915_gem_ring_sync_to,
252 TP_PROTO(struct intel_ring_buffer *from,
253 struct intel_ring_buffer *to,
254 u32 seqno),
255 TP_ARGS(from, to, seqno),
256
257 TP_STRUCT__entry(
258 __field(u32, dev)
259 __field(u32, sync_from)
260 __field(u32, sync_to)
261 __field(u32, seqno)
262 ),
263
264 TP_fast_assign(
265 __entry->dev = from->dev->primary->index;
266 __entry->sync_from = from->id;
267 __entry->sync_to = to->id;
268 __entry->seqno = seqno;
269 ),
270
271 TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
272 __entry->dev,
273 __entry->sync_from, __entry->sync_to,
274 __entry->seqno)
275);
276
236TRACE_EVENT(i915_gem_ring_dispatch, 277TRACE_EVENT(i915_gem_ring_dispatch,
237 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags), 278 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
238 TP_ARGS(ring, seqno, flags), 279 TP_ARGS(ring, seqno, flags),
@@ -304,9 +345,24 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
304 TP_ARGS(ring, seqno) 345 TP_ARGS(ring, seqno)
305); 346);
306 347
307DEFINE_EVENT(i915_gem_request, i915_gem_request_complete, 348TRACE_EVENT(i915_gem_request_complete,
308 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), 349 TP_PROTO(struct intel_ring_buffer *ring),
309 TP_ARGS(ring, seqno) 350 TP_ARGS(ring),
351
352 TP_STRUCT__entry(
353 __field(u32, dev)
354 __field(u32, ring)
355 __field(u32, seqno)
356 ),
357
358 TP_fast_assign(
359 __entry->dev = ring->dev->primary->index;
360 __entry->ring = ring->id;
361 __entry->seqno = ring->get_seqno(ring, false);
362 ),
363
364 TP_printk("dev=%u, ring=%u, seqno=%u",
365 __entry->dev, __entry->ring, __entry->seqno)
310); 366);
311 367
312DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, 368DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 53f2bed8bc5f..e29bcae1ef81 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -389,7 +389,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
389{ 389{
390 struct sdvo_device_mapping *p_mapping; 390 struct sdvo_device_mapping *p_mapping;
391 struct bdb_general_definitions *p_defs; 391 struct bdb_general_definitions *p_defs;
392 struct child_device_config *p_child; 392 union child_device_config *p_child;
393 int i, child_device_num, count; 393 int i, child_device_num, count;
394 u16 block_size; 394 u16 block_size;
395 395
@@ -416,36 +416,36 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
416 count = 0; 416 count = 0;
417 for (i = 0; i < child_device_num; i++) { 417 for (i = 0; i < child_device_num; i++) {
418 p_child = &(p_defs->devices[i]); 418 p_child = &(p_defs->devices[i]);
419 if (!p_child->device_type) { 419 if (!p_child->old.device_type) {
420 /* skip the device block if device type is invalid */ 420 /* skip the device block if device type is invalid */
421 continue; 421 continue;
422 } 422 }
423 if (p_child->slave_addr != SLAVE_ADDR1 && 423 if (p_child->old.slave_addr != SLAVE_ADDR1 &&
424 p_child->slave_addr != SLAVE_ADDR2) { 424 p_child->old.slave_addr != SLAVE_ADDR2) {
425 /* 425 /*
426 * If the slave address is neither 0x70 nor 0x72, 426 * If the slave address is neither 0x70 nor 0x72,
427 * it is not a SDVO device. Skip it. 427 * it is not a SDVO device. Skip it.
428 */ 428 */
429 continue; 429 continue;
430 } 430 }
431 if (p_child->dvo_port != DEVICE_PORT_DVOB && 431 if (p_child->old.dvo_port != DEVICE_PORT_DVOB &&
432 p_child->dvo_port != DEVICE_PORT_DVOC) { 432 p_child->old.dvo_port != DEVICE_PORT_DVOC) {
433 /* skip the incorrect SDVO port */ 433 /* skip the incorrect SDVO port */
434 DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n"); 434 DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
435 continue; 435 continue;
436 } 436 }
437 DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on" 437 DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
438 " %s port\n", 438 " %s port\n",
439 p_child->slave_addr, 439 p_child->old.slave_addr,
440 (p_child->dvo_port == DEVICE_PORT_DVOB) ? 440 (p_child->old.dvo_port == DEVICE_PORT_DVOB) ?
441 "SDVOB" : "SDVOC"); 441 "SDVOB" : "SDVOC");
442 p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]); 442 p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]);
443 if (!p_mapping->initialized) { 443 if (!p_mapping->initialized) {
444 p_mapping->dvo_port = p_child->dvo_port; 444 p_mapping->dvo_port = p_child->old.dvo_port;
445 p_mapping->slave_addr = p_child->slave_addr; 445 p_mapping->slave_addr = p_child->old.slave_addr;
446 p_mapping->dvo_wiring = p_child->dvo_wiring; 446 p_mapping->dvo_wiring = p_child->old.dvo_wiring;
447 p_mapping->ddc_pin = p_child->ddc_pin; 447 p_mapping->ddc_pin = p_child->old.ddc_pin;
448 p_mapping->i2c_pin = p_child->i2c_pin; 448 p_mapping->i2c_pin = p_child->old.i2c_pin;
449 p_mapping->initialized = 1; 449 p_mapping->initialized = 1;
450 DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", 450 DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
451 p_mapping->dvo_port, 451 p_mapping->dvo_port,
@@ -457,7 +457,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
457 DRM_DEBUG_KMS("Maybe one SDVO port is shared by " 457 DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
458 "two SDVO devices.\n"); 458 "two SDVO devices.\n");
459 } 459 }
460 if (p_child->slave2_addr) { 460 if (p_child->old.slave2_addr) {
461 /* Maybe this is a SDVO device with multiple inputs */ 461 /* Maybe this is a SDVO device with multiple inputs */
462 /* And the mapping info is not added */ 462 /* And the mapping info is not added */
463 DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this" 463 DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
@@ -477,15 +477,13 @@ static void
477parse_driver_features(struct drm_i915_private *dev_priv, 477parse_driver_features(struct drm_i915_private *dev_priv,
478 struct bdb_header *bdb) 478 struct bdb_header *bdb)
479{ 479{
480 struct drm_device *dev = dev_priv->dev;
481 struct bdb_driver_features *driver; 480 struct bdb_driver_features *driver;
482 481
483 driver = find_section(bdb, BDB_DRIVER_FEATURES); 482 driver = find_section(bdb, BDB_DRIVER_FEATURES);
484 if (!driver) 483 if (!driver)
485 return; 484 return;
486 485
487 if (SUPPORTS_EDP(dev) && 486 if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
488 driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
489 dev_priv->vbt.edp_support = 1; 487 dev_priv->vbt.edp_support = 1;
490 488
491 if (driver->dual_frequency) 489 if (driver->dual_frequency)
@@ -501,7 +499,7 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
501 499
502 edp = find_section(bdb, BDB_EDP); 500 edp = find_section(bdb, BDB_EDP);
503 if (!edp) { 501 if (!edp) {
504 if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->vbt.edp_support) 502 if (dev_priv->vbt.edp_support)
505 DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n"); 503 DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
506 return; 504 return;
507 } 505 }
@@ -569,11 +567,149 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
569} 567}
570 568
571static void 569static void
570parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
571{
572 struct bdb_mipi *mipi;
573
574 mipi = find_section(bdb, BDB_MIPI);
575 if (!mipi) {
576 DRM_DEBUG_KMS("No MIPI BDB found");
577 return;
578 }
579
580 /* XXX: add more info */
581 dev_priv->vbt.dsi.panel_id = mipi->panel_id;
582}
583
584static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
585 struct bdb_header *bdb)
586{
587 union child_device_config *it, *child = NULL;
588 struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
589 uint8_t hdmi_level_shift;
590 int i, j;
591 bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
592 uint8_t aux_channel;
593 /* Each DDI port can have more than one value on the "DVO Port" field,
594 * so look for all the possible values for each port and abort if more
595 * than one is found. */
596 int dvo_ports[][2] = {
597 {DVO_PORT_HDMIA, DVO_PORT_DPA},
598 {DVO_PORT_HDMIB, DVO_PORT_DPB},
599 {DVO_PORT_HDMIC, DVO_PORT_DPC},
600 {DVO_PORT_HDMID, DVO_PORT_DPD},
601 {DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ },
602 };
603
604 /* Find the child device to use, abort if more than one found. */
605 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
606 it = dev_priv->vbt.child_dev + i;
607
608 for (j = 0; j < 2; j++) {
609 if (dvo_ports[port][j] == -1)
610 break;
611
612 if (it->common.dvo_port == dvo_ports[port][j]) {
613 if (child) {
614 DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
615 port_name(port));
616 return;
617 }
618 child = it;
619 }
620 }
621 }
622 if (!child)
623 return;
624
625 aux_channel = child->raw[25];
626
627 is_dvi = child->common.device_type & (1 << 4);
628 is_dp = child->common.device_type & (1 << 2);
629 is_crt = child->common.device_type & (1 << 0);
630 is_hdmi = is_dvi && (child->common.device_type & (1 << 11)) == 0;
631 is_edp = is_dp && (child->common.device_type & (1 << 12));
632
633 info->supports_dvi = is_dvi;
634 info->supports_hdmi = is_hdmi;
635 info->supports_dp = is_dp;
636
637 DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
638 port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
639
640 if (is_edp && is_dvi)
641 DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
642 port_name(port));
643 if (is_crt && port != PORT_E)
644 DRM_DEBUG_KMS("Port %c is analog\n", port_name(port));
645 if (is_crt && (is_dvi || is_dp))
646 DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
647 port_name(port));
648 if (is_dvi && (port == PORT_A || port == PORT_E))
649 DRM_DEBUG_KMS("Port %c is TMDS compatible\n", port_name(port));
650 if (!is_dvi && !is_dp && !is_crt)
651 DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
652 port_name(port));
653 if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E))
654 DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
655
656 if (is_dvi) {
657 if (child->common.ddc_pin == 0x05 && port != PORT_B)
658 DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
659 if (child->common.ddc_pin == 0x04 && port != PORT_C)
660 DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
661 if (child->common.ddc_pin == 0x06 && port != PORT_D)
662 DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
663 }
664
665 if (is_dp) {
666 if (aux_channel == 0x40 && port != PORT_A)
667 DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
668 if (aux_channel == 0x10 && port != PORT_B)
669 DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
670 if (aux_channel == 0x20 && port != PORT_C)
671 DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
672 if (aux_channel == 0x30 && port != PORT_D)
673 DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
674 }
675
676 if (bdb->version >= 158) {
677 /* The VBT HDMI level shift values match the table we have. */
678 hdmi_level_shift = child->raw[7] & 0xF;
679 if (hdmi_level_shift < 0xC) {
680 DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
681 port_name(port),
682 hdmi_level_shift);
683 info->hdmi_level_shift = hdmi_level_shift;
684 }
685 }
686}
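The capability flags at the top of parse_ddi_port() come straight from individual device_type bits (4 = DVI/TMDS, 2 = DisplayPort, 0 = analog, with HDMI and eDP derived from bits 11 and 12). A standalone decode mirroring how the hunk reads them (the bit meanings are as used here, not independently verified against the VBT spec):

#include <stdint.h>
#include <stdio.h>

struct caps { int dvi, hdmi, dp, edp, crt; };

/* Decode the VBT child device_type bits the way parse_ddi_port() does. */
static struct caps decode_device_type(uint16_t device_type)
{
	struct caps c;

	c.dvi  = !!(device_type & (1 << 4));
	c.dp   = !!(device_type & (1 << 2));
	c.crt  = !!(device_type & (1 << 0));
	c.hdmi = c.dvi && !(device_type & (1 << 11)); /* bit 11 clear: HDMI */
	c.edp  = c.dp && !!(device_type & (1 << 12)); /* bit 12: internal */
	return c;
}

int main(void)
{
	/* 0x1014: example value with bits 2, 4 and 12 set -> eDP-capable. */
	struct caps c = decode_device_type(0x1014);

	printf("DVI:%d HDMI:%d DP:%d eDP:%d CRT:%d\n",
	       c.dvi, c.hdmi, c.dp, c.edp, c.crt);
	return 0;
}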
687
688static void parse_ddi_ports(struct drm_i915_private *dev_priv,
689 struct bdb_header *bdb)
690{
691 struct drm_device *dev = dev_priv->dev;
692 enum port port;
693
694 if (!HAS_DDI(dev))
695 return;
696
697 if (!dev_priv->vbt.child_dev_num)
698 return;
699
700 if (bdb->version < 155)
701 return;
702
703 for (port = PORT_A; port < I915_MAX_PORTS; port++)
704 parse_ddi_port(dev_priv, port, bdb);
705}
706
707static void
572parse_device_mapping(struct drm_i915_private *dev_priv, 708parse_device_mapping(struct drm_i915_private *dev_priv,
573 struct bdb_header *bdb) 709 struct bdb_header *bdb)
574{ 710{
575 struct bdb_general_definitions *p_defs; 711 struct bdb_general_definitions *p_defs;
576 struct child_device_config *p_child, *child_dev_ptr; 712 union child_device_config *p_child, *child_dev_ptr;
577 int i, child_device_num, count; 713 int i, child_device_num, count;
578 u16 block_size; 714 u16 block_size;
579 715
@@ -601,7 +737,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
601 /* get the number of child device that is present */ 737 /* get the number of child device that is present */
602 for (i = 0; i < child_device_num; i++) { 738 for (i = 0; i < child_device_num; i++) {
603 p_child = &(p_defs->devices[i]); 739 p_child = &(p_defs->devices[i]);
604 if (!p_child->device_type) { 740 if (!p_child->common.device_type) {
605 /* skip the device block if device type is invalid */ 741 /* skip the device block if device type is invalid */
606 continue; 742 continue;
607 } 743 }
@@ -621,7 +757,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
621 count = 0; 757 count = 0;
622 for (i = 0; i < child_device_num; i++) { 758 for (i = 0; i < child_device_num; i++) {
623 p_child = &(p_defs->devices[i]); 759 p_child = &(p_defs->devices[i]);
624 if (!p_child->device_type) { 760 if (!p_child->common.device_type) {
625 /* skip the device block if device type is invalid */ 761 /* skip the device block if device type is invalid */
626 continue; 762 continue;
627 } 763 }
@@ -637,6 +773,7 @@ static void
637init_vbt_defaults(struct drm_i915_private *dev_priv) 773init_vbt_defaults(struct drm_i915_private *dev_priv)
638{ 774{
639 struct drm_device *dev = dev_priv->dev; 775 struct drm_device *dev = dev_priv->dev;
776 enum port port;
640 777
641 dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC; 778 dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
642 779
@@ -655,6 +792,18 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
655 dev_priv->vbt.lvds_use_ssc = 1; 792 dev_priv->vbt.lvds_use_ssc = 1;
656 dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); 793 dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
657 DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq); 794 DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
795
796 for (port = PORT_A; port < I915_MAX_PORTS; port++) {
797 struct ddi_vbt_port_info *info =
798 &dev_priv->vbt.ddi_port_info[port];
799
800 /* Recommended BSpec default: 800mV 0dB. */
801 info->hdmi_level_shift = 6;
802
803 info->supports_dvi = (port != PORT_A && port != PORT_E);
804 info->supports_hdmi = info->supports_dvi;
805 info->supports_dp = (port != PORT_E);
806 }
658} 807}
659 808
660static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) 809static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
@@ -745,6 +894,8 @@ intel_parse_bios(struct drm_device *dev)
745 parse_device_mapping(dev_priv, bdb); 894 parse_device_mapping(dev_priv, bdb);
746 parse_driver_features(dev_priv, bdb); 895 parse_driver_features(dev_priv, bdb);
747 parse_edp(dev_priv, bdb); 896 parse_edp(dev_priv, bdb);
897 parse_mipi(dev_priv, bdb);
898 parse_ddi_ports(dev_priv, bdb);
748 899
749 if (bios) 900 if (bios)
750 pci_unmap_rom(pdev, bios); 901 pci_unmap_rom(pdev, bios);
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index e088d6f0956a..287cc5a21c2e 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -104,6 +104,7 @@ struct vbios_data {
104#define BDB_LVDS_LFP_DATA 42 104#define BDB_LVDS_LFP_DATA 42
105#define BDB_LVDS_BACKLIGHT 43 105#define BDB_LVDS_BACKLIGHT 43
106#define BDB_LVDS_POWER 44 106#define BDB_LVDS_POWER 44
107#define BDB_MIPI 50
107#define BDB_SKIP 254 /* VBIOS private block, ignore */ 108#define BDB_SKIP 254 /* VBIOS private block, ignore */
108 109
109struct bdb_general_features { 110struct bdb_general_features {
@@ -201,7 +202,10 @@ struct bdb_general_features {
201#define DEVICE_PORT_DVOB 0x01 202#define DEVICE_PORT_DVOB 0x01
202#define DEVICE_PORT_DVOC 0x02 203#define DEVICE_PORT_DVOC 0x02
203 204
204struct child_device_config { 205/* We used to keep this struct but without any version control. We should avoid
206 * using it in the future, but it should be safe to keep using it in the old
207 * code. */
208struct old_child_dev_config {
205 u16 handle; 209 u16 handle;
206 u16 device_type; 210 u16 device_type;
207 u8 device_id[10]; /* ascii string */ 211 u8 device_id[10]; /* ascii string */
@@ -223,6 +227,32 @@ struct child_device_config {
223 u8 dvo_function; 227 u8 dvo_function;
224} __attribute__((packed)); 228} __attribute__((packed));
225 229
230/* This one contains field offsets that are known to be common for all BDB
231 * versions. Notice that the meaning of the contents may still change,
232 * but at least the offsets are consistent. */
233struct common_child_dev_config {
234 u16 handle;
235 u16 device_type;
236 u8 not_common1[12];
237 u8 dvo_port;
238 u8 not_common2[2];
239 u8 ddc_pin;
240 u16 edid_ptr;
241} __attribute__((packed));
242
243/* This field changes depending on the BDB version, so the most reliable way to
244 * read it is by checking the BDB version and reading the raw pointer. */
245union child_device_config {
246 /* This one is safe to be used anywhere, but the code should still check
247 * the BDB version. */
248 u8 raw[33];
249 /* This one should only be kept for legacy code. */
250 struct old_child_dev_config old;
251 /* This one should also be safe to use anywhere, even without version
252 * checks. */
253 struct common_child_dev_config common;
254};
255
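Since the union above only stays honest while every view fits in the 33 raw bytes, a compile-time size check is a cheap guard against layout drift. A condensed standalone sketch (struct common_view abbreviates the real common_child_dev_config):

#include <stdint.h>

/* Condensed view of the union above: overlays of one 33-byte blob. */
struct common_view {
	uint16_t handle;
	uint16_t device_type;
	uint8_t  not_common1[12];
	uint8_t  dvo_port;
	uint8_t  not_common2[2];
	uint8_t  ddc_pin;
	uint16_t edid_ptr;
} __attribute__((packed));

union child_cfg {
	uint8_t raw[33];
	struct common_view common;
};

/* If a view outgrows the raw block, the union silently grows; catch it. */
_Static_assert(sizeof(union child_cfg) == 33, "child config layout drifted");

int main(void) { return 0; }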
226struct bdb_general_definitions { 256struct bdb_general_definitions {
227 /* DDC GPIO */ 257 /* DDC GPIO */
228 u8 crt_ddc_gmbus_pin; 258 u8 crt_ddc_gmbus_pin;
@@ -248,7 +278,7 @@ struct bdb_general_definitions {
248 * number = (block_size - sizeof(bdb_general_definitions))/ 278 * number = (block_size - sizeof(bdb_general_definitions))/
249 * sizeof(child_device_config); 279 * sizeof(child_device_config);
250 */ 280 */
251 struct child_device_config devices[0]; 281 union child_device_config devices[0];
252} __attribute__((packed)); 282} __attribute__((packed));
253 283
254struct bdb_lvds_options { 284struct bdb_lvds_options {
@@ -618,4 +648,57 @@ int intel_parse_bios(struct drm_device *dev);
618#define PORT_IDPC 8 648#define PORT_IDPC 8
619#define PORT_IDPD 9 649#define PORT_IDPD 9
620 650
651/* Possible values for the "DVO Port" field for versions >= 155: */
652#define DVO_PORT_HDMIA 0
653#define DVO_PORT_HDMIB 1
654#define DVO_PORT_HDMIC 2
655#define DVO_PORT_HDMID 3
656#define DVO_PORT_LVDS 4
657#define DVO_PORT_TV 5
658#define DVO_PORT_CRT 6
659#define DVO_PORT_DPB 7
660#define DVO_PORT_DPC 8
661#define DVO_PORT_DPD 9
662#define DVO_PORT_DPA 10
663
664/* MIPI DSI panel info */
665struct bdb_mipi {
666 u16 panel_id;
667 u16 bridge_revision;
668
669 /* General params */
670 u32 dithering:1;
671 u32 bpp_pixel_format:1;
672 u32 rsvd1:1;
673 u32 dphy_valid:1;
674 u32 resvd2:28;
675
676 u16 port_info;
677 u16 rsvd3:2;
678 u16 num_lanes:2;
679 u16 rsvd4:12;
680
681 /* DSI config */
682 u16 virt_ch_num:2;
683 u16 vtm:2;
684 u16 rsvd5:12;
685
686 u32 dsi_clock;
687 u32 bridge_ref_clk;
688 u16 rsvd_pwr;
689
690 /* Dphy Params */
691 u32 prepare_cnt:5;
692 u32 rsvd6:3;
693 u32 clk_zero_cnt:8;
694 u32 trail_cnt:5;
695 u32 rsvd7:3;
696 u32 exit_zero_cnt:6;
697 u32 rsvd8:2;
698
699 u32 hl_switch_cnt;
700 u32 lp_byte_clk;
701 u32 clk_lane_switch_cnt;
702} __attribute__((packed));
703
621#endif /* _I830_BIOS_H_ */ 704#endif /* _I830_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 10d1de5bce6f..2e01bd3a5d8c 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -107,7 +107,17 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
107static void intel_crt_get_config(struct intel_encoder *encoder, 107static void intel_crt_get_config(struct intel_encoder *encoder,
108 struct intel_crtc_config *pipe_config) 108 struct intel_crtc_config *pipe_config)
109{ 109{
110 struct drm_device *dev = encoder->base.dev;
111 int dotclock;
112
110 pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder); 113 pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
114
115 dotclock = pipe_config->port_clock;
116
117 if (HAS_PCH_SPLIT(dev))
118 ironlake_check_encoder_dotclock(pipe_config, dotclock);
119
120 pipe_config->adjusted_mode.crtc_clock = dotclock;
111} 121}
112 122
113static void hsw_crt_get_config(struct intel_encoder *encoder, 123static void hsw_crt_get_config(struct intel_encoder *encoder,
@@ -264,7 +274,7 @@ static void intel_crt_mode_set(struct intel_encoder *encoder)
264 struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; 274 struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
265 u32 adpa; 275 u32 adpa;
266 276
267 if (HAS_PCH_SPLIT(dev)) 277 if (INTEL_INFO(dev)->gen >= 5)
268 adpa = ADPA_HOTPLUG_BITS; 278 adpa = ADPA_HOTPLUG_BITS;
269 else 279 else
270 adpa = 0; 280 adpa = 0;
@@ -366,9 +376,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
366 376
367 DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret); 377 DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
368 378
369 /* FIXME: debug force function and remove */
370 ret = true;
371
372 return ret; 379 return ret;
373} 380}
374 381
@@ -670,7 +677,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
670 677
671static void intel_crt_destroy(struct drm_connector *connector) 678static void intel_crt_destroy(struct drm_connector *connector)
672{ 679{
673 drm_sysfs_connector_remove(connector);
674 drm_connector_cleanup(connector); 680 drm_connector_cleanup(connector);
675 kfree(connector); 681 kfree(connector);
676} 682}
@@ -776,7 +782,7 @@ void intel_crt_init(struct drm_device *dev)
776 if (!crt) 782 if (!crt)
777 return; 783 return;
778 784
779 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 785 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
780 if (!intel_connector) { 786 if (!intel_connector) {
781 kfree(crt); 787 kfree(crt);
782 return; 788 return;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b53fff84a7d5..31f4fe271388 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -42,7 +42,6 @@ static const u32 hsw_ddi_translations_dp[] = {
42 0x80C30FFF, 0x000B0000, 42 0x80C30FFF, 0x000B0000,
43 0x00FFFFFF, 0x00040006, 43 0x00FFFFFF, 0x00040006,
44 0x80D75FFF, 0x000B0000, 44 0x80D75FFF, 0x000B0000,
45 0x00FFFFFF, 0x00040006 /* HDMI parameters */
46}; 45};
47 46
48static const u32 hsw_ddi_translations_fdi[] = { 47static const u32 hsw_ddi_translations_fdi[] = {
@@ -55,10 +54,25 @@ static const u32 hsw_ddi_translations_fdi[] = {
55 0x00C30FFF, 0x001E0000, 54 0x00C30FFF, 0x001E0000,
56 0x00FFFFFF, 0x00060006, 55 0x00FFFFFF, 0x00060006,
57 0x00D75FFF, 0x001E0000, 56 0x00D75FFF, 0x001E0000,
58 0x00FFFFFF, 0x00040006 /* HDMI parameters */
59}; 57};
60 58
61static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) 59static const u32 hsw_ddi_translations_hdmi[] = {
60 /* Idx NT mV diff T mV diff db */
61 0x00FFFFFF, 0x0006000E, /* 0: 400 400 0 */
62 0x00E79FFF, 0x000E000C, /* 1: 400 500 2 */
63 0x00D75FFF, 0x0005000A, /* 2: 400 600 3.5 */
64 0x00FFFFFF, 0x0005000A, /* 3: 600 600 0 */
65 0x00E79FFF, 0x001D0007, /* 4: 600 750 2 */
66 0x00D75FFF, 0x000C0004, /* 5: 600 900 3.5 */
67 0x00FFFFFF, 0x00040006, /* 6: 800 800 0 */
68 0x80E79FFF, 0x00030002, /* 7: 800 1000 2 */
69 0x00FFFFFF, 0x00140005, /* 8: 850 850 0 */
70 0x00FFFFFF, 0x000C0004, /* 9: 900 900 0 */
71 0x00FFFFFF, 0x001C0003, /* 10: 950 950 0 */
72 0x80FFFFFF, 0x00030002, /* 11: 1000 1000 0 */
73};
74
75enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
62{ 76{
63 struct drm_encoder *encoder = &intel_encoder->base; 77 struct drm_encoder *encoder = &intel_encoder->base;
64 int type = intel_encoder->type; 78 int type = intel_encoder->type;
@@ -92,12 +106,18 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
92 const u32 *ddi_translations = (port == PORT_E) ? 106 const u32 *ddi_translations = (port == PORT_E) ?
93 hsw_ddi_translations_fdi : 107 hsw_ddi_translations_fdi :
94 hsw_ddi_translations_dp; 108 hsw_ddi_translations_dp;
109 int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
95 110
96 for (i = 0, reg = DDI_BUF_TRANS(port); 111 for (i = 0, reg = DDI_BUF_TRANS(port);
97 i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) { 112 i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
98 I915_WRITE(reg, ddi_translations[i]); 113 I915_WRITE(reg, ddi_translations[i]);
99 reg += 4; 114 reg += 4;
100 } 115 }
116 /* Entry 9 is for HDMI: */
117 for (i = 0; i < 2; i++) {
118 I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]);
119 reg += 4;
120 }
101} 121}
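After the loop writes the DP/FDI table, the two extra writes append the HDMI entry selected by the VBT level shift, which is why "entry 9" ends up holding per-board HDMI values. A standalone sketch of the index and offset arithmetic (the 0x64e00 base is assumed to be DDI_BUF_TRANS for port A, and the table is abbreviated to the default entry):

#include <stdint.h>
#include <stdio.h>

static const uint32_t hdmi_trans[] = {
	/* abbreviated hsw_ddi_translations_hdmi[]; only entry 6 filled in */
	[12] = 0x00FFFFFF, [13] = 0x00040006,	/* 6: 800mV 0dB */
};

/*
 * Sketch of intel_prepare_ddi_buffers(): n_dp dwords of DP/FDI values go
 * out first, then the two dwords of the HDMI entry picked by the VBT
 * level shift land in the final slot.
 */
static void program_ddi_buf_trans(uint32_t base, int n_dp, int hdmi_level)
{
	uint32_t reg = base;
	int i;

	for (i = 0; i < n_dp; i++, reg += 4)
		printf("0x%06x <- DP/FDI table dword %d\n", reg, i);
	for (i = 0; i < 2; i++, reg += 4)
		printf("0x%06x <- HDMI dword 0x%08x (entry %d)\n",
		       reg, hdmi_trans[hdmi_level * 2 + i], hdmi_level);
}

int main(void)
{
	/* 18 DP/FDI dwords followed by the 2-dword HDMI entry. */
	program_ddi_buf_trans(0x64e00, 18, 6);
	return 0;
}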
102 122
103/* Program DDI buffers translations for DP. By default, program ports A-D in DP 123/* Program DDI buffers translations for DP. By default, program ports A-D in DP
@@ -296,9 +316,6 @@ static void intel_ddi_mode_set(struct intel_encoder *encoder)
296 DRM_DEBUG_DRIVER("DP audio: write eld information\n"); 316 DRM_DEBUG_DRIVER("DP audio: write eld information\n");
297 intel_write_eld(&encoder->base, adjusted_mode); 317 intel_write_eld(&encoder->base, adjusted_mode);
298 } 318 }
299
300 intel_dp_init_link_config(intel_dp);
301
302 } else if (type == INTEL_OUTPUT_HDMI) { 319 } else if (type == INTEL_OUTPUT_HDMI) {
303 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 320 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
304 321
@@ -767,9 +784,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
767 BUG(); 784 BUG();
768 } 785 }
769 786
770 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) 787 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
771 temp |= TRANS_DDI_PVSYNC; 788 temp |= TRANS_DDI_PVSYNC;
772 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) 789 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
773 temp |= TRANS_DDI_PHSYNC; 790 temp |= TRANS_DDI_PHSYNC;
774 791
775 if (cpu_transcoder == TRANSCODER_EDP) { 792 if (cpu_transcoder == TRANSCODER_EDP) {
@@ -1202,7 +1219,7 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
1202 1219
1203 val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST | 1220 val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
1204 DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE; 1221 DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
1205 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 1222 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1206 val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; 1223 val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
1207 I915_WRITE(DP_TP_CTL(port), val); 1224 I915_WRITE(DP_TP_CTL(port), val);
1208 POSTING_READ(DP_TP_CTL(port)); 1225 POSTING_READ(DP_TP_CTL(port));
@@ -1285,6 +1302,20 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
1285 default: 1302 default:
1286 break; 1303 break;
1287 } 1304 }
1305
1306 switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
1307 case TRANS_DDI_MODE_SELECT_HDMI:
1308 case TRANS_DDI_MODE_SELECT_DVI:
1309 case TRANS_DDI_MODE_SELECT_FDI:
1310 break;
1311 case TRANS_DDI_MODE_SELECT_DP_SST:
1312 case TRANS_DDI_MODE_SELECT_DP_MST:
1313 pipe_config->has_dp_encoder = true;
1314 intel_dp_get_m_n(intel_crtc, pipe_config);
1315 break;
1316 default:
1317 break;
1318 }
1288} 1319}
1289 1320
1290static void intel_ddi_destroy(struct drm_encoder *encoder) 1321static void intel_ddi_destroy(struct drm_encoder *encoder)
@@ -1314,6 +1345,41 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
1314 .destroy = intel_ddi_destroy, 1345 .destroy = intel_ddi_destroy,
1315}; 1346};
1316 1347
1348static struct intel_connector *
1349intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
1350{
1351 struct intel_connector *connector;
1352 enum port port = intel_dig_port->port;
1353
1354 connector = kzalloc(sizeof(*connector), GFP_KERNEL);
1355 if (!connector)
1356 return NULL;
1357
1358 intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
1359 if (!intel_dp_init_connector(intel_dig_port, connector)) {
1360 kfree(connector);
1361 return NULL;
1362 }
1363
1364 return connector;
1365}
1366
1367static struct intel_connector *
1368intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
1369{
1370 struct intel_connector *connector;
1371 enum port port = intel_dig_port->port;
1372
1373 connector = kzalloc(sizeof(*connector), GFP_KERNEL);
1374 if (!connector)
1375 return NULL;
1376
1377 intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
1378 intel_hdmi_init_connector(intel_dig_port, connector);
1379
1380 return connector;
1381}
1382
1317void intel_ddi_init(struct drm_device *dev, enum port port) 1383void intel_ddi_init(struct drm_device *dev, enum port port)
1318{ 1384{
1319 struct drm_i915_private *dev_priv = dev->dev_private; 1385 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1322,17 +1388,22 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
1322 struct drm_encoder *encoder; 1388 struct drm_encoder *encoder;
1323 struct intel_connector *hdmi_connector = NULL; 1389 struct intel_connector *hdmi_connector = NULL;
1324 struct intel_connector *dp_connector = NULL; 1390 struct intel_connector *dp_connector = NULL;
1391 bool init_hdmi, init_dp;
1392
1393 init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
1394 dev_priv->vbt.ddi_port_info[port].supports_hdmi);
1395 init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
1396 if (!init_dp && !init_hdmi) {
1397 DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n",
1398 port_name(port));
1399 init_hdmi = true;
1400 init_dp = true;
1401 }
1325 1402
1326 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); 1403 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
1327 if (!intel_dig_port) 1404 if (!intel_dig_port)
1328 return; 1405 return;
1329 1406
1330 dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
1331 if (!dp_connector) {
1332 kfree(intel_dig_port);
1333 return;
1334 }
1335
1336 intel_encoder = &intel_dig_port->base; 1407 intel_encoder = &intel_dig_port->base;
1337 encoder = &intel_encoder->base; 1408 encoder = &intel_encoder->base;
1338 1409
@@ -1352,28 +1423,22 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
1352 intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & 1423 intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
1353 (DDI_BUF_PORT_REVERSAL | 1424 (DDI_BUF_PORT_REVERSAL |
1354 DDI_A_4_LANES); 1425 DDI_A_4_LANES);
1355 intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
1356 1426
1357 intel_encoder->type = INTEL_OUTPUT_UNKNOWN; 1427 intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
1358 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 1428 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
1359 intel_encoder->cloneable = false; 1429 intel_encoder->cloneable = false;
1360 intel_encoder->hot_plug = intel_ddi_hot_plug; 1430 intel_encoder->hot_plug = intel_ddi_hot_plug;
1361 1431
1362 if (!intel_dp_init_connector(intel_dig_port, dp_connector)) { 1432 if (init_dp)
1363 drm_encoder_cleanup(encoder); 1433 dp_connector = intel_ddi_init_dp_connector(intel_dig_port);
1364 kfree(intel_dig_port);
1365 kfree(dp_connector);
1366 return;
1367 }
1368 1434
1369 if (intel_encoder->type != INTEL_OUTPUT_EDP) { 1435 /* In theory we don't need the encoder->type check, but leave it just in
1370 hdmi_connector = kzalloc(sizeof(struct intel_connector), 1436 * case we have some really bad VBTs... */
1371 GFP_KERNEL); 1437 if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi)
1372 if (!hdmi_connector) { 1438 hdmi_connector = intel_ddi_init_hdmi_connector(intel_dig_port);
1373 return;
1374 }
1375 1439
1376 intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port); 1440 if (!dp_connector && !hdmi_connector) {
1377 intel_hdmi_init_connector(intel_dig_port, hdmi_connector); 1441 drm_encoder_cleanup(encoder);
1442 kfree(intel_dig_port);
1378 } 1443 }
1379} 1444}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d78d33f9337d..f34252d134b6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -41,14 +41,13 @@
41#include <drm/drm_crtc_helper.h> 41#include <drm/drm_crtc_helper.h>
42#include <linux/dma_remapping.h> 42#include <linux/dma_remapping.h>
43 43
44bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
45static void intel_increase_pllclock(struct drm_crtc *crtc); 44static void intel_increase_pllclock(struct drm_crtc *crtc);
46static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 45static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
47 46
48static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 47static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
49 struct intel_crtc_config *pipe_config); 48 struct intel_crtc_config *pipe_config);
50static void ironlake_crtc_clock_get(struct intel_crtc *crtc, 49static void ironlake_pch_clock_get(struct intel_crtc *crtc,
51 struct intel_crtc_config *pipe_config); 50 struct intel_crtc_config *pipe_config);
52 51
53static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, 52static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
54 int x, int y, struct drm_framebuffer *old_fb); 53 int x, int y, struct drm_framebuffer *old_fb);
@@ -69,9 +68,6 @@ struct intel_limit {
69 intel_p2_t p2; 68 intel_p2_t p2;
70}; 69};
71 70
72/* FDI */
73#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
74
75int 71int
76intel_pch_rawclk(struct drm_device *dev) 72intel_pch_rawclk(struct drm_device *dev)
77{ 73{
@@ -313,44 +309,44 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
313 .p2_slow = 7, .p2_fast = 7 }, 309 .p2_slow = 7, .p2_fast = 7 },
314}; 310};
315 311
316static const intel_limit_t intel_limits_vlv_dac = { 312static const intel_limit_t intel_limits_vlv = {
317 .dot = { .min = 25000, .max = 270000 }, 313 /*
318 .vco = { .min = 4000000, .max = 6000000 }, 314 * These are the data rate limits (measured in fast clocks)
319 .n = { .min = 1, .max = 7 }, 315 * since those are the strictest limits we have. The fast
320 .m = { .min = 22, .max = 450 }, /* guess */ 316 * clock and actual rate limits are more relaxed, so checking
321 .m1 = { .min = 2, .max = 3 }, 317 * them would make no difference.
322 .m2 = { .min = 11, .max = 156 }, 318 */
323 .p = { .min = 10, .max = 30 }, 319 .dot = { .min = 25000 * 5, .max = 270000 * 5 },
324 .p1 = { .min = 1, .max = 3 },
325 .p2 = { .dot_limit = 270000,
326 .p2_slow = 2, .p2_fast = 20 },
327};
328
329static const intel_limit_t intel_limits_vlv_hdmi = {
330 .dot = { .min = 25000, .max = 270000 },
331 .vco = { .min = 4000000, .max = 6000000 }, 320 .vco = { .min = 4000000, .max = 6000000 },
332 .n = { .min = 1, .max = 7 }, 321 .n = { .min = 1, .max = 7 },
333 .m = { .min = 60, .max = 300 }, /* guess */
334 .m1 = { .min = 2, .max = 3 }, 322 .m1 = { .min = 2, .max = 3 },
335 .m2 = { .min = 11, .max = 156 }, 323 .m2 = { .min = 11, .max = 156 },
336 .p = { .min = 10, .max = 30 },
337 .p1 = { .min = 2, .max = 3 }, 324 .p1 = { .min = 2, .max = 3 },
338 .p2 = { .dot_limit = 270000, 325 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
339 .p2_slow = 2, .p2_fast = 20 },
340}; 326};
341 327
342static const intel_limit_t intel_limits_vlv_dp = { 328static void vlv_clock(int refclk, intel_clock_t *clock)
343 .dot = { .min = 25000, .max = 270000 }, 329{
344 .vco = { .min = 4000000, .max = 6000000 }, 330 clock->m = clock->m1 * clock->m2;
345 .n = { .min = 1, .max = 7 }, 331 clock->p = clock->p1 * clock->p2;
346 .m = { .min = 22, .max = 450 }, 332 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
347 .m1 = { .min = 2, .max = 3 }, 333 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
348 .m2 = { .min = 11, .max = 156 }, 334}
349 .p = { .min = 10, .max = 30 }, 335
350 .p1 = { .min = 1, .max = 3 }, 336/**
351 .p2 = { .dot_limit = 270000, 337 * Returns whether any output on the specified pipe is of the specified type
352 .p2_slow = 2, .p2_fast = 20 }, 338 */
353}; 339static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
340{
341 struct drm_device *dev = crtc->dev;
342 struct intel_encoder *encoder;
343
344 for_each_encoder_on_crtc(dev, crtc, encoder)
345 if (encoder->type == type)
346 return true;
347
348 return false;
349}
354 350
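vlv_clock() above computes the VCO and dot clock from the raw dividers with rounded division, and intel_limits_vlv stores its dot limits premultiplied by 5 because validation happens in fast-clock units. A standalone worked example of the same arithmetic (the divider values are invented; refclk is in kHz):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

struct clk { int n, m1, m2, p1, p2, m, p, vco, dot; };

/* Same arithmetic as vlv_clock() above (refclk and dot in kHz). */
static void vlv_clock(int refclk, struct clk *c)
{
	c->m = c->m1 * c->m2;
	c->p = c->p1 * c->p2;
	c->vco = DIV_ROUND_CLOSEST(refclk * c->m, c->n);
	c->dot = DIV_ROUND_CLOSEST(c->vco, c->p);
}

int main(void)
{
	/* Example dividers only; real values come from the PLL search. */
	struct clk c = { .n = 1, .m1 = 2, .m2 = 27, .p1 = 3, .p2 = 2 };

	vlv_clock(100000, &c); /* 100 MHz reference */
	printf("vco = %d kHz, dot = %d kHz\n", c.vco, c.dot);
	return 0;
}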
355static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, 351static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
356 int refclk) 352 int refclk)
@@ -412,12 +408,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
412 else 408 else
413 limit = &intel_limits_pineview_sdvo; 409 limit = &intel_limits_pineview_sdvo;
414 } else if (IS_VALLEYVIEW(dev)) { 410 } else if (IS_VALLEYVIEW(dev)) {
415 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) 411 limit = &intel_limits_vlv;
416 limit = &intel_limits_vlv_dac;
417 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
418 limit = &intel_limits_vlv_hdmi;
419 else
420 limit = &intel_limits_vlv_dp;
421 } else if (!IS_GEN2(dev)) { 412 } else if (!IS_GEN2(dev)) {
422 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 413 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
423 limit = &intel_limits_i9xx_lvds; 414 limit = &intel_limits_i9xx_lvds;
@@ -439,8 +430,8 @@ static void pineview_clock(int refclk, intel_clock_t *clock)
439{ 430{
440 clock->m = clock->m2 + 2; 431 clock->m = clock->m2 + 2;
441 clock->p = clock->p1 * clock->p2; 432 clock->p = clock->p1 * clock->p2;
442 clock->vco = refclk * clock->m / clock->n; 433 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
443 clock->dot = clock->vco / clock->p; 434 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
444} 435}
445 436
446static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) 437static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
@@ -452,23 +443,8 @@ static void i9xx_clock(int refclk, intel_clock_t *clock)
452{ 443{
453 clock->m = i9xx_dpll_compute_m(clock); 444 clock->m = i9xx_dpll_compute_m(clock);
454 clock->p = clock->p1 * clock->p2; 445 clock->p = clock->p1 * clock->p2;
455 clock->vco = refclk * clock->m / (clock->n + 2); 446 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
456 clock->dot = clock->vco / clock->p; 447 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
457}
458
459/**
460 * Returns whether any output on the specified pipe is of the specified type
461 */
462bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
463{
464 struct drm_device *dev = crtc->dev;
465 struct intel_encoder *encoder;
466
467 for_each_encoder_on_crtc(dev, crtc, encoder)
468 if (encoder->type == type)
469 return true;
470
471 return false;
472} 448}
473 449
474#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 450#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
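A note on the DIV_ROUND_CLOSEST() conversions in the hunks above: plain
integer division truncates, so the computed vco and dot values could come
out slightly low; the macro (from <linux/kernel.h>) rounds to the nearest
integer instead. A minimal illustration of the difference:

        #include <linux/kernel.h>

        int trunc = 200 / 3;                    /* 66: always rounds down */
        int round = DIV_ROUND_CLOSEST(200, 3);  /* 67: rounds to nearest */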
@@ -481,20 +457,26 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
481 const intel_limit_t *limit, 457 const intel_limit_t *limit,
482 const intel_clock_t *clock) 458 const intel_clock_t *clock)
483{ 459{
460 if (clock->n < limit->n.min || limit->n.max < clock->n)
461 INTELPllInvalid("n out of range\n");
484 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 462 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
485 INTELPllInvalid("p1 out of range\n"); 463 INTELPllInvalid("p1 out of range\n");
486 if (clock->p < limit->p.min || limit->p.max < clock->p)
487 INTELPllInvalid("p out of range\n");
488 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) 464 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
489 INTELPllInvalid("m2 out of range\n"); 465 INTELPllInvalid("m2 out of range\n");
490 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 466 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
491 INTELPllInvalid("m1 out of range\n"); 467 INTELPllInvalid("m1 out of range\n");
492 if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev)) 468
493 INTELPllInvalid("m1 <= m2\n"); 469 if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
494 if (clock->m < limit->m.min || limit->m.max < clock->m) 470 if (clock->m1 <= clock->m2)
495 INTELPllInvalid("m out of range\n"); 471 INTELPllInvalid("m1 <= m2\n");
496 if (clock->n < limit->n.min || limit->n.max < clock->n) 472
497 INTELPllInvalid("n out of range\n"); 473 if (!IS_VALLEYVIEW(dev)) {
474 if (clock->p < limit->p.min || limit->p.max < clock->p)
475 INTELPllInvalid("p out of range\n");
476 if (clock->m < limit->m.min || limit->m.max < clock->m)
477 INTELPllInvalid("m out of range\n");
478 }
479
498 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) 480 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
499 INTELPllInvalid("vco out of range\n"); 481 INTELPllInvalid("vco out of range\n");
500 /* XXX: We may need to be checking "Dot clock" depending on the multiplier, 482 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
@@ -688,67 +670,73 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
688 int target, int refclk, intel_clock_t *match_clock, 670 int target, int refclk, intel_clock_t *match_clock,
689 intel_clock_t *best_clock) 671 intel_clock_t *best_clock)
690{ 672{
691 u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2; 673 struct drm_device *dev = crtc->dev;
692 u32 m, n, fastclk; 674 intel_clock_t clock;
693 u32 updrate, minupdate, p; 675 unsigned int bestppm = 1000000;
694 unsigned long bestppm, ppm, absppm; 676 /* min update 19.2 MHz */
695 int dotclk, flag; 677 int max_n = min(limit->n.max, refclk / 19200);
696 678 bool found = false;
697 flag = 0; 679
698 dotclk = target * 1000; 680 target *= 5; /* fast clock */
699 bestppm = 1000000; 681
700 ppm = absppm = 0; 682 memset(best_clock, 0, sizeof(*best_clock));
701 fastclk = dotclk / (2*100);
702 updrate = 0;
703 minupdate = 19200;
704 n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
705 bestm1 = bestm2 = bestp1 = bestp2 = 0;
706 683
707 /* based on hardware requirement, prefer smaller n to precision */ 684 /* based on hardware requirement, prefer smaller n to precision */
708 for (n = limit->n.min; n <= ((refclk) / minupdate); n++) { 685 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
709 updrate = refclk / n; 686 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
710 for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) { 687 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
711 for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) { 688 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
712 if (p2 > 10) 689 clock.p = clock.p1 * clock.p2;
713 p2 = p2 - 1;
714 p = p1 * p2;
715 /* based on hardware requirement, prefer bigger m1,m2 values */ 690 /* based on hardware requirement, prefer bigger m1,m2 values */
716 for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) { 691 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
717 m2 = (((2*(fastclk * p * n / m1 )) + 692 unsigned int ppm, diff;
718 refclk) / (2*refclk)); 693
719 m = m1 * m2; 694 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
720 vco = updrate * m; 695 refclk * clock.m1);
721 if (vco >= limit->vco.min && vco < limit->vco.max) { 696
722 ppm = 1000000 * ((vco / p) - fastclk) / fastclk; 697 vlv_clock(refclk, &clock);
723 absppm = (ppm > 0) ? ppm : (-ppm); 698
724 if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) { 699 if (!intel_PLL_is_valid(dev, limit,
725 bestppm = 0; 700 &clock))
726 flag = 1; 701 continue;
727 } 702
728 if (absppm < bestppm - 10) { 703 diff = abs(clock.dot - target);
729 bestppm = absppm; 704 ppm = div_u64(1000000ULL * diff, target);
730 flag = 1; 705
731 } 706 if (ppm < 100 && clock.p > best_clock->p) {
732 if (flag) { 707 bestppm = 0;
733 bestn = n; 708 *best_clock = clock;
734 bestm1 = m1; 709 found = true;
735 bestm2 = m2; 710 }
736 bestp1 = p1; 711
737 bestp2 = p2; 712 if (bestppm >= 10 && ppm < bestppm - 10) {
738 flag = 0; 713 bestppm = ppm;
739 } 714 *best_clock = clock;
715 found = true;
740 } 716 }
741 } 717 }
742 } 718 }
743 } 719 }
744 } 720 }
745 best_clock->n = bestn;
746 best_clock->m1 = bestm1;
747 best_clock->m2 = bestm2;
748 best_clock->p1 = bestp1;
749 best_clock->p2 = bestp2;
750 721
751 return true; 722 return found;
723}
724
725bool intel_crtc_active(struct drm_crtc *crtc)
726{
727 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
728
729 /* Be paranoid as we can arrive here with only partial
730 * state retrieved from the hardware during setup.
731 *
732 * We can ditch the adjusted_mode.crtc_clock check as soon
733 * as Haswell has gained clock readout/fastboot support.
734 *
735 * We can ditch the crtc->fb check as soon as we can
736 * properly reconstruct framebuffers.
737 */
738 return intel_crtc->active && crtc->fb &&
739 intel_crtc->config.adjusted_mode.crtc_clock;
752} 740}
753 741
754enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 742enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
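The rewritten vlv_find_best_dpll() above boils down to a two-tier error
metric: a candidate within 100 ppm of the (5x) target wins if it has a
larger post divider; otherwise it must beat the current best error by
more than 10 ppm. A standalone sketch of that comparison, with
simplified types (better_candidate() and its parameters are
illustrative, not driver API):

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdlib.h>

        static bool better_candidate(int dot, int target, int p,
                                     unsigned int *bestppm, int best_p)
        {
                /* error in parts per million of the target clock */
                unsigned int ppm = (uint64_t)1000000 * abs(dot - target) / target;

                /* Tier 1: "close enough" -- prefer the larger post divider. */
                if (ppm < 100 && p > best_p) {
                        *bestppm = 0;
                        return true;
                }

                /* Tier 2: accept only a clear (> 10 ppm) improvement. */
                if (*bestppm >= 10 && ppm < *bestppm - 10) {
                        *bestppm = ppm;
                        return true;
                }

                return false;
        }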
@@ -812,6 +800,25 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
812 DRM_DEBUG_KMS("vblank wait timed out\n"); 800 DRM_DEBUG_KMS("vblank wait timed out\n");
813} 801}
814 802
803static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
804{
805 struct drm_i915_private *dev_priv = dev->dev_private;
806 u32 reg = PIPEDSL(pipe);
807 u32 line1, line2;
808 u32 line_mask;
809
810 if (IS_GEN2(dev))
811 line_mask = DSL_LINEMASK_GEN2;
812 else
813 line_mask = DSL_LINEMASK_GEN3;
814
815 line1 = I915_READ(reg) & line_mask;
816 mdelay(5);
817 line2 = I915_READ(reg) & line_mask;
818
819 return line1 == line2;
820}
821
815/* 822/*
816 * intel_wait_for_pipe_off - wait for pipe to turn off 823 * intel_wait_for_pipe_off - wait for pipe to turn off
817 * @dev: drm device 824 * @dev: drm device
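The jiffies loop removed in the next hunk is replaced by the driver's
wait_for() polling helper. A simplified sketch of that pattern, assuming
<linux/jiffies.h>, <linux/delay.h> and <linux/errno.h> (the real macro
lives in intel_drv.h and differs in detail):

        #define WAIT_FOR_SKETCH(COND, MS) ({                                    \
                unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);       \
                int ret__ = 0;                                                  \
                while (!(COND)) {                                               \
                        if (time_after(jiffies, timeout__)) {                   \
                                /* recheck once to avoid a late-success race */ \
                                ret__ = (COND) ? 0 : -ETIMEDOUT;                \
                                break;                                          \
                        }                                                       \
                        msleep(1);                                              \
                }                                                               \
                ret__;                                                          \
        })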
@@ -843,22 +850,8 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
843 100)) 850 100))
844 WARN(1, "pipe_off wait timed out\n"); 851 WARN(1, "pipe_off wait timed out\n");
845 } else { 852 } else {
846 u32 last_line, line_mask;
847 int reg = PIPEDSL(pipe);
848 unsigned long timeout = jiffies + msecs_to_jiffies(100);
849
850 if (IS_GEN2(dev))
851 line_mask = DSL_LINEMASK_GEN2;
852 else
853 line_mask = DSL_LINEMASK_GEN3;
854
855 /* Wait for the display line to settle */ 853 /* Wait for the display line to settle */
856 do { 854 if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
857 last_line = I915_READ(reg) & line_mask;
858 mdelay(5);
859 } while (((I915_READ(reg) & line_mask) != last_line) &&
860 time_after(timeout, jiffies));
861 if (time_after(jiffies, timeout))
862 WARN(1, "pipe_off wait timed out\n"); 855 WARN(1, "pipe_off wait timed out\n");
863 } 856 }
864} 857}
@@ -929,6 +922,24 @@ void assert_pll(struct drm_i915_private *dev_priv,
929 state_string(state), state_string(cur_state)); 922 state_string(state), state_string(cur_state));
930} 923}
931 924
925/* XXX: the dsi pll is shared between MIPI DSI ports */
926static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
927{
928 u32 val;
929 bool cur_state;
930
931 mutex_lock(&dev_priv->dpio_lock);
932 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
933 mutex_unlock(&dev_priv->dpio_lock);
934
935 cur_state = val & DSI_PLL_VCO_EN;
936 WARN(cur_state != state,
937 "DSI PLL state assertion failure (expected %s, current %s)\n",
938 state_string(state), state_string(cur_state));
939}
940#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
941#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
942
932struct intel_shared_dpll * 943struct intel_shared_dpll *
933intel_crtc_to_shared_dpll(struct intel_crtc *crtc) 944intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
934{ 945{
@@ -1069,6 +1080,26 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1069 pipe_name(pipe)); 1080 pipe_name(pipe));
1070} 1081}
1071 1082
1083static void assert_cursor(struct drm_i915_private *dev_priv,
1084 enum pipe pipe, bool state)
1085{
1086 struct drm_device *dev = dev_priv->dev;
1087 bool cur_state;
1088
1089 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1090 cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
1091 else if (IS_845G(dev) || IS_I865G(dev))
1092 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
1093 else
1094 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1095
1096 WARN(cur_state != state,
1097 "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1098 pipe_name(pipe), state_string(state), state_string(cur_state));
1099}
1100#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1101#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1102
1072void assert_pipe(struct drm_i915_private *dev_priv, 1103void assert_pipe(struct drm_i915_private *dev_priv,
1073 enum pipe pipe, bool state) 1104 enum pipe pipe, bool state)
1074{ 1105{
@@ -1323,6 +1354,26 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1323 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); 1354 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1324} 1355}
1325 1356
1357static void intel_init_dpio(struct drm_device *dev)
1358{
1359 struct drm_i915_private *dev_priv = dev->dev_private;
1360
1361 if (!IS_VALLEYVIEW(dev))
1362 return;
1363
1364 /*
1365 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1366 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
1367 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
1368 * b. The other bits such as sfr settings / modesel may all be set
1369 * to 0.
1370 *
1371 * This should only be done on init and resume from S3 with both
1372 * PLLs disabled, or we risk losing DPIO and PLL synchronization.
1373 */
1374 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
1375}
1376
1326static void vlv_enable_pll(struct intel_crtc *crtc) 1377static void vlv_enable_pll(struct intel_crtc *crtc)
1327{ 1378{
1328 struct drm_device *dev = crtc->base.dev; 1379 struct drm_device *dev = crtc->base.dev;
@@ -1429,6 +1480,20 @@ static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1429 POSTING_READ(DPLL(pipe)); 1480 POSTING_READ(DPLL(pipe));
1430} 1481}
1431 1482
1483static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1484{
1485 u32 val = 0;
1486
1487 /* Make sure the pipe isn't still relying on us */
1488 assert_pipe_disabled(dev_priv, pipe);
1489
1490 /* Leave integrated clock source enabled */
1491 if (pipe == PIPE_B)
1492 val = DPLL_INTEGRATED_CRI_CLK_VLV;
1493 I915_WRITE(DPLL(pipe), val);
1494 POSTING_READ(DPLL(pipe));
1495}
1496
1432void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port) 1497void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
1433{ 1498{
1434 u32 port_mask; 1499 u32 port_mask;
@@ -1661,7 +1726,7 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1661 * returning. 1726 * returning.
1662 */ 1727 */
1663static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, 1728static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1664 bool pch_port) 1729 bool pch_port, bool dsi)
1665{ 1730{
1666 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1731 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1667 pipe); 1732 pipe);
@@ -1670,6 +1735,7 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1670 u32 val; 1735 u32 val;
1671 1736
1672 assert_planes_disabled(dev_priv, pipe); 1737 assert_planes_disabled(dev_priv, pipe);
1738 assert_cursor_disabled(dev_priv, pipe);
1673 assert_sprites_disabled(dev_priv, pipe); 1739 assert_sprites_disabled(dev_priv, pipe);
1674 1740
1675 if (HAS_PCH_LPT(dev_priv->dev)) 1741 if (HAS_PCH_LPT(dev_priv->dev))
@@ -1683,7 +1749,10 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1683 * need the check. 1749 * need the check.
1684 */ 1750 */
1685 if (!HAS_PCH_SPLIT(dev_priv->dev)) 1751 if (!HAS_PCH_SPLIT(dev_priv->dev))
1686 assert_pll_enabled(dev_priv, pipe); 1752 if (dsi)
1753 assert_dsi_pll_enabled(dev_priv);
1754 else
1755 assert_pll_enabled(dev_priv, pipe);
1687 else { 1756 else {
1688 if (pch_port) { 1757 if (pch_port) {
1689 /* if driving the PCH, we need FDI enabled */ 1758 /* if driving the PCH, we need FDI enabled */
@@ -1728,6 +1797,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1728 * or we might hang the display. 1797 * or we might hang the display.
1729 */ 1798 */
1730 assert_planes_disabled(dev_priv, pipe); 1799 assert_planes_disabled(dev_priv, pipe);
1800 assert_cursor_disabled(dev_priv, pipe);
1731 assert_sprites_disabled(dev_priv, pipe); 1801 assert_sprites_disabled(dev_priv, pipe);
1732 1802
1733 /* Don't disable pipe A or pipe A PLLs if needed */ 1803 /* Don't disable pipe A or pipe A PLLs if needed */
@@ -1747,63 +1817,75 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1747 * Plane regs are double buffered, going from enabled->disabled needs a 1817 * Plane regs are double buffered, going from enabled->disabled needs a
1748 * trigger in order to latch. The display address reg provides this. 1818 * trigger in order to latch. The display address reg provides this.
1749 */ 1819 */
1750void intel_flush_display_plane(struct drm_i915_private *dev_priv, 1820void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
1751 enum plane plane) 1821 enum plane plane)
1752{ 1822{
1753 if (dev_priv->info->gen >= 4) 1823 u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
1754 I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); 1824
1755 else 1825 I915_WRITE(reg, I915_READ(reg));
1756 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); 1826 POSTING_READ(reg);
1757} 1827}
1758 1828
1759/** 1829/**
1760 * intel_enable_plane - enable a display plane on a given pipe 1830 * intel_enable_primary_plane - enable the primary plane on a given pipe
1761 * @dev_priv: i915 private structure 1831 * @dev_priv: i915 private structure
1762 * @plane: plane to enable 1832 * @plane: plane to enable
1763 * @pipe: pipe being fed 1833 * @pipe: pipe being fed
1764 * 1834 *
1765 * Enable @plane on @pipe, making sure that @pipe is running first. 1835 * Enable @plane on @pipe, making sure that @pipe is running first.
1766 */ 1836 */
1767static void intel_enable_plane(struct drm_i915_private *dev_priv, 1837static void intel_enable_primary_plane(struct drm_i915_private *dev_priv,
1768 enum plane plane, enum pipe pipe) 1838 enum plane plane, enum pipe pipe)
1769{ 1839{
1840 struct intel_crtc *intel_crtc =
1841 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
1770 int reg; 1842 int reg;
1771 u32 val; 1843 u32 val;
1772 1844
1773 /* If the pipe isn't enabled, we can't pump pixels and may hang */ 1845 /* If the pipe isn't enabled, we can't pump pixels and may hang */
1774 assert_pipe_enabled(dev_priv, pipe); 1846 assert_pipe_enabled(dev_priv, pipe);
1775 1847
1848 WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n");
1849
1850 intel_crtc->primary_enabled = true;
1851
1776 reg = DSPCNTR(plane); 1852 reg = DSPCNTR(plane);
1777 val = I915_READ(reg); 1853 val = I915_READ(reg);
1778 if (val & DISPLAY_PLANE_ENABLE) 1854 if (val & DISPLAY_PLANE_ENABLE)
1779 return; 1855 return;
1780 1856
1781 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); 1857 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1782 intel_flush_display_plane(dev_priv, plane); 1858 intel_flush_primary_plane(dev_priv, plane);
1783 intel_wait_for_vblank(dev_priv->dev, pipe); 1859 intel_wait_for_vblank(dev_priv->dev, pipe);
1784} 1860}
1785 1861
1786/** 1862/**
1787 * intel_disable_plane - disable a display plane 1863 * intel_disable_primary_plane - disable the primary plane
1788 * @dev_priv: i915 private structure 1864 * @dev_priv: i915 private structure
1789 * @plane: plane to disable 1865 * @plane: plane to disable
1790 * @pipe: pipe consuming the data 1866 * @pipe: pipe consuming the data
1791 * 1867 *
1792 * Disable @plane; should be an independent operation. 1868 * Disable @plane; should be an independent operation.
1793 */ 1869 */
1794static void intel_disable_plane(struct drm_i915_private *dev_priv, 1870static void intel_disable_primary_plane(struct drm_i915_private *dev_priv,
1795 enum plane plane, enum pipe pipe) 1871 enum plane plane, enum pipe pipe)
1796{ 1872{
1873 struct intel_crtc *intel_crtc =
1874 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
1797 int reg; 1875 int reg;
1798 u32 val; 1876 u32 val;
1799 1877
1878 WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n");
1879
1880 intel_crtc->primary_enabled = false;
1881
1800 reg = DSPCNTR(plane); 1882 reg = DSPCNTR(plane);
1801 val = I915_READ(reg); 1883 val = I915_READ(reg);
1802 if ((val & DISPLAY_PLANE_ENABLE) == 0) 1884 if ((val & DISPLAY_PLANE_ENABLE) == 0)
1803 return; 1885 return;
1804 1886
1805 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); 1887 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1806 intel_flush_display_plane(dev_priv, plane); 1888 intel_flush_primary_plane(dev_priv, plane);
1807 intel_wait_for_vblank(dev_priv->dev, pipe); 1889 intel_wait_for_vblank(dev_priv->dev, pipe);
1808} 1890}
1809 1891
@@ -1839,10 +1921,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
1839 alignment = 0; 1921 alignment = 0;
1840 break; 1922 break;
1841 case I915_TILING_Y: 1923 case I915_TILING_Y:
1842 /* Despite that we check this in framebuffer_init userspace can 1924 WARN(1, "Y tiled bo slipped through, driver bug!\n");
1843 * screw us over and change the tiling after the fact. Only
1844 * pinned buffers can't change their tiling. */
1845 DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n");
1846 return -EINVAL; 1925 return -EINVAL;
1847 default: 1926 default:
1848 BUG(); 1927 BUG();
@@ -2244,11 +2323,26 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2244 return ret; 2323 return ret;
2245 } 2324 }
2246 2325
2247 /* Update pipe size and adjust fitter if needed */ 2326 /*
2327 * Update pipe size and adjust fitter if needed: the reason for this is
2328 * that in compute_mode_changes we check the native mode (not the pfit
2329 * mode) to see if we can flip rather than do a full mode set. In the
2330 * fastboot case, we'll flip, but if we don't update the pipesrc and
2331 * pfit state, we'll end up with a big fb scanned out into the wrong
2332 * sized surface.
2333 *
2334 * To fix this properly, we need to hoist the checks up into
2335 * compute_mode_changes (or above), check the actual pfit state and
2336 * whether the platform allows pfit disable with pipe active, and only
2337 * then update the pipesrc and pfit state, even on the flip path.
2338 */
2248 if (i915_fastboot) { 2339 if (i915_fastboot) {
2340 const struct drm_display_mode *adjusted_mode =
2341 &intel_crtc->config.adjusted_mode;
2342
2249 I915_WRITE(PIPESRC(intel_crtc->pipe), 2343 I915_WRITE(PIPESRC(intel_crtc->pipe),
2250 ((crtc->mode.hdisplay - 1) << 16) | 2344 ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2251 (crtc->mode.vdisplay - 1)); 2345 (adjusted_mode->crtc_vdisplay - 1));
2252 if (!intel_crtc->config.pch_pfit.enabled && 2346 if (!intel_crtc->config.pch_pfit.enabled &&
2253 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || 2347 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2254 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { 2348 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
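For reference, PIPESRC packs (width - 1) into the high word and
(height - 1) into the low word; the fastboot fix above only changes
which mode those values come from. An illustrative value:

        /* a 1920x1080 source: */
        u32 pipesrc = ((1920 - 1) << 16) | (1080 - 1);  /* 0x077f0437 */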
@@ -2873,6 +2967,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
2873{ 2967{
2874 struct drm_device *dev = crtc->dev; 2968 struct drm_device *dev = crtc->dev;
2875 struct drm_i915_private *dev_priv = dev->dev_private; 2969 struct drm_i915_private *dev_priv = dev->dev_private;
2970 int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
2876 u32 divsel, phaseinc, auxdiv, phasedir = 0; 2971 u32 divsel, phaseinc, auxdiv, phasedir = 0;
2877 u32 temp; 2972 u32 temp;
2878 2973
@@ -2890,14 +2985,14 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
2890 SBI_ICLK); 2985 SBI_ICLK);
2891 2986
2892 /* 20MHz is a corner case which is out of range for the 7-bit divisor */ 2987 /* 20MHz is a corner case which is out of range for the 7-bit divisor */
2893 if (crtc->mode.clock == 20000) { 2988 if (clock == 20000) {
2894 auxdiv = 1; 2989 auxdiv = 1;
2895 divsel = 0x41; 2990 divsel = 0x41;
2896 phaseinc = 0x20; 2991 phaseinc = 0x20;
2897 } else { 2992 } else {
2898 /* The iCLK virtual clock root frequency is in MHz, 2993 /* The iCLK virtual clock root frequency is in MHz,
 2899 * but the crtc->mode.clock is in KHz. To get the divisors, 2994 * but the adjusted_mode->crtc_clock is in KHz. To get the
2900 * it is necessary to divide one by another, so we 2995 * divisors, it is necessary to divide one by another, so we
2901 * convert the virtual clock precision to KHz here for higher 2996 * convert the virtual clock precision to KHz here for higher
2902 * precision. 2997 * precision.
2903 */ 2998 */
@@ -2905,7 +3000,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
2905 u32 iclk_pi_range = 64; 3000 u32 iclk_pi_range = 64;
2906 u32 desired_divisor, msb_divisor_value, pi_value; 3001 u32 desired_divisor, msb_divisor_value, pi_value;
2907 3002
2908 desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock); 3003 desired_divisor = (iclk_virtual_root_freq / clock);
2909 msb_divisor_value = desired_divisor / iclk_pi_range; 3004 msb_divisor_value = desired_divisor / iclk_pi_range;
2910 pi_value = desired_divisor % iclk_pi_range; 3005 pi_value = desired_divisor % iclk_pi_range;
2911 3006
@@ -2921,7 +3016,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
2921 ~SBI_SSCDIVINTPHASE_INCVAL_MASK); 3016 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2922 3017
2923 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", 3018 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2924 crtc->mode.clock, 3019 clock,
2925 auxdiv, 3020 auxdiv,
2926 divsel, 3021 divsel,
2927 phasedir, 3022 phasedir,
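A worked example of the divisor math above, assuming the 172.8 MHz
iclk_virtual_root_freq (expressed in kHz) that the function uses and an
illustrative 112 MHz target clock:

        u32 desired_divisor = 172800000 / 112000;  /* 1542 */
        u32 msb_divisor_value = 1542 / 64;         /* 24 */
        u32 pi_value = 1542 % 64;                  /* 6 */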
@@ -3286,6 +3381,92 @@ static void intel_disable_planes(struct drm_crtc *crtc)
3286 intel_plane_disable(&intel_plane->base); 3381 intel_plane_disable(&intel_plane->base);
3287} 3382}
3288 3383
3384void hsw_enable_ips(struct intel_crtc *crtc)
3385{
3386 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3387
3388 if (!crtc->config.ips_enabled)
3389 return;
3390
3391 /* We can only enable IPS after we enable a plane and wait for a vblank.
3392 * We guarantee that the plane is enabled by calling intel_enable_ips
3393 * only after intel_enable_plane. And intel_enable_plane already waits
3394 * for a vblank, so all we need to do here is to enable the IPS bit. */
3395 assert_plane_enabled(dev_priv, crtc->plane);
3396 I915_WRITE(IPS_CTL, IPS_ENABLE);
3397
3398 /* The bit only becomes 1 in the next vblank, so this wait here is
3399 * essentially intel_wait_for_vblank. If we don't have this and don't
3400 * wait for vblanks until the end of crtc_enable, then the HW state
3401 * readout code will complain that the expected IPS_CTL value is not the
3402 * one we read. */
3403 if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
3404 DRM_ERROR("Timed out waiting for IPS enable\n");
3405}
3406
3407void hsw_disable_ips(struct intel_crtc *crtc)
3408{
3409 struct drm_device *dev = crtc->base.dev;
3410 struct drm_i915_private *dev_priv = dev->dev_private;
3411
3412 if (!crtc->config.ips_enabled)
3413 return;
3414
3415 assert_plane_enabled(dev_priv, crtc->plane);
3416 I915_WRITE(IPS_CTL, 0);
3417 POSTING_READ(IPS_CTL);
3418
3419 /* We need to wait for a vblank before we can disable the plane. */
3420 intel_wait_for_vblank(dev, crtc->pipe);
3421}
3422
3423/** Loads the palette/gamma unit for the CRTC with the prepared values */
3424static void intel_crtc_load_lut(struct drm_crtc *crtc)
3425{
3426 struct drm_device *dev = crtc->dev;
3427 struct drm_i915_private *dev_priv = dev->dev_private;
3428 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3429 enum pipe pipe = intel_crtc->pipe;
3430 int palreg = PALETTE(pipe);
3431 int i;
3432 bool reenable_ips = false;
3433
3434 /* The clocks have to be on to load the palette. */
3435 if (!crtc->enabled || !intel_crtc->active)
3436 return;
3437
3438 if (!HAS_PCH_SPLIT(dev_priv->dev)) {
3439 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
3440 assert_dsi_pll_enabled(dev_priv);
3441 else
3442 assert_pll_enabled(dev_priv, pipe);
3443 }
3444
3445 /* use legacy palette for Ironlake */
3446 if (HAS_PCH_SPLIT(dev))
3447 palreg = LGC_PALETTE(pipe);
3448
3449 /* Workaround: Do not read or write the pipe palette/gamma data while
3450 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3451 */
3452 if (intel_crtc->config.ips_enabled &&
3453 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
3454 GAMMA_MODE_MODE_SPLIT)) {
3455 hsw_disable_ips(intel_crtc);
3456 reenable_ips = true;
3457 }
3458
3459 for (i = 0; i < 256; i++) {
3460 I915_WRITE(palreg + 4 * i,
3461 (intel_crtc->lut_r[i] << 16) |
3462 (intel_crtc->lut_g[i] << 8) |
3463 intel_crtc->lut_b[i]);
3464 }
3465
3466 if (reenable_ips)
3467 hsw_enable_ips(intel_crtc);
3468}
3469
3289static void ironlake_crtc_enable(struct drm_crtc *crtc) 3470static void ironlake_crtc_enable(struct drm_crtc *crtc)
3290{ 3471{
3291 struct drm_device *dev = crtc->dev; 3472 struct drm_device *dev = crtc->dev;
@@ -3305,8 +3486,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3305 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 3486 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
3306 intel_set_pch_fifo_underrun_reporting(dev, pipe, true); 3487 intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
3307 3488
3308 intel_update_watermarks(dev);
3309
3310 for_each_encoder_on_crtc(dev, crtc, encoder) 3489 for_each_encoder_on_crtc(dev, crtc, encoder)
3311 if (encoder->pre_enable) 3490 if (encoder->pre_enable)
3312 encoder->pre_enable(encoder); 3491 encoder->pre_enable(encoder);
@@ -3329,9 +3508,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3329 */ 3508 */
3330 intel_crtc_load_lut(crtc); 3509 intel_crtc_load_lut(crtc);
3331 3510
3511 intel_update_watermarks(crtc);
3332 intel_enable_pipe(dev_priv, pipe, 3512 intel_enable_pipe(dev_priv, pipe,
3333 intel_crtc->config.has_pch_encoder); 3513 intel_crtc->config.has_pch_encoder, false);
3334 intel_enable_plane(dev_priv, plane, pipe); 3514 intel_enable_primary_plane(dev_priv, plane, pipe);
3335 intel_enable_planes(crtc); 3515 intel_enable_planes(crtc);
3336 intel_crtc_update_cursor(crtc, true); 3516 intel_crtc_update_cursor(crtc, true);
3337 3517
@@ -3365,34 +3545,74 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3365 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A; 3545 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
3366} 3546}
3367 3547
3368static void hsw_enable_ips(struct intel_crtc *crtc) 3548static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
3369{ 3549{
3370 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 3550 struct drm_device *dev = crtc->dev;
3551 struct drm_i915_private *dev_priv = dev->dev_private;
3552 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3553 int pipe = intel_crtc->pipe;
3554 int plane = intel_crtc->plane;
3371 3555
3372 if (!crtc->config.ips_enabled) 3556 intel_enable_primary_plane(dev_priv, plane, pipe);
3373 return; 3557 intel_enable_planes(crtc);
3558 intel_crtc_update_cursor(crtc, true);
3374 3559
3375 /* We can only enable IPS after we enable a plane and wait for a vblank. 3560 hsw_enable_ips(intel_crtc);
3376 * We guarantee that the plane is enabled by calling intel_enable_ips 3561
3377 * only after intel_enable_plane. And intel_enable_plane already waits 3562 mutex_lock(&dev->struct_mutex);
3378 * for a vblank, so all we need to do here is to enable the IPS bit. */ 3563 intel_update_fbc(dev);
3379 assert_plane_enabled(dev_priv, crtc->plane); 3564 mutex_unlock(&dev->struct_mutex);
3380 I915_WRITE(IPS_CTL, IPS_ENABLE);
3381} 3565}
3382 3566
3383static void hsw_disable_ips(struct intel_crtc *crtc) 3567static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
3384{ 3568{
3385 struct drm_device *dev = crtc->base.dev; 3569 struct drm_device *dev = crtc->dev;
3386 struct drm_i915_private *dev_priv = dev->dev_private; 3570 struct drm_i915_private *dev_priv = dev->dev_private;
3571 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3572 int pipe = intel_crtc->pipe;
3573 int plane = intel_crtc->plane;
3387 3574
3388 if (!crtc->config.ips_enabled) 3575 intel_crtc_wait_for_pending_flips(crtc);
3389 return; 3576 drm_vblank_off(dev, pipe);
3390 3577
3391 assert_plane_enabled(dev_priv, crtc->plane); 3578 /* FBC must be disabled before disabling the plane on HSW. */
3392 I915_WRITE(IPS_CTL, 0); 3579 if (dev_priv->fbc.plane == plane)
3580 intel_disable_fbc(dev);
3393 3581
3394 /* We need to wait for a vblank before we can disable the plane. */ 3582 hsw_disable_ips(intel_crtc);
3395 intel_wait_for_vblank(dev, crtc->pipe); 3583
3584 intel_crtc_update_cursor(crtc, false);
3585 intel_disable_planes(crtc);
3586 intel_disable_primary_plane(dev_priv, plane, pipe);
3587}
3588
3589/*
3590 * This implements the workaround described in the "notes" section of the mode
3591 * set sequence documentation. When going from no pipes or single pipe to
3592 * multiple pipes, and planes are enabled after the pipe, we need to wait at
3593 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
3594 */
3595static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
3596{
3597 struct drm_device *dev = crtc->base.dev;
3598 struct intel_crtc *crtc_it, *other_active_crtc = NULL;
3599
3600 /* We want to get the other_active_crtc only if there's only 1 other
3601 * active crtc. */
3602 list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) {
3603 if (!crtc_it->active || crtc_it == crtc)
3604 continue;
3605
3606 if (other_active_crtc)
3607 return;
3608
3609 other_active_crtc = crtc_it;
3610 }
3611 if (!other_active_crtc)
3612 return;
3613
3614 intel_wait_for_vblank(dev, other_active_crtc->pipe);
3615 intel_wait_for_vblank(dev, other_active_crtc->pipe);
3396} 3616}
3397 3617
3398static void haswell_crtc_enable(struct drm_crtc *crtc) 3618static void haswell_crtc_enable(struct drm_crtc *crtc)
@@ -3402,7 +3622,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3402 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3622 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3403 struct intel_encoder *encoder; 3623 struct intel_encoder *encoder;
3404 int pipe = intel_crtc->pipe; 3624 int pipe = intel_crtc->pipe;
3405 int plane = intel_crtc->plane;
3406 3625
3407 WARN_ON(!crtc->enabled); 3626 WARN_ON(!crtc->enabled);
3408 3627
@@ -3415,8 +3634,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3415 if (intel_crtc->config.has_pch_encoder) 3634 if (intel_crtc->config.has_pch_encoder)
3416 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); 3635 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
3417 3636
3418 intel_update_watermarks(dev);
3419
3420 if (intel_crtc->config.has_pch_encoder) 3637 if (intel_crtc->config.has_pch_encoder)
3421 dev_priv->display.fdi_link_train(crtc); 3638 dev_priv->display.fdi_link_train(crtc);
3422 3639
@@ -3437,23 +3654,22 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3437 intel_ddi_set_pipe_settings(crtc); 3654 intel_ddi_set_pipe_settings(crtc);
3438 intel_ddi_enable_transcoder_func(crtc); 3655 intel_ddi_enable_transcoder_func(crtc);
3439 3656
3657 intel_update_watermarks(crtc);
3440 intel_enable_pipe(dev_priv, pipe, 3658 intel_enable_pipe(dev_priv, pipe,
3441 intel_crtc->config.has_pch_encoder); 3659 intel_crtc->config.has_pch_encoder, false);
3442 intel_enable_plane(dev_priv, plane, pipe);
3443 intel_enable_planes(crtc);
3444 intel_crtc_update_cursor(crtc, true);
3445
3446 hsw_enable_ips(intel_crtc);
3447 3660
3448 if (intel_crtc->config.has_pch_encoder) 3661 if (intel_crtc->config.has_pch_encoder)
3449 lpt_pch_enable(crtc); 3662 lpt_pch_enable(crtc);
3450 3663
3451 mutex_lock(&dev->struct_mutex); 3664 for_each_encoder_on_crtc(dev, crtc, encoder) {
3452 intel_update_fbc(dev);
3453 mutex_unlock(&dev->struct_mutex);
3454
3455 for_each_encoder_on_crtc(dev, crtc, encoder)
3456 encoder->enable(encoder); 3665 encoder->enable(encoder);
3666 intel_opregion_notify_encoder(encoder, true);
3667 }
3668
3669 /* If we change the relative order between pipe/planes enabling, we need
3670 * to change the workaround. */
3671 haswell_mode_set_planes_workaround(intel_crtc);
3672 haswell_crtc_enable_planes(crtc);
3457 3673
3458 /* 3674 /*
3459 * There seems to be a race in PCH platform hw (at least on some 3675 * There seems to be a race in PCH platform hw (at least on some
@@ -3506,7 +3722,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3506 3722
3507 intel_crtc_update_cursor(crtc, false); 3723 intel_crtc_update_cursor(crtc, false);
3508 intel_disable_planes(crtc); 3724 intel_disable_planes(crtc);
3509 intel_disable_plane(dev_priv, plane, pipe); 3725 intel_disable_primary_plane(dev_priv, plane, pipe);
3510 3726
3511 if (intel_crtc->config.has_pch_encoder) 3727 if (intel_crtc->config.has_pch_encoder)
3512 intel_set_pch_fifo_underrun_reporting(dev, pipe, false); 3728 intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
@@ -3547,7 +3763,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3547 } 3763 }
3548 3764
3549 intel_crtc->active = false; 3765 intel_crtc->active = false;
3550 intel_update_watermarks(dev); 3766 intel_update_watermarks(crtc);
3551 3767
3552 mutex_lock(&dev->struct_mutex); 3768 mutex_lock(&dev->struct_mutex);
3553 intel_update_fbc(dev); 3769 intel_update_fbc(dev);
@@ -3561,27 +3777,17 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
3561 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3777 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3562 struct intel_encoder *encoder; 3778 struct intel_encoder *encoder;
3563 int pipe = intel_crtc->pipe; 3779 int pipe = intel_crtc->pipe;
3564 int plane = intel_crtc->plane;
3565 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 3780 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3566 3781
3567 if (!intel_crtc->active) 3782 if (!intel_crtc->active)
3568 return; 3783 return;
3569 3784
3570 for_each_encoder_on_crtc(dev, crtc, encoder) 3785 haswell_crtc_disable_planes(crtc);
3571 encoder->disable(encoder);
3572 3786
3573 intel_crtc_wait_for_pending_flips(crtc); 3787 for_each_encoder_on_crtc(dev, crtc, encoder) {
3574 drm_vblank_off(dev, pipe); 3788 intel_opregion_notify_encoder(encoder, false);
3575 3789 encoder->disable(encoder);
3576 /* FBC must be disabled before disabling the plane on HSW. */ 3790 }
3577 if (dev_priv->fbc.plane == plane)
3578 intel_disable_fbc(dev);
3579
3580 hsw_disable_ips(intel_crtc);
3581
3582 intel_crtc_update_cursor(crtc, false);
3583 intel_disable_planes(crtc);
3584 intel_disable_plane(dev_priv, plane, pipe);
3585 3791
3586 if (intel_crtc->config.has_pch_encoder) 3792 if (intel_crtc->config.has_pch_encoder)
3587 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false); 3793 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
@@ -3604,7 +3810,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
3604 } 3810 }
3605 3811
3606 intel_crtc->active = false; 3812 intel_crtc->active = false;
3607 intel_update_watermarks(dev); 3813 intel_update_watermarks(crtc);
3608 3814
3609 mutex_lock(&dev->struct_mutex); 3815 mutex_lock(&dev->struct_mutex);
3610 intel_update_fbc(dev); 3816 intel_update_fbc(dev);
@@ -3696,6 +3902,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
3696 struct intel_encoder *encoder; 3902 struct intel_encoder *encoder;
3697 int pipe = intel_crtc->pipe; 3903 int pipe = intel_crtc->pipe;
3698 int plane = intel_crtc->plane; 3904 int plane = intel_crtc->plane;
3905 bool is_dsi;
3699 3906
3700 WARN_ON(!crtc->enabled); 3907 WARN_ON(!crtc->enabled);
3701 3908
@@ -3703,13 +3910,15 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
3703 return; 3910 return;
3704 3911
3705 intel_crtc->active = true; 3912 intel_crtc->active = true;
3706 intel_update_watermarks(dev);
3707 3913
3708 for_each_encoder_on_crtc(dev, crtc, encoder) 3914 for_each_encoder_on_crtc(dev, crtc, encoder)
3709 if (encoder->pre_pll_enable) 3915 if (encoder->pre_pll_enable)
3710 encoder->pre_pll_enable(encoder); 3916 encoder->pre_pll_enable(encoder);
3711 3917
3712 vlv_enable_pll(intel_crtc); 3918 is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
3919
3920 if (!is_dsi)
3921 vlv_enable_pll(intel_crtc);
3713 3922
3714 for_each_encoder_on_crtc(dev, crtc, encoder) 3923 for_each_encoder_on_crtc(dev, crtc, encoder)
3715 if (encoder->pre_enable) 3924 if (encoder->pre_enable)
@@ -3719,8 +3928,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
3719 3928
3720 intel_crtc_load_lut(crtc); 3929 intel_crtc_load_lut(crtc);
3721 3930
3722 intel_enable_pipe(dev_priv, pipe, false); 3931 intel_update_watermarks(crtc);
3723 intel_enable_plane(dev_priv, plane, pipe); 3932 intel_enable_pipe(dev_priv, pipe, false, is_dsi);
3933 intel_enable_primary_plane(dev_priv, plane, pipe);
3724 intel_enable_planes(crtc); 3934 intel_enable_planes(crtc);
3725 intel_crtc_update_cursor(crtc, true); 3935 intel_crtc_update_cursor(crtc, true);
3726 3936
@@ -3745,7 +3955,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
3745 return; 3955 return;
3746 3956
3747 intel_crtc->active = true; 3957 intel_crtc->active = true;
3748 intel_update_watermarks(dev);
3749 3958
3750 for_each_encoder_on_crtc(dev, crtc, encoder) 3959 for_each_encoder_on_crtc(dev, crtc, encoder)
3751 if (encoder->pre_enable) 3960 if (encoder->pre_enable)
@@ -3757,8 +3966,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
3757 3966
3758 intel_crtc_load_lut(crtc); 3967 intel_crtc_load_lut(crtc);
3759 3968
3760 intel_enable_pipe(dev_priv, pipe, false); 3969 intel_update_watermarks(crtc);
3761 intel_enable_plane(dev_priv, plane, pipe); 3970 intel_enable_pipe(dev_priv, pipe, false, false);
3971 intel_enable_primary_plane(dev_priv, plane, pipe);
3762 intel_enable_planes(crtc); 3972 intel_enable_planes(crtc);
3763 /* The fixup needs to happen before cursor is enabled */ 3973 /* The fixup needs to happen before cursor is enabled */
3764 if (IS_G4X(dev)) 3974 if (IS_G4X(dev))
@@ -3814,7 +4024,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3814 intel_crtc_dpms_overlay(intel_crtc, false); 4024 intel_crtc_dpms_overlay(intel_crtc, false);
3815 intel_crtc_update_cursor(crtc, false); 4025 intel_crtc_update_cursor(crtc, false);
3816 intel_disable_planes(crtc); 4026 intel_disable_planes(crtc);
3817 intel_disable_plane(dev_priv, plane, pipe); 4027 intel_disable_primary_plane(dev_priv, plane, pipe);
3818 4028
3819 intel_disable_pipe(dev_priv, pipe); 4029 intel_disable_pipe(dev_priv, pipe);
3820 4030
@@ -3824,11 +4034,15 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3824 if (encoder->post_disable) 4034 if (encoder->post_disable)
3825 encoder->post_disable(encoder); 4035 encoder->post_disable(encoder);
3826 4036
3827 i9xx_disable_pll(dev_priv, pipe); 4037 if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
4038 vlv_disable_pll(dev_priv, pipe);
4039 else if (!IS_VALLEYVIEW(dev))
4040 i9xx_disable_pll(dev_priv, pipe);
3828 4041
3829 intel_crtc->active = false; 4042 intel_crtc->active = false;
4043 intel_update_watermarks(crtc);
4044
3830 intel_update_fbc(dev); 4045 intel_update_fbc(dev);
3831 intel_update_watermarks(dev);
3832} 4046}
3833 4047
3834static void i9xx_crtc_off(struct drm_crtc *crtc) 4048static void i9xx_crtc_off(struct drm_crtc *crtc)
@@ -3902,6 +4116,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
3902 dev_priv->display.off(crtc); 4116 dev_priv->display.off(crtc);
3903 4117
3904 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); 4118 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
4119 assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
3905 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); 4120 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3906 4121
3907 if (crtc->fb) { 4122 if (crtc->fb) {
@@ -4091,8 +4306,7 @@ retry:
4091 */ 4306 */
4092 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; 4307 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
4093 4308
4094 fdi_dotclock = adjusted_mode->clock; 4309 fdi_dotclock = adjusted_mode->crtc_clock;
4095 fdi_dotclock /= pipe_config->pixel_multiplier;
4096 4310
4097 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw, 4311 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
4098 pipe_config->pipe_bpp); 4312 pipe_config->pipe_bpp);
@@ -4134,13 +4348,39 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
4134 struct drm_device *dev = crtc->base.dev; 4348 struct drm_device *dev = crtc->base.dev;
4135 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 4349 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
4136 4350
4137 if (HAS_PCH_SPLIT(dev)) { 4351 /* FIXME should check pixel clock limits on all platforms */
4138 /* FDI link clock is fixed at 2.7G */ 4352 if (INTEL_INFO(dev)->gen < 4) {
4139 if (pipe_config->requested_mode.clock * 3 4353 struct drm_i915_private *dev_priv = dev->dev_private;
4140 > IRONLAKE_FDI_FREQ * 4) 4354 int clock_limit =
4355 dev_priv->display.get_display_clock_speed(dev);
4356
4357 /*
4358 * Enable pixel doubling when the dot clock
4359 * is > 90% of the (display) core speed.
4360 *
4361 * GDG double wide on either pipe,
4362 * otherwise pipe A only.
4363 */
4364 if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
4365 adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
4366 clock_limit *= 2;
4367 pipe_config->double_wide = true;
4368 }
4369
4370 if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
4141 return -EINVAL; 4371 return -EINVAL;
4142 } 4372 }
4143 4373
4374 /*
4375 * Pipe horizontal size must be even in:
4376 * - DVO ganged mode
4377 * - LVDS dual channel mode
4378 * - Double wide pipe
4379 */
4380 if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4381 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
4382 pipe_config->pipe_src_w &= ~1;
4383
4144 /* Cantiga+ cannot handle modes with a hsync front porch of 0. 4384 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
4145 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 4385 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
4146 */ 4386 */
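To make the new 90% rule concrete (numbers illustrative): with a
200 MHz core display clock the single-wide budget is 180 MHz, so a
190 MHz mode on pipe A (or either pipe on 915G) trips the first check,
doubles the limit to 400 MHz, sets double_wide, and then passes the
second check against 360 MHz; the same mode elsewhere on other
gen < 4 parts fails the second check and is rejected:

        /* clock_limit = 200000 kHz, adjusted_mode->crtc_clock = 190000 kHz */
        /* 190000 > 200000 * 9 / 10 (180000)  -> double wide on pipe A/915G */
        /* 190000 > 400000 * 9 / 10 (360000)  -> false, mode accepted */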
@@ -4304,28 +4544,6 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4304 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 4544 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4305} 4545}
4306 4546
4307static int vlv_get_refclk(struct drm_crtc *crtc)
4308{
4309 struct drm_device *dev = crtc->dev;
4310 struct drm_i915_private *dev_priv = dev->dev_private;
4311 int refclk = 27000; /* for DP & HDMI */
4312
4313 return 100000; /* only one validated so far */
4314
4315 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
4316 refclk = 96000;
4317 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
4318 if (intel_panel_use_ssc(dev_priv))
4319 refclk = 100000;
4320 else
4321 refclk = 96000;
4322 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
4323 refclk = 100000;
4324 }
4325
4326 return refclk;
4327}
4328
4329static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) 4547static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4330{ 4548{
4331 struct drm_device *dev = crtc->dev; 4549 struct drm_device *dev = crtc->dev;
@@ -4333,7 +4551,7 @@ static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4333 int refclk; 4551 int refclk;
4334 4552
4335 if (IS_VALLEYVIEW(dev)) { 4553 if (IS_VALLEYVIEW(dev)) {
4336 refclk = vlv_get_refclk(crtc); 4554 refclk = 100000;
4337 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 4555 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4338 intel_panel_use_ssc(dev_priv) && num_connectors < 2) { 4556 intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4339 refclk = dev_priv->vbt.lvds_ssc_freq * 1000; 4557 refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
@@ -4391,7 +4609,8 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
4391 } 4609 }
4392} 4610}
4393 4611
4394static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv) 4612static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
4613 pipe)
4395{ 4614{
4396 u32 reg_val; 4615 u32 reg_val;
4397 4616
@@ -4399,24 +4618,24 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
4399 * PLLB opamp always calibrates to max value of 0x3f, force enable it 4618 * PLLB opamp always calibrates to max value of 0x3f, force enable it
4400 * and set it to a reasonable value instead. 4619 * and set it to a reasonable value instead.
4401 */ 4620 */
4402 reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1)); 4621 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
4403 reg_val &= 0xffffff00; 4622 reg_val &= 0xffffff00;
4404 reg_val |= 0x00000030; 4623 reg_val |= 0x00000030;
4405 vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val); 4624 vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
4406 4625
4407 reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION); 4626 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
4408 reg_val &= 0x8cffffff; 4627 reg_val &= 0x8cffffff;
4409 reg_val = 0x8c000000; 4628 reg_val = 0x8c000000;
4410 vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val); 4629 vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
4411 4630
4412 reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1)); 4631 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
4413 reg_val &= 0xffffff00; 4632 reg_val &= 0xffffff00;
4414 vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val); 4633 vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
4415 4634
4416 reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION); 4635 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
4417 reg_val &= 0x00ffffff; 4636 reg_val &= 0x00ffffff;
4418 reg_val |= 0xb0000000; 4637 reg_val |= 0xb0000000;
4419 vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val); 4638 vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
4420} 4639}
4421 4640
4422static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, 4641static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
@@ -4482,18 +4701,18 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4482 4701
4483 /* PLL B needs special handling */ 4702 /* PLL B needs special handling */
4484 if (pipe) 4703 if (pipe)
4485 vlv_pllb_recal_opamp(dev_priv); 4704 vlv_pllb_recal_opamp(dev_priv, pipe);
4486 4705
4487 /* Set up Tx target for periodic Rcomp update */ 4706 /* Set up Tx target for periodic Rcomp update */
4488 vlv_dpio_write(dev_priv, DPIO_IREF_BCAST, 0x0100000f); 4707 vlv_dpio_write(dev_priv, pipe, DPIO_IREF_BCAST, 0x0100000f);
4489 4708
4490 /* Disable target IRef on PLL */ 4709 /* Disable target IRef on PLL */
4491 reg_val = vlv_dpio_read(dev_priv, DPIO_IREF_CTL(pipe)); 4710 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF_CTL(pipe));
4492 reg_val &= 0x00ffffff; 4711 reg_val &= 0x00ffffff;
4493 vlv_dpio_write(dev_priv, DPIO_IREF_CTL(pipe), reg_val); 4712 vlv_dpio_write(dev_priv, pipe, DPIO_IREF_CTL(pipe), reg_val);
4494 4713
4495 /* Disable fast lock */ 4714 /* Disable fast lock */
4496 vlv_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x610); 4715 vlv_dpio_write(dev_priv, pipe, DPIO_FASTCLK_DISABLE, 0x610);
4497 4716
4498 /* Set idtafcrecal before PLL is enabled */ 4717 /* Set idtafcrecal before PLL is enabled */
4499 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 4718 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
@@ -4507,55 +4726,55 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4507 * Note: don't use the DAC post divider as it seems unstable. 4726 * Note: don't use the DAC post divider as it seems unstable.
4508 */ 4727 */
4509 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 4728 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
4510 vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv); 4729 vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
4511 4730
4512 mdiv |= DPIO_ENABLE_CALIBRATION; 4731 mdiv |= DPIO_ENABLE_CALIBRATION;
4513 vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv); 4732 vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
4514 4733
4515 /* Set HBR and RBR LPF coefficients */ 4734 /* Set HBR and RBR LPF coefficients */
4516 if (crtc->config.port_clock == 162000 || 4735 if (crtc->config.port_clock == 162000 ||
4517 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) || 4736 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
4518 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) 4737 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
4519 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), 4738 vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
4520 0x009f0003); 4739 0x009f0003);
4521 else 4740 else
4522 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), 4741 vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
4523 0x00d0000f); 4742 0x00d0000f);
4524 4743
4525 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) || 4744 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
4526 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) { 4745 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
4527 /* Use SSC source */ 4746 /* Use SSC source */
4528 if (!pipe) 4747 if (!pipe)
4529 vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe), 4748 vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
4530 0x0df40000); 4749 0x0df40000);
4531 else 4750 else
4532 vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe), 4751 vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
4533 0x0df70000); 4752 0x0df70000);
4534 } else { /* HDMI or VGA */ 4753 } else { /* HDMI or VGA */
4535 /* Use bend source */ 4754 /* Use bend source */
4536 if (!pipe) 4755 if (!pipe)
4537 vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe), 4756 vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
4538 0x0df70000); 4757 0x0df70000);
4539 else 4758 else
4540 vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe), 4759 vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
4541 0x0df40000); 4760 0x0df40000);
4542 } 4761 }
4543 4762
4544 coreclk = vlv_dpio_read(dev_priv, DPIO_CORE_CLK(pipe)); 4763 coreclk = vlv_dpio_read(dev_priv, pipe, DPIO_CORE_CLK(pipe));
4545 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 4764 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
4546 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) || 4765 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
4547 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) 4766 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
4548 coreclk |= 0x01000000; 4767 coreclk |= 0x01000000;
4549 vlv_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), coreclk); 4768 vlv_dpio_write(dev_priv, pipe, DPIO_CORE_CLK(pipe), coreclk);
4550 4769
4551 vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000); 4770 vlv_dpio_write(dev_priv, pipe, DPIO_PLL_CML(pipe), 0x87871000);
4552 4771
4553 /* Enable DPIO clock input */ 4772 /* Enable DPIO clock input */
4554 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | 4773 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
4555 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; 4774 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
4556 if (pipe) 4775 /* We should never disable this, set it here for state tracking */
4776 if (pipe == PIPE_B)
4557 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 4777 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
4558
4559 dpll |= DPLL_VCO_ENABLE; 4778 dpll |= DPLL_VCO_ENABLE;
4560 crtc->config.dpll_hw_state.dpll = dpll; 4779 crtc->config.dpll_hw_state.dpll = dpll;
4561 4780
@@ -4693,7 +4912,6 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
4693 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 4912 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
4694 struct drm_display_mode *adjusted_mode = 4913 struct drm_display_mode *adjusted_mode =
4695 &intel_crtc->config.adjusted_mode; 4914 &intel_crtc->config.adjusted_mode;
4696 struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
4697 uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end; 4915 uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
4698 4916
 4699 /* We need to be careful not to change the adjusted mode, for otherwise 4917
@@ -4746,7 +4964,8 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
4746 * always be the user's requested size. 4964 * always be the user's requested size.
4747 */ 4965 */
4748 I915_WRITE(PIPESRC(pipe), 4966 I915_WRITE(PIPESRC(pipe),
4749 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 4967 ((intel_crtc->config.pipe_src_w - 1) << 16) |
4968 (intel_crtc->config.pipe_src_h - 1));
4750} 4969}
4751 4970
4752static void intel_get_pipe_timings(struct intel_crtc *crtc, 4971static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@ -4784,8 +5003,11 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
4784 } 5003 }
4785 5004
4786 tmp = I915_READ(PIPESRC(crtc->pipe)); 5005 tmp = I915_READ(PIPESRC(crtc->pipe));
4787 pipe_config->requested_mode.vdisplay = (tmp & 0xffff) + 1; 5006 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
4788 pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1; 5007 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
5008
5009 pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
5010 pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
4789} 5011}
4790 5012
4791static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc, 5013static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
@@ -4805,7 +5027,7 @@ static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
4805 5027
4806 crtc->mode.flags = pipe_config->adjusted_mode.flags; 5028 crtc->mode.flags = pipe_config->adjusted_mode.flags;
4807 5029
4808 crtc->mode.clock = pipe_config->adjusted_mode.clock; 5030 crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock;
4809 crtc->mode.flags |= pipe_config->adjusted_mode.flags; 5031 crtc->mode.flags |= pipe_config->adjusted_mode.flags;
4810} 5032}
4811 5033
@@ -4821,17 +5043,8 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
4821 I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE) 5043 I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
4822 pipeconf |= PIPECONF_ENABLE; 5044 pipeconf |= PIPECONF_ENABLE;
4823 5045
4824 if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) { 5046 if (intel_crtc->config.double_wide)
4825 /* Enable pixel doubling when the dot clock is > 90% of the (display) 5047 pipeconf |= PIPECONF_DOUBLE_WIDE;
4826 * core speed.
4827 *
4828 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
4829 * pipe == 0 check?
4830 */
4831 if (intel_crtc->config.requested_mode.clock >
4832 dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
4833 pipeconf |= PIPECONF_DOUBLE_WIDE;
4834 }
4835 5048
4836 /* only g4x and later have fancy bpc/dither controls */ 5049 /* only g4x and later have fancy bpc/dither controls */
4837 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { 5050 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
@@ -4885,14 +5098,13 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4885 struct drm_device *dev = crtc->dev; 5098 struct drm_device *dev = crtc->dev;
4886 struct drm_i915_private *dev_priv = dev->dev_private; 5099 struct drm_i915_private *dev_priv = dev->dev_private;
4887 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5100 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4888 struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
4889 int pipe = intel_crtc->pipe; 5101 int pipe = intel_crtc->pipe;
4890 int plane = intel_crtc->plane; 5102 int plane = intel_crtc->plane;
4891 int refclk, num_connectors = 0; 5103 int refclk, num_connectors = 0;
4892 intel_clock_t clock, reduced_clock; 5104 intel_clock_t clock, reduced_clock;
4893 u32 dspcntr; 5105 u32 dspcntr;
4894 bool ok, has_reduced_clock = false; 5106 bool ok, has_reduced_clock = false;
4895 bool is_lvds = false; 5107 bool is_lvds = false, is_dsi = false;
4896 struct intel_encoder *encoder; 5108 struct intel_encoder *encoder;
4897 const intel_limit_t *limit; 5109 const intel_limit_t *limit;
4898 int ret; 5110 int ret;
@@ -4902,42 +5114,49 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4902 case INTEL_OUTPUT_LVDS: 5114 case INTEL_OUTPUT_LVDS:
4903 is_lvds = true; 5115 is_lvds = true;
4904 break; 5116 break;
5117 case INTEL_OUTPUT_DSI:
5118 is_dsi = true;
5119 break;
4905 } 5120 }
4906 5121
4907 num_connectors++; 5122 num_connectors++;
4908 } 5123 }
4909 5124
4910 refclk = i9xx_get_refclk(crtc, num_connectors); 5125 if (is_dsi)
5126 goto skip_dpll;
4911 5127
4912 /* 5128 if (!intel_crtc->config.clock_set) {
4913 * Returns a set of divisors for the desired target clock with the given 5129 refclk = i9xx_get_refclk(crtc, num_connectors);
4914 * refclk, or FALSE. The returned values represent the clock equation:
4915 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4916 */
4917 limit = intel_limit(crtc, refclk);
4918 ok = dev_priv->display.find_dpll(limit, crtc,
4919 intel_crtc->config.port_clock,
4920 refclk, NULL, &clock);
4921 if (!ok && !intel_crtc->config.clock_set) {
4922 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4923 return -EINVAL;
4924 }
4925 5130
4926 if (is_lvds && dev_priv->lvds_downclock_avail) {
4927 /* 5131 /*
4928 * Ensure we match the reduced clock's P to the target clock. 5132 * Returns a set of divisors for the desired target clock with
4929 * If the clocks don't match, we can't switch the display clock 5133 * the given refclk, or FALSE. The returned values represent
4930 * by using the FP0/FP1. In such a case we will disable the LVDS 5134 * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
4931 * downclock feature. 5135 * 2) / p1 / p2.
4932 */ 5136 */
4933 has_reduced_clock = 5137 limit = intel_limit(crtc, refclk);
4934 dev_priv->display.find_dpll(limit, crtc, 5138 ok = dev_priv->display.find_dpll(limit, crtc,
4935 dev_priv->lvds_downclock, 5139 intel_crtc->config.port_clock,
4936 refclk, &clock, 5140 refclk, NULL, &clock);
4937 &reduced_clock); 5141 if (!ok) {
4938 } 5142 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4939 /* Compat-code for transition, will disappear. */ 5143 return -EINVAL;
4940 if (!intel_crtc->config.clock_set) { 5144 }
5145
5146 if (is_lvds && dev_priv->lvds_downclock_avail) {
5147 /*
5148 * Ensure we match the reduced clock's P to the target
5149 * clock. If the clocks don't match, we can't switch
5150 * the display clock by using the FP0/FP1. In such a case
5151 * we will disable the LVDS downclock feature.
5152 */
5153 has_reduced_clock =
5154 dev_priv->display.find_dpll(limit, crtc,
5155 dev_priv->lvds_downclock,
5156 refclk, &clock,
5157 &reduced_clock);
5158 }
5159 /* Compat-code for transition, will disappear. */
4941 intel_crtc->config.dpll.n = clock.n; 5160 intel_crtc->config.dpll.n = clock.n;
4942 intel_crtc->config.dpll.m1 = clock.m1; 5161 intel_crtc->config.dpll.m1 = clock.m1;
4943 intel_crtc->config.dpll.m2 = clock.m2; 5162 intel_crtc->config.dpll.m2 = clock.m2;
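The divisor equation quoted in the comment above can be checked with a small standalone helper; this is only a sketch, with the intel_clock_t fields spelled out as plain parameters and made-up divisor values in the usage note.

/* dot = refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2,
 * with refclk and the result in kHz. */
static long i9xx_dot_example(long refclk, int m1, int m2, int n, int p1, int p2)
{
        long m = 5 * (m1 + 2) + (m2 + 2);

        return refclk * m / (n + 2) / p1 / p2;
}

/* e.g. i9xx_dot_example(96000, 10, 5, 4, 2, 10) == 53600 kHz (made-up divisors) */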
@@ -4945,17 +5164,19 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4945 intel_crtc->config.dpll.p2 = clock.p2; 5164 intel_crtc->config.dpll.p2 = clock.p2;
4946 } 5165 }
4947 5166
4948 if (IS_GEN2(dev)) 5167 if (IS_GEN2(dev)) {
4949 i8xx_update_pll(intel_crtc, 5168 i8xx_update_pll(intel_crtc,
4950 has_reduced_clock ? &reduced_clock : NULL, 5169 has_reduced_clock ? &reduced_clock : NULL,
4951 num_connectors); 5170 num_connectors);
4952 else if (IS_VALLEYVIEW(dev)) 5171 } else if (IS_VALLEYVIEW(dev)) {
4953 vlv_update_pll(intel_crtc); 5172 vlv_update_pll(intel_crtc);
4954 else 5173 } else {
4955 i9xx_update_pll(intel_crtc, 5174 i9xx_update_pll(intel_crtc,
4956 has_reduced_clock ? &reduced_clock : NULL, 5175 has_reduced_clock ? &reduced_clock : NULL,
4957 num_connectors); 5176 num_connectors);
5177 }
4958 5178
5179skip_dpll:
4959 /* Set up the display plane register */ 5180 /* Set up the display plane register */
4960 dspcntr = DISPPLANE_GAMMA_ENABLE; 5181 dspcntr = DISPPLANE_GAMMA_ENABLE;
4961 5182
@@ -4972,8 +5193,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4972 * which should always be the user's requested size. 5193 * which should always be the user's requested size.
4973 */ 5194 */
4974 I915_WRITE(DSPSIZE(plane), 5195 I915_WRITE(DSPSIZE(plane),
4975 ((mode->vdisplay - 1) << 16) | 5196 ((intel_crtc->config.pipe_src_h - 1) << 16) |
4976 (mode->hdisplay - 1)); 5197 (intel_crtc->config.pipe_src_w - 1));
4977 I915_WRITE(DSPPOS(plane), 0); 5198 I915_WRITE(DSPPOS(plane), 0);
4978 5199
4979 i9xx_set_pipeconf(intel_crtc); 5200 i9xx_set_pipeconf(intel_crtc);
@@ -4983,8 +5204,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4983 5204
4984 ret = intel_pipe_set_base(crtc, x, y, fb); 5205 ret = intel_pipe_set_base(crtc, x, y, fb);
4985 5206
4986 intel_update_watermarks(dev);
4987
4988 return ret; 5207 return ret;
4989} 5208}
4990 5209
@@ -5015,6 +5234,32 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
5015 I915_READ(LVDS) & LVDS_BORDER_ENABLE; 5234 I915_READ(LVDS) & LVDS_BORDER_ENABLE;
5016} 5235}
5017 5236
5237static void vlv_crtc_clock_get(struct intel_crtc *crtc,
5238 struct intel_crtc_config *pipe_config)
5239{
5240 struct drm_device *dev = crtc->base.dev;
5241 struct drm_i915_private *dev_priv = dev->dev_private;
5242 int pipe = pipe_config->cpu_transcoder;
5243 intel_clock_t clock;
5244 u32 mdiv;
5245 int refclk = 100000;
5246
5247 mutex_lock(&dev_priv->dpio_lock);
5248 mdiv = vlv_dpio_read(dev_priv, pipe, DPIO_DIV(pipe));
5249 mutex_unlock(&dev_priv->dpio_lock);
5250
5251 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
5252 clock.m2 = mdiv & DPIO_M2DIV_MASK;
5253 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
5254 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
5255 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
5256
5257 vlv_clock(refclk, &clock);
5258
5259 /* clock.dot is the fast clock */
5260 pipe_config->port_clock = clock.dot / 5;
5261}
5262
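vlv_crtc_clock_get() above is a straight bitfield decode of one DPIO register. A generic sketch of that pattern follows; the shift positions here are placeholders, only the field widths match the & 7 / & 0xf / & 0x1f masks in the patch.

#include <stdint.h>

static inline uint32_t mdiv_field(uint32_t v, int shift, int bits)
{
        return (v >> shift) & ((1u << bits) - 1);
}

/* With assumed shifts: m1 and p1 are 3-bit fields, n is 4 bits,
 * p2 is 5 bits, mirroring the masks used in the decode above. */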
5018static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 5263static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5019 struct intel_crtc_config *pipe_config) 5264 struct intel_crtc_config *pipe_config)
5020{ 5265{
@@ -5045,6 +5290,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5045 } 5290 }
5046 } 5291 }
5047 5292
5293 if (INTEL_INFO(dev)->gen < 4)
5294 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
5295
5048 intel_get_pipe_timings(crtc, pipe_config); 5296 intel_get_pipe_timings(crtc, pipe_config);
5049 5297
5050 i9xx_get_pfit_config(crtc, pipe_config); 5298 i9xx_get_pfit_config(crtc, pipe_config);
@@ -5077,6 +5325,11 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5077 DPLL_PORTB_READY_MASK); 5325 DPLL_PORTB_READY_MASK);
5078 } 5326 }
5079 5327
5328 if (IS_VALLEYVIEW(dev))
5329 vlv_crtc_clock_get(crtc, pipe_config);
5330 else
5331 i9xx_crtc_clock_get(crtc, pipe_config);
5332
5080 return true; 5333 return true;
5081} 5334}
5082 5335
@@ -5819,11 +6072,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5819 else 6072 else
5820 intel_crtc->lowfreq_avail = false; 6073 intel_crtc->lowfreq_avail = false;
5821 6074
5822 if (intel_crtc->config.has_pch_encoder) {
5823 pll = intel_crtc_to_shared_dpll(intel_crtc);
5824
5825 }
5826
5827 intel_set_pipe_timings(intel_crtc); 6075 intel_set_pipe_timings(intel_crtc);
5828 6076
5829 if (intel_crtc->config.has_pch_encoder) { 6077 if (intel_crtc->config.has_pch_encoder) {
@@ -5839,25 +6087,67 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5839 6087
5840 ret = intel_pipe_set_base(crtc, x, y, fb); 6088 ret = intel_pipe_set_base(crtc, x, y, fb);
5841 6089
5842 intel_update_watermarks(dev);
5843
5844 return ret; 6090 return ret;
5845} 6091}
5846 6092
5847static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, 6093static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
5848 struct intel_crtc_config *pipe_config) 6094 struct intel_link_m_n *m_n)
6095{
6096 struct drm_device *dev = crtc->base.dev;
6097 struct drm_i915_private *dev_priv = dev->dev_private;
6098 enum pipe pipe = crtc->pipe;
6099
6100 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
6101 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
6102 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
6103 & ~TU_SIZE_MASK;
6104 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
6105 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
6106 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6107}
6108
6109static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
6110 enum transcoder transcoder,
6111 struct intel_link_m_n *m_n)
5849{ 6112{
5850 struct drm_device *dev = crtc->base.dev; 6113 struct drm_device *dev = crtc->base.dev;
5851 struct drm_i915_private *dev_priv = dev->dev_private; 6114 struct drm_i915_private *dev_priv = dev->dev_private;
5852 enum transcoder transcoder = pipe_config->cpu_transcoder; 6115 enum pipe pipe = crtc->pipe;
6116
6117 if (INTEL_INFO(dev)->gen >= 5) {
6118 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
6119 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
6120 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
6121 & ~TU_SIZE_MASK;
6122 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
6123 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
6124 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6125 } else {
6126 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
6127 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
6128 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
6129 & ~TU_SIZE_MASK;
6130 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
6131 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
6132 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6133 }
6134}
5853 6135
5854 pipe_config->fdi_m_n.link_m = I915_READ(PIPE_LINK_M1(transcoder)); 6136void intel_dp_get_m_n(struct intel_crtc *crtc,
5855 pipe_config->fdi_m_n.link_n = I915_READ(PIPE_LINK_N1(transcoder)); 6137 struct intel_crtc_config *pipe_config)
5856 pipe_config->fdi_m_n.gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) 6138{
5857 & ~TU_SIZE_MASK; 6139 if (crtc->config.has_pch_encoder)
5858 pipe_config->fdi_m_n.gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); 6140 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
5859 pipe_config->fdi_m_n.tu = ((I915_READ(PIPE_DATA_M1(transcoder)) 6141 else
5860 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 6142 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6143 &pipe_config->dp_m_n);
6144}
6145
6146static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
6147 struct intel_crtc_config *pipe_config)
6148{
6149 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6150 &pipe_config->fdi_m_n);
5861} 6151}
5862 6152
5863static void ironlake_get_pfit_config(struct intel_crtc *crtc, 6153static void ironlake_get_pfit_config(struct intel_crtc *crtc,
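Both m_n readout helpers above rely on the DATA_M registers carrying two values at once: the transfer-unit size in a high-order field and the data M value in the remaining bits. A sketch of that split, assuming a 6-bit TU field at bit 25 (the exact layout lives in i915_reg.h, not in this hunk):

#include <stdint.h>

#define EX_TU_SHIFT     25
#define EX_TU_MASK      (0x3fu << EX_TU_SHIFT)

static void data_m_unpack(uint32_t reg, uint32_t *gmch_m, int *tu)
{
        *gmch_m = reg & ~EX_TU_MASK;
        /* the field stores tu - 1, hence the + 1 in the readouts above */
        *tu = (int)((reg & EX_TU_MASK) >> EX_TU_SHIFT) + 1;
}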
@@ -5946,6 +6236,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5946 pipe_config->pixel_multiplier = 6236 pipe_config->pixel_multiplier =
5947 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) 6237 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
5948 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; 6238 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
6239
6240 ironlake_pch_clock_get(crtc, pipe_config);
5949 } else { 6241 } else {
5950 pipe_config->pixel_multiplier = 1; 6242 pipe_config->pixel_multiplier = 1;
5951 } 6243 }
@@ -6002,8 +6294,8 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
6002 * register. Callers should take care of disabling all the display engine 6294 * register. Callers should take care of disabling all the display engine
6003 * functions, doing the mode unset, fixing interrupts, etc. 6295 * functions, doing the mode unset, fixing interrupts, etc.
6004 */ 6296 */
6005void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 6297static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
6006 bool switch_to_fclk, bool allow_power_down) 6298 bool switch_to_fclk, bool allow_power_down)
6007{ 6299{
6008 uint32_t val; 6300 uint32_t val;
6009 6301
@@ -6031,7 +6323,10 @@ void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
6031 6323
6032 val = I915_READ(D_COMP); 6324 val = I915_READ(D_COMP);
6033 val |= D_COMP_COMP_DISABLE; 6325 val |= D_COMP_COMP_DISABLE;
6034 I915_WRITE(D_COMP, val); 6326 mutex_lock(&dev_priv->rps.hw_lock);
6327 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
6328 DRM_ERROR("Failed to disable D_COMP\n");
6329 mutex_unlock(&dev_priv->rps.hw_lock);
6035 POSTING_READ(D_COMP); 6330 POSTING_READ(D_COMP);
6036 ndelay(100); 6331 ndelay(100);
6037 6332
@@ -6050,7 +6345,7 @@ void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
6050 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 6345 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
6051 * source. 6346 * source.
6052 */ 6347 */
6053void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 6348static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6054{ 6349{
6055 uint32_t val; 6350 uint32_t val;
6056 6351
@@ -6073,7 +6368,10 @@ void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6073 val = I915_READ(D_COMP); 6368 val = I915_READ(D_COMP);
6074 val |= D_COMP_COMP_FORCE; 6369 val |= D_COMP_COMP_FORCE;
6075 val &= ~D_COMP_COMP_DISABLE; 6370 val &= ~D_COMP_COMP_DISABLE;
6076 I915_WRITE(D_COMP, val); 6371 mutex_lock(&dev_priv->rps.hw_lock);
6372 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
6373 DRM_ERROR("Failed to enable D_COMP\n");
6374 mutex_unlock(&dev_priv->rps.hw_lock);
6077 POSTING_READ(D_COMP); 6375 POSTING_READ(D_COMP);
6078 6376
6079 val = I915_READ(LCPLL_CTL); 6377 val = I915_READ(LCPLL_CTL);
@@ -6256,22 +6554,79 @@ static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
6256 } 6554 }
6257} 6555}
6258 6556
6259static void haswell_modeset_global_resources(struct drm_device *dev) 6557#define for_each_power_domain(domain, mask) \
6558 for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
6559 if ((1 << (domain)) & (mask))
6560
6561static unsigned long get_pipe_power_domains(struct drm_device *dev,
6562 enum pipe pipe, bool pfit_enabled)
6260{ 6563{
6261 bool enable = false; 6564 unsigned long mask;
6565 enum transcoder transcoder;
6566
6567 transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
6568
6569 mask = BIT(POWER_DOMAIN_PIPE(pipe));
6570 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
6571 if (pfit_enabled)
6572 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6573
6574 return mask;
6575}
6576
6577void intel_display_set_init_power(struct drm_device *dev, bool enable)
6578{
6579 struct drm_i915_private *dev_priv = dev->dev_private;
6580
6581 if (dev_priv->power_domains.init_power_on == enable)
6582 return;
6583
6584 if (enable)
6585 intel_display_power_get(dev, POWER_DOMAIN_INIT);
6586 else
6587 intel_display_power_put(dev, POWER_DOMAIN_INIT);
6588
6589 dev_priv->power_domains.init_power_on = enable;
6590}
6591
6592static void modeset_update_power_wells(struct drm_device *dev)
6593{
6594 unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
6262 struct intel_crtc *crtc; 6595 struct intel_crtc *crtc;
6263 6596
6597 /*
6598 * First get all needed power domains, then put all unneeded, to avoid
6599 * any unnecessary toggling of the power wells.
6600 */
6264 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 6601 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
6602 enum intel_display_power_domain domain;
6603
6265 if (!crtc->base.enabled) 6604 if (!crtc->base.enabled)
6266 continue; 6605 continue;
6267 6606
6268 if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled || 6607 pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
6269 crtc->config.cpu_transcoder != TRANSCODER_EDP) 6608 crtc->pipe,
6270 enable = true; 6609 crtc->config.pch_pfit.enabled);
6610
6611 for_each_power_domain(domain, pipe_domains[crtc->pipe])
6612 intel_display_power_get(dev, domain);
6271 } 6613 }
6272 6614
6273 intel_set_power_well(dev, enable); 6615 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
6616 enum intel_display_power_domain domain;
6617
6618 for_each_power_domain(domain, crtc->enabled_power_domains)
6619 intel_display_power_put(dev, domain);
6620
6621 crtc->enabled_power_domains = pipe_domains[crtc->pipe];
6622 }
6274 6623
6624 intel_display_set_init_power(dev, false);
6625}
6626
6627static void haswell_modeset_global_resources(struct drm_device *dev)
6628{
6629 modeset_update_power_wells(dev);
6275 hsw_update_package_c8(dev); 6630 hsw_update_package_c8(dev);
6276} 6631}
6277 6632
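modeset_update_power_wells() takes every newly needed domain reference before putting any old one; with refcounted wells, the opposite order could let a still-needed well drop to zero and power-cycle. A toy refcount model of the same idea (names and printouts are illustrative only):

#include <stdio.h>

static int well_refcount;

static void toy_power_get(void)
{
        if (well_refcount++ == 0)
                printf("well powered up\n");    /* expensive transition */
}

static void toy_power_put(void)
{
        if (--well_refcount == 0)
                printf("well powered down\n");  /* expensive transition */
}

int main(void)
{
        toy_power_get();        /* old config holds one reference */

        /* get-new-then-put-old: the count never touches zero */
        toy_power_get();
        toy_power_put();
        return 0;
}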
@@ -6310,8 +6665,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
6310 6665
6311 ret = intel_pipe_set_base(crtc, x, y, fb); 6666 ret = intel_pipe_set_base(crtc, x, y, fb);
6312 6667
6313 intel_update_watermarks(dev);
6314
6315 return ret; 6668 return ret;
6316} 6669}
6317 6670
@@ -6419,6 +6772,44 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
6419 return 0; 6772 return 0;
6420} 6773}
6421 6774
6775static struct {
6776 int clock;
6777 u32 config;
6778} hdmi_audio_clock[] = {
6779 { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
6780 { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
6781 { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
6782 { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
6783 { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
6784 { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
6785 { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
6786 { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
6787 { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
6788 { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
6789};
6790
6791/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
6792static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
6793{
6794 int i;
6795
6796 for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
6797 if (mode->clock == hdmi_audio_clock[i].clock)
6798 break;
6799 }
6800
6801 if (i == ARRAY_SIZE(hdmi_audio_clock)) {
6802 DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
6803 i = 1;
6804 }
6805
6806 DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
6807 hdmi_audio_clock[i].clock,
6808 hdmi_audio_clock[i].config);
6809
6810 return hdmi_audio_clock[i].config;
6811}
6812
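The fractional entries in hdmi_audio_clock[] are the 1000/1001 "NTSC-family" variants of the base pixel clocks, and the arithmetic can be verified standalone:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        printf("%d\n", DIV_ROUND_UP(25200 * 1000, 1001));      /* 25175 */
        printf("%d\n", DIV_ROUND_UP(74250 * 1000, 1001));      /* 74176 */
        printf("%d\n", DIV_ROUND_UP(148500 * 1000, 1001));     /* 148352 */
        return 0;
}

On a lookup miss, audio_config_hdmi_pixel_clock() falls back to index 1, i.e. the 25.2 MHz entry annotated "default per bspec".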
6422static bool intel_eld_uptodate(struct drm_connector *connector, 6813static bool intel_eld_uptodate(struct drm_connector *connector,
6423 int reg_eldv, uint32_t bits_eldv, 6814 int reg_eldv, uint32_t bits_eldv,
6424 int reg_elda, uint32_t bits_elda, 6815 int reg_elda, uint32_t bits_elda,
@@ -6449,7 +6840,8 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
6449} 6840}
6450 6841
6451static void g4x_write_eld(struct drm_connector *connector, 6842static void g4x_write_eld(struct drm_connector *connector,
6452 struct drm_crtc *crtc) 6843 struct drm_crtc *crtc,
6844 struct drm_display_mode *mode)
6453{ 6845{
6454 struct drm_i915_private *dev_priv = connector->dev->dev_private; 6846 struct drm_i915_private *dev_priv = connector->dev->dev_private;
6455 uint8_t *eld = connector->eld; 6847 uint8_t *eld = connector->eld;
@@ -6489,7 +6881,8 @@ static void g4x_write_eld(struct drm_connector *connector,
6489} 6881}
6490 6882
6491static void haswell_write_eld(struct drm_connector *connector, 6883static void haswell_write_eld(struct drm_connector *connector,
6492 struct drm_crtc *crtc) 6884 struct drm_crtc *crtc,
6885 struct drm_display_mode *mode)
6493{ 6886{
6494 struct drm_i915_private *dev_priv = connector->dev->dev_private; 6887 struct drm_i915_private *dev_priv = connector->dev->dev_private;
6495 uint8_t *eld = connector->eld; 6888 uint8_t *eld = connector->eld;
@@ -6542,8 +6935,9 @@ static void haswell_write_eld(struct drm_connector *connector,
6542 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); 6935 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
6543 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ 6936 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
6544 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ 6937 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
6545 } else 6938 } else {
6546 I915_WRITE(aud_config, 0); 6939 I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
6940 }
6547 6941
6548 if (intel_eld_uptodate(connector, 6942 if (intel_eld_uptodate(connector,
6549 aud_cntrl_st2, eldv, 6943 aud_cntrl_st2, eldv,
@@ -6576,7 +6970,8 @@ static void haswell_write_eld(struct drm_connector *connector,
6576} 6970}
6577 6971
6578static void ironlake_write_eld(struct drm_connector *connector, 6972static void ironlake_write_eld(struct drm_connector *connector,
6579 struct drm_crtc *crtc) 6973 struct drm_crtc *crtc,
6974 struct drm_display_mode *mode)
6580{ 6975{
6581 struct drm_i915_private *dev_priv = connector->dev->dev_private; 6976 struct drm_i915_private *dev_priv = connector->dev->dev_private;
6582 uint8_t *eld = connector->eld; 6977 uint8_t *eld = connector->eld;
@@ -6620,8 +7015,9 @@ static void ironlake_write_eld(struct drm_connector *connector,
6620 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); 7015 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
6621 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ 7016 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
6622 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ 7017 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
6623 } else 7018 } else {
6624 I915_WRITE(aud_config, 0); 7019 I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
7020 }
6625 7021
6626 if (intel_eld_uptodate(connector, 7022 if (intel_eld_uptodate(connector,
6627 aud_cntrl_st2, eldv, 7023 aud_cntrl_st2, eldv,
@@ -6671,50 +7067,7 @@ void intel_write_eld(struct drm_encoder *encoder,
6671 connector->eld[6] = drm_av_sync_delay(connector, mode) / 2; 7067 connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
6672 7068
6673 if (dev_priv->display.write_eld) 7069 if (dev_priv->display.write_eld)
6674 dev_priv->display.write_eld(connector, crtc); 7070 dev_priv->display.write_eld(connector, crtc, mode);
6675}
6676
6677/** Loads the palette/gamma unit for the CRTC with the prepared values */
6678void intel_crtc_load_lut(struct drm_crtc *crtc)
6679{
6680 struct drm_device *dev = crtc->dev;
6681 struct drm_i915_private *dev_priv = dev->dev_private;
6682 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6683 enum pipe pipe = intel_crtc->pipe;
6684 int palreg = PALETTE(pipe);
6685 int i;
6686 bool reenable_ips = false;
6687
6688 /* The clocks have to be on to load the palette. */
6689 if (!crtc->enabled || !intel_crtc->active)
6690 return;
6691
6692 if (!HAS_PCH_SPLIT(dev_priv->dev))
6693 assert_pll_enabled(dev_priv, pipe);
6694
6695 /* use legacy palette for Ironlake */
6696 if (HAS_PCH_SPLIT(dev))
6697 palreg = LGC_PALETTE(pipe);
6698
6699 /* Workaround: Do not read or write the pipe palette/gamma data while
6700 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6701 */
6702 if (intel_crtc->config.ips_enabled &&
6703 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
6704 GAMMA_MODE_MODE_SPLIT)) {
6705 hsw_disable_ips(intel_crtc);
6706 reenable_ips = true;
6707 }
6708
6709 for (i = 0; i < 256; i++) {
6710 I915_WRITE(palreg + 4 * i,
6711 (intel_crtc->lut_r[i] << 16) |
6712 (intel_crtc->lut_g[i] << 8) |
6713 intel_crtc->lut_b[i]);
6714 }
6715
6716 if (reenable_ips)
6717 hsw_enable_ips(intel_crtc);
6718} 7071}
6719 7072
6720static void i845_update_cursor(struct drm_crtc *crtc, u32 base) 7073static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
@@ -6812,23 +7165,20 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
6812 int pipe = intel_crtc->pipe; 7165 int pipe = intel_crtc->pipe;
6813 int x = intel_crtc->cursor_x; 7166 int x = intel_crtc->cursor_x;
6814 int y = intel_crtc->cursor_y; 7167 int y = intel_crtc->cursor_y;
6815 u32 base, pos; 7168 u32 base = 0, pos = 0;
6816 bool visible; 7169 bool visible;
6817 7170
6818 pos = 0; 7171 if (on)
6819
6820 if (on && crtc->enabled && crtc->fb) {
6821 base = intel_crtc->cursor_addr; 7172 base = intel_crtc->cursor_addr;
6822 if (x > (int) crtc->fb->width)
6823 base = 0;
6824 7173
6825 if (y > (int) crtc->fb->height) 7174 if (x >= intel_crtc->config.pipe_src_w)
6826 base = 0; 7175 base = 0;
6827 } else 7176
7177 if (y >= intel_crtc->config.pipe_src_h)
6828 base = 0; 7178 base = 0;
6829 7179
6830 if (x < 0) { 7180 if (x < 0) {
6831 if (x + intel_crtc->cursor_width < 0) 7181 if (x + intel_crtc->cursor_width <= 0)
6832 base = 0; 7182 base = 0;
6833 7183
6834 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 7184 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
@@ -6837,7 +7187,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
6837 pos |= x << CURSOR_X_SHIFT; 7187 pos |= x << CURSOR_X_SHIFT;
6838 7188
6839 if (y < 0) { 7189 if (y < 0) {
6840 if (y + intel_crtc->cursor_height < 0) 7190 if (y + intel_crtc->cursor_height <= 0)
6841 base = 0; 7191 base = 0;
6842 7192
6843 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 7193 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
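The CURPOS programming above uses sign-magnitude coordinates: when x or y is negative, a sign bit is set and the absolute value is written instead. A sketch with an assumed sign-bit position:

#include <stdint.h>
#include <stdlib.h>

#define EX_POS_SIGN     (1u << 15)      /* assumed bit position */

static uint32_t curpos_half(int v)
{
        uint32_t field = (uint32_t)abs(v) & 0x7fff;

        if (v < 0)
                field |= EX_POS_SIGN;
        return field;
}

/* pos = (curpos_half(y) << 16) | curpos_half(x) */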
@@ -6980,8 +7330,8 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6980{ 7330{
6981 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7331 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6982 7332
6983 intel_crtc->cursor_x = x; 7333 intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
6984 intel_crtc->cursor_y = y; 7334 intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
6985 7335
6986 if (intel_crtc->active) 7336 if (intel_crtc->active)
6987 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); 7337 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
@@ -6989,27 +7339,6 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6989 return 0; 7339 return 0;
6990} 7340}
6991 7341
6992/** Sets the color ramps on behalf of RandR */
6993void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
6994 u16 blue, int regno)
6995{
6996 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6997
6998 intel_crtc->lut_r[regno] = red >> 8;
6999 intel_crtc->lut_g[regno] = green >> 8;
7000 intel_crtc->lut_b[regno] = blue >> 8;
7001}
7002
7003void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
7004 u16 *blue, int regno)
7005{
7006 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7007
7008 *red = intel_crtc->lut_r[regno] << 8;
7009 *green = intel_crtc->lut_g[regno] << 8;
7010 *blue = intel_crtc->lut_b[regno] << 8;
7011}
7012
7013static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 7342static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
7014 u16 *blue, uint32_t start, uint32_t size) 7343 u16 *blue, uint32_t start, uint32_t size)
7015{ 7344{
@@ -7045,14 +7374,21 @@ intel_framebuffer_create(struct drm_device *dev,
7045 return ERR_PTR(-ENOMEM); 7374 return ERR_PTR(-ENOMEM);
7046 } 7375 }
7047 7376
7377 ret = i915_mutex_lock_interruptible(dev);
7378 if (ret)
7379 goto err;
7380
7048 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); 7381 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
7049 if (ret) { 7382 mutex_unlock(&dev->struct_mutex);
7050 drm_gem_object_unreference_unlocked(&obj->base); 7383 if (ret)
7051 kfree(intel_fb); 7384 goto err;
7052 return ERR_PTR(ret);
7053 }
7054 7385
7055 return &intel_fb->base; 7386 return &intel_fb->base;
7387err:
7388 drm_gem_object_unreference_unlocked(&obj->base);
7389 kfree(intel_fb);
7390
7391 return ERR_PTR(ret);
7056} 7392}
7057 7393
7058static u32 7394static u32
@@ -7095,6 +7431,7 @@ static struct drm_framebuffer *
7095mode_fits_in_fbdev(struct drm_device *dev, 7431mode_fits_in_fbdev(struct drm_device *dev,
7096 struct drm_display_mode *mode) 7432 struct drm_display_mode *mode)
7097{ 7433{
7434#ifdef CONFIG_DRM_I915_FBDEV
7098 struct drm_i915_private *dev_priv = dev->dev_private; 7435 struct drm_i915_private *dev_priv = dev->dev_private;
7099 struct drm_i915_gem_object *obj; 7436 struct drm_i915_gem_object *obj;
7100 struct drm_framebuffer *fb; 7437 struct drm_framebuffer *fb;
@@ -7115,6 +7452,9 @@ mode_fits_in_fbdev(struct drm_device *dev,
7115 return NULL; 7452 return NULL;
7116 7453
7117 return fb; 7454 return fb;
7455#else
7456 return NULL;
7457#endif
7118} 7458}
7119 7459
7120bool intel_get_load_detect_pipe(struct drm_connector *connector, 7460bool intel_get_load_detect_pipe(struct drm_connector *connector,
@@ -7258,6 +7598,22 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
7258 mutex_unlock(&crtc->mutex); 7598 mutex_unlock(&crtc->mutex);
7259} 7599}
7260 7600
7601static int i9xx_pll_refclk(struct drm_device *dev,
7602 const struct intel_crtc_config *pipe_config)
7603{
7604 struct drm_i915_private *dev_priv = dev->dev_private;
7605 u32 dpll = pipe_config->dpll_hw_state.dpll;
7606
7607 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
7608 return dev_priv->vbt.lvds_ssc_freq * 1000;
7609 else if (HAS_PCH_SPLIT(dev))
7610 return 120000;
7611 else if (!IS_GEN2(dev))
7612 return 96000;
7613 else
7614 return 48000;
7615}
7616
7261/* Returns the clock of the currently programmed mode of the given pipe. */ 7617/* Returns the clock of the currently programmed mode of the given pipe. */
7262static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 7618static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7263 struct intel_crtc_config *pipe_config) 7619 struct intel_crtc_config *pipe_config)
@@ -7265,14 +7621,15 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7265 struct drm_device *dev = crtc->base.dev; 7621 struct drm_device *dev = crtc->base.dev;
7266 struct drm_i915_private *dev_priv = dev->dev_private; 7622 struct drm_i915_private *dev_priv = dev->dev_private;
7267 int pipe = pipe_config->cpu_transcoder; 7623 int pipe = pipe_config->cpu_transcoder;
7268 u32 dpll = I915_READ(DPLL(pipe)); 7624 u32 dpll = pipe_config->dpll_hw_state.dpll;
7269 u32 fp; 7625 u32 fp;
7270 intel_clock_t clock; 7626 intel_clock_t clock;
7627 int refclk = i9xx_pll_refclk(dev, pipe_config);
7271 7628
7272 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 7629 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
7273 fp = I915_READ(FP0(pipe)); 7630 fp = pipe_config->dpll_hw_state.fp0;
7274 else 7631 else
7275 fp = I915_READ(FP1(pipe)); 7632 fp = pipe_config->dpll_hw_state.fp1;
7276 7633
7277 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 7634 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
7278 if (IS_PINEVIEW(dev)) { 7635 if (IS_PINEVIEW(dev)) {
@@ -7303,14 +7660,13 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7303 default: 7660 default:
7304 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " 7661 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
7305 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 7662 "mode\n", (int)(dpll & DPLL_MODE_MASK));
7306 pipe_config->adjusted_mode.clock = 0;
7307 return; 7663 return;
7308 } 7664 }
7309 7665
7310 if (IS_PINEVIEW(dev)) 7666 if (IS_PINEVIEW(dev))
7311 pineview_clock(96000, &clock); 7667 pineview_clock(refclk, &clock);
7312 else 7668 else
7313 i9xx_clock(96000, &clock); 7669 i9xx_clock(refclk, &clock);
7314 } else { 7670 } else {
7315 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); 7671 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
7316 7672
@@ -7318,13 +7674,6 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7318 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 7674 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
7319 DPLL_FPA01_P1_POST_DIV_SHIFT); 7675 DPLL_FPA01_P1_POST_DIV_SHIFT);
7320 clock.p2 = 14; 7676 clock.p2 = 14;
7321
7322 if ((dpll & PLL_REF_INPUT_MASK) ==
7323 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
7324 /* XXX: might not be 66MHz */
7325 i9xx_clock(66000, &clock);
7326 } else
7327 i9xx_clock(48000, &clock);
7328 } else { 7677 } else {
7329 if (dpll & PLL_P1_DIVIDE_BY_TWO) 7678 if (dpll & PLL_P1_DIVIDE_BY_TWO)
7330 clock.p1 = 2; 7679 clock.p1 = 2;
@@ -7336,59 +7685,55 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7336 clock.p2 = 4; 7685 clock.p2 = 4;
7337 else 7686 else
7338 clock.p2 = 2; 7687 clock.p2 = 2;
7339
7340 i9xx_clock(48000, &clock);
7341 } 7688 }
7689
7690 i9xx_clock(refclk, &clock);
7342 } 7691 }
7343 7692
7344 pipe_config->adjusted_mode.clock = clock.dot; 7693 /*
7694 * This value includes pixel_multiplier. We will use
7695 * port_clock to compute adjusted_mode.crtc_clock in the
7696 * encoder's get_config() function.
7697 */
7698 pipe_config->port_clock = clock.dot;
7345} 7699}
7346 7700
7347static void ironlake_crtc_clock_get(struct intel_crtc *crtc, 7701int intel_dotclock_calculate(int link_freq,
7348 struct intel_crtc_config *pipe_config) 7702 const struct intel_link_m_n *m_n)
7349{ 7703{
7350 struct drm_device *dev = crtc->base.dev;
7351 struct drm_i915_private *dev_priv = dev->dev_private;
7352 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7353 int link_freq, repeat;
7354 u64 clock;
7355 u32 link_m, link_n;
7356
7357 repeat = pipe_config->pixel_multiplier;
7358
7359 /* 7704 /*
7360 * The calculation for the data clock is: 7705 * The calculation for the data clock is:
7361 * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp 7706 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
7362 * But we want to avoid losing precision if possible, so: 7707 * But we want to avoid losing precision if possible, so:
7363 * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp)) 7708 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
7364 * 7709 *
7365 * and the link clock is simpler: 7710 * and the link clock is simpler:
7366 * link_clock = (m * link_clock * repeat) / n 7711 * link_clock = (m * link_clock) / n
7367 */ 7712 */
7368 7713
7369 /* 7714 if (!m_n->link_n)
7370 * We need to get the FDI or DP link clock here to derive 7715 return 0;
7371 * the M/N dividers.
7372 *
7373 * For FDI, we read it from the BIOS or use a fixed 2.7GHz.
7374 * For DP, it's either 1.62GHz or 2.7GHz.
7375 * We do our calculations in 10*MHz since we don't need much precision.
7376 */
7377 if (pipe_config->has_pch_encoder)
7378 link_freq = intel_fdi_link_freq(dev) * 10000;
7379 else
7380 link_freq = pipe_config->port_clock;
7381 7716
7382 link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder)); 7717 return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
7383 link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder)); 7718}
7384 7719
7385 if (!link_m || !link_n) 7720static void ironlake_pch_clock_get(struct intel_crtc *crtc,
7386 return; 7721 struct intel_crtc_config *pipe_config)
7722{
7723 struct drm_device *dev = crtc->base.dev;
7387 7724
7388 clock = ((u64)link_m * (u64)link_freq * (u64)repeat); 7725 /* read out port_clock from the DPLL */
7389 do_div(clock, link_n); 7726 i9xx_crtc_clock_get(crtc, pipe_config);
7390 7727
7391 pipe_config->adjusted_mode.clock = clock; 7728 /*
7729 * This value does not include pixel_multiplier.
7730 * We will check that port_clock and adjusted_mode.crtc_clock
7731 * agree once we know their relationship in the encoder's
7732 * get_config() function.
7733 */
7734 pipe_config->adjusted_mode.crtc_clock =
7735 intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
7736 &pipe_config->fdi_m_n);
7392} 7737}
7393 7738
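intel_dotclock_calculate() above is a scaled division; a standalone version with a worked example follows, where the m/n pair is chosen purely to land on a 148.5 MHz dot clock over a 2.7 GHz FDI link (both in kHz):

#include <stdint.h>
#include <stdio.h>

static int dotclock(int link_freq, uint32_t link_m, uint32_t link_n)
{
        if (!link_n)
                return 0;
        /* widen before multiplying, as div_u64() does in the patch */
        return (int)(((uint64_t)link_m * (uint64_t)link_freq) / link_n);
}

int main(void)
{
        printf("%d kHz\n", dotclock(2700000, 11, 200));        /* 148500 */
        return 0;
}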
7394/** Returns the currently programmed mode of the given pipe. */ 7739/** Returns the currently programmed mode of the given pipe. */
@@ -7404,6 +7749,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
7404 int hsync = I915_READ(HSYNC(cpu_transcoder)); 7749 int hsync = I915_READ(HSYNC(cpu_transcoder));
7405 int vtot = I915_READ(VTOTAL(cpu_transcoder)); 7750 int vtot = I915_READ(VTOTAL(cpu_transcoder));
7406 int vsync = I915_READ(VSYNC(cpu_transcoder)); 7751 int vsync = I915_READ(VSYNC(cpu_transcoder));
7752 enum pipe pipe = intel_crtc->pipe;
7407 7753
7408 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 7754 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
7409 if (!mode) 7755 if (!mode)
@@ -7416,11 +7762,14 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
7416 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need 7762 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
7417 * to use a real value here instead. 7763 * to use a real value here instead.
7418 */ 7764 */
7419 pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe; 7765 pipe_config.cpu_transcoder = (enum transcoder) pipe;
7420 pipe_config.pixel_multiplier = 1; 7766 pipe_config.pixel_multiplier = 1;
7767 pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
7768 pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
7769 pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
7421 i9xx_crtc_clock_get(intel_crtc, &pipe_config); 7770 i9xx_crtc_clock_get(intel_crtc, &pipe_config);
7422 7771
7423 mode->clock = pipe_config.adjusted_mode.clock; 7772 mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
7424 mode->hdisplay = (htot & 0xffff) + 1; 7773 mode->hdisplay = (htot & 0xffff) + 1;
7425 mode->htotal = ((htot & 0xffff0000) >> 16) + 1; 7774 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
7426 mode->hsync_start = (hsync & 0xffff) + 1; 7775 mode->hsync_start = (hsync & 0xffff) + 1;
@@ -7526,6 +7875,9 @@ void intel_mark_idle(struct drm_device *dev)
7526 7875
7527 intel_decrease_pllclock(crtc); 7876 intel_decrease_pllclock(crtc);
7528 } 7877 }
7878
7879 if (dev_priv->info->gen >= 6)
7880 gen6_rps_idle(dev->dev_private);
7529} 7881}
7530 7882
7531void intel_mark_fb_busy(struct drm_i915_gem_object *obj, 7883void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
@@ -7714,7 +8066,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
7714 intel_ring_emit(ring, 0); /* aux display base address, unused */ 8066 intel_ring_emit(ring, 0); /* aux display base address, unused */
7715 8067
7716 intel_mark_page_flip_active(intel_crtc); 8068 intel_mark_page_flip_active(intel_crtc);
7717 intel_ring_advance(ring); 8069 __intel_ring_advance(ring);
7718 return 0; 8070 return 0;
7719 8071
7720err_unpin: 8072err_unpin:
@@ -7756,7 +8108,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
7756 intel_ring_emit(ring, MI_NOOP); 8108 intel_ring_emit(ring, MI_NOOP);
7757 8109
7758 intel_mark_page_flip_active(intel_crtc); 8110 intel_mark_page_flip_active(intel_crtc);
7759 intel_ring_advance(ring); 8111 __intel_ring_advance(ring);
7760 return 0; 8112 return 0;
7761 8113
7762err_unpin: 8114err_unpin:
@@ -7805,7 +8157,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
7805 intel_ring_emit(ring, pf | pipesrc); 8157 intel_ring_emit(ring, pf | pipesrc);
7806 8158
7807 intel_mark_page_flip_active(intel_crtc); 8159 intel_mark_page_flip_active(intel_crtc);
7808 intel_ring_advance(ring); 8160 __intel_ring_advance(ring);
7809 return 0; 8161 return 0;
7810 8162
7811err_unpin: 8163err_unpin:
@@ -7850,7 +8202,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
7850 intel_ring_emit(ring, pf | pipesrc); 8202 intel_ring_emit(ring, pf | pipesrc);
7851 8203
7852 intel_mark_page_flip_active(intel_crtc); 8204 intel_mark_page_flip_active(intel_crtc);
7853 intel_ring_advance(ring); 8205 __intel_ring_advance(ring);
7854 return 0; 8206 return 0;
7855 8207
7856err_unpin: 8208err_unpin:
@@ -7929,7 +8281,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
7929 intel_ring_emit(ring, (MI_NOOP)); 8281 intel_ring_emit(ring, (MI_NOOP));
7930 8282
7931 intel_mark_page_flip_active(intel_crtc); 8283 intel_mark_page_flip_active(intel_crtc);
7932 intel_ring_advance(ring); 8284 __intel_ring_advance(ring);
7933 return 0; 8285 return 0;
7934 8286
7935err_unpin: 8287err_unpin:
@@ -7974,7 +8326,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7974 fb->pitches[0] != crtc->fb->pitches[0])) 8326 fb->pitches[0] != crtc->fb->pitches[0]))
7975 return -EINVAL; 8327 return -EINVAL;
7976 8328
7977 work = kzalloc(sizeof *work, GFP_KERNEL); 8329 work = kzalloc(sizeof(*work), GFP_KERNEL);
7978 if (work == NULL) 8330 if (work == NULL)
7979 return -ENOMEM; 8331 return -ENOMEM;
7980 8332
@@ -8209,6 +8561,17 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
8209 return bpp; 8561 return bpp;
8210} 8562}
8211 8563
8564static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
8565{
8566 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
8567 "type: 0x%x flags: 0x%x\n",
8568 mode->crtc_clock,
8569 mode->crtc_hdisplay, mode->crtc_hsync_start,
8570 mode->crtc_hsync_end, mode->crtc_htotal,
8571 mode->crtc_vdisplay, mode->crtc_vsync_start,
8572 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
8573}
8574
8212static void intel_dump_pipe_config(struct intel_crtc *crtc, 8575static void intel_dump_pipe_config(struct intel_crtc *crtc,
8213 struct intel_crtc_config *pipe_config, 8576 struct intel_crtc_config *pipe_config,
8214 const char *context) 8577 const char *context)
@@ -8225,10 +8588,19 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
8225 pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n, 8588 pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
8226 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n, 8589 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
8227 pipe_config->fdi_m_n.tu); 8590 pipe_config->fdi_m_n.tu);
8591 DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
8592 pipe_config->has_dp_encoder,
8593 pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
8594 pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
8595 pipe_config->dp_m_n.tu);
8228 DRM_DEBUG_KMS("requested mode:\n"); 8596 DRM_DEBUG_KMS("requested mode:\n");
8229 drm_mode_debug_printmodeline(&pipe_config->requested_mode); 8597 drm_mode_debug_printmodeline(&pipe_config->requested_mode);
8230 DRM_DEBUG_KMS("adjusted mode:\n"); 8598 DRM_DEBUG_KMS("adjusted mode:\n");
8231 drm_mode_debug_printmodeline(&pipe_config->adjusted_mode); 8599 drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
8600 intel_dump_crtc_timings(&pipe_config->adjusted_mode);
8601 DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
8602 DRM_DEBUG_KMS("pipe src size: %dx%d\n",
8603 pipe_config->pipe_src_w, pipe_config->pipe_src_h);
8232 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", 8604 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
8233 pipe_config->gmch_pfit.control, 8605 pipe_config->gmch_pfit.control,
8234 pipe_config->gmch_pfit.pgm_ratios, 8606 pipe_config->gmch_pfit.pgm_ratios,
@@ -8238,6 +8610,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
8238 pipe_config->pch_pfit.size, 8610 pipe_config->pch_pfit.size,
8239 pipe_config->pch_pfit.enabled ? "enabled" : "disabled"); 8611 pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
8240 DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled); 8612 DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
8613 DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
8241} 8614}
8242 8615
8243static bool check_encoder_cloning(struct drm_crtc *crtc) 8616static bool check_encoder_cloning(struct drm_crtc *crtc)
@@ -8281,6 +8654,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
8281 8654
8282 drm_mode_copy(&pipe_config->adjusted_mode, mode); 8655 drm_mode_copy(&pipe_config->adjusted_mode, mode);
8283 drm_mode_copy(&pipe_config->requested_mode, mode); 8656 drm_mode_copy(&pipe_config->requested_mode, mode);
8657
8284 pipe_config->cpu_transcoder = 8658 pipe_config->cpu_transcoder =
8285 (enum transcoder) to_intel_crtc(crtc)->pipe; 8659 (enum transcoder) to_intel_crtc(crtc)->pipe;
8286 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 8660 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -8307,13 +8681,25 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
8307 if (plane_bpp < 0) 8681 if (plane_bpp < 0)
8308 goto fail; 8682 goto fail;
8309 8683
8684 /*
8685 * Determine the real pipe dimensions. Note that stereo modes can
8686 * increase the actual pipe size due to the frame doubling and
8687 * insertion of additional space for blanks between the frame. This
8688 * is stored in the crtc timings. We use the requested mode to do this
8689 * computation to clearly distinguish it from the adjusted mode, which
8690 * can be changed by the connectors in the below retry loop.
8691 */
8692 drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
8693 pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
8694 pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
8695
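One concrete case of the comment above, using HDMI frame packing purely as an example: the two eye views plus the original vertical blank travel as one tall frame, so the crtc vertical size roughly doubles.

/* left eye + original vertical blank + right eye */
static int frame_packed_vdisplay(int vdisplay, int vtotal)
{
        return 2 * vdisplay + (vtotal - vdisplay);
}
/* 1080p (1080 active / 1125 total): 2 * 1080 + 45 == 2205 lines */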
8310encoder_retry: 8696encoder_retry:
8311 /* Ensure the port clock defaults are reset when retrying. */ 8697 /* Ensure the port clock defaults are reset when retrying. */
8312 pipe_config->port_clock = 0; 8698 pipe_config->port_clock = 0;
8313 pipe_config->pixel_multiplier = 1; 8699 pipe_config->pixel_multiplier = 1;
8314 8700
8315 /* Fill in default crtc timings, allow encoders to overwrite them. */ 8701 /* Fill in default crtc timings, allow encoders to overwrite them. */
8316 drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0); 8702 drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
8317 8703
8318 /* Pass our mode to the connectors and the CRTC to give them a chance to 8704 /* Pass our mode to the connectors and the CRTC to give them a chance to
8319 * adjust it according to limitations or connector properties, and also 8705 * adjust it according to limitations or connector properties, and also
@@ -8334,7 +8720,8 @@ encoder_retry:
8334 /* Set default port clock if not overwritten by the encoder. Needs to be 8720 /* Set default port clock if not overwritten by the encoder. Needs to be
8335 * done afterwards in case the encoder adjusts the mode. */ 8721 * done afterwards in case the encoder adjusts the mode. */
8336 if (!pipe_config->port_clock) 8722 if (!pipe_config->port_clock)
8337 pipe_config->port_clock = pipe_config->adjusted_mode.clock; 8723 pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
8724 * pipe_config->pixel_multiplier;
8338 8725
8339 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); 8726 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
8340 if (ret < 0) { 8727 if (ret < 0) {
@@ -8521,13 +8908,9 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
8521 8908
8522} 8909}
8523 8910
8524static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur, 8911static bool intel_fuzzy_clock_check(int clock1, int clock2)
8525 struct intel_crtc_config *new)
8526{ 8912{
8527 int clock1, clock2, diff; 8913 int diff;
8528
8529 clock1 = cur->adjusted_mode.clock;
8530 clock2 = new->adjusted_mode.clock;
8531 8914
8532 if (clock1 == clock2) 8915 if (clock1 == clock2)
8533 return true; 8916 return true;
@@ -8581,6 +8964,15 @@ intel_pipe_config_compare(struct drm_device *dev,
8581 return false; \ 8964 return false; \
8582 } 8965 }
8583 8966
8967#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
8968 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
8969 DRM_ERROR("mismatch in " #name " " \
8970 "(expected %i, found %i)\n", \
8971 current_config->name, \
8972 pipe_config->name); \
8973 return false; \
8974 }
8975
8584#define PIPE_CONF_QUIRK(quirk) \ 8976#define PIPE_CONF_QUIRK(quirk) \
8585 ((current_config->quirks | pipe_config->quirks) & (quirk)) 8977 ((current_config->quirks | pipe_config->quirks) & (quirk))
8586 8978
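The body of intel_fuzzy_clock_check() is mostly elided from this hunk, so the sketch below assumes a 5% tolerance window; it is paired with a miniature version of the stringizing check macro added above.

#include <stdio.h>
#include <stdlib.h>

static int fuzzy_clock_check(int clock1, int clock2)
{
        if (clock1 == clock2)
                return 1;
        if (clock1 == 0)
                return 0;
        /* assumed tolerance: within 5% of the expected clock */
        return abs(clock1 - clock2) * 100 / clock1 < 5;
}

#define CHECK_CLOCK_FUZZY(cur, new, name) \
        do { \
                if (!fuzzy_clock_check((cur)->name, (new)->name)) \
                        fprintf(stderr, "mismatch in " #name "\n"); \
        } while (0)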
@@ -8594,6 +8986,13 @@ intel_pipe_config_compare(struct drm_device *dev,
8594 PIPE_CONF_CHECK_I(fdi_m_n.link_n); 8986 PIPE_CONF_CHECK_I(fdi_m_n.link_n);
8595 PIPE_CONF_CHECK_I(fdi_m_n.tu); 8987 PIPE_CONF_CHECK_I(fdi_m_n.tu);
8596 8988
8989 PIPE_CONF_CHECK_I(has_dp_encoder);
8990 PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
8991 PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
8992 PIPE_CONF_CHECK_I(dp_m_n.link_m);
8993 PIPE_CONF_CHECK_I(dp_m_n.link_n);
8994 PIPE_CONF_CHECK_I(dp_m_n.tu);
8995
8597 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay); 8996 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
8598 PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal); 8997 PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
8599 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start); 8998 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
@@ -8624,8 +9023,8 @@ intel_pipe_config_compare(struct drm_device *dev,
8624 DRM_MODE_FLAG_NVSYNC); 9023 DRM_MODE_FLAG_NVSYNC);
8625 } 9024 }
8626 9025
8627 PIPE_CONF_CHECK_I(requested_mode.hdisplay); 9026 PIPE_CONF_CHECK_I(pipe_src_w);
8628 PIPE_CONF_CHECK_I(requested_mode.vdisplay); 9027 PIPE_CONF_CHECK_I(pipe_src_h);
8629 9028
8630 PIPE_CONF_CHECK_I(gmch_pfit.control); 9029 PIPE_CONF_CHECK_I(gmch_pfit.control);
8631 /* pfit ratios are autocomputed by the hw on gen4+ */ 9030 /* pfit ratios are autocomputed by the hw on gen4+ */
@@ -8640,6 +9039,8 @@ intel_pipe_config_compare(struct drm_device *dev,
8640 9039
8641 PIPE_CONF_CHECK_I(ips_enabled); 9040 PIPE_CONF_CHECK_I(ips_enabled);
8642 9041
9042 PIPE_CONF_CHECK_I(double_wide);
9043
8643 PIPE_CONF_CHECK_I(shared_dpll); 9044 PIPE_CONF_CHECK_I(shared_dpll);
8644 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 9045 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8645 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 9046 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
@@ -8649,20 +9050,17 @@ intel_pipe_config_compare(struct drm_device *dev,
8649 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) 9050 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
8650 PIPE_CONF_CHECK_I(pipe_bpp); 9051 PIPE_CONF_CHECK_I(pipe_bpp);
8651 9052
9053 if (!IS_HASWELL(dev)) {
9054 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
9055 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
9056 }
9057
8652#undef PIPE_CONF_CHECK_X 9058#undef PIPE_CONF_CHECK_X
8653#undef PIPE_CONF_CHECK_I 9059#undef PIPE_CONF_CHECK_I
8654#undef PIPE_CONF_CHECK_FLAGS 9060#undef PIPE_CONF_CHECK_FLAGS
9061#undef PIPE_CONF_CHECK_CLOCK_FUZZY
8655#undef PIPE_CONF_QUIRK 9062#undef PIPE_CONF_QUIRK
8656 9063
8657 if (!IS_HASWELL(dev)) {
8658 if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
8659 DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
8660 current_config->adjusted_mode.clock,
8661 pipe_config->adjusted_mode.clock);
8662 return false;
8663 }
8664 }
8665
8666 return true; 9064 return true;
8667} 9065}
8668 9066
@@ -8794,9 +9192,6 @@ check_crtc_state(struct drm_device *dev)
8794 encoder->get_config(encoder, &pipe_config); 9192 encoder->get_config(encoder, &pipe_config);
8795 } 9193 }
8796 9194
8797 if (dev_priv->display.get_clock)
8798 dev_priv->display.get_clock(crtc, &pipe_config);
8799
8800 WARN(crtc->active != active, 9195 WARN(crtc->active != active,
8801 "crtc active state doesn't match with hw state " 9196 "crtc active state doesn't match with hw state "
8802 "(expected %i, found %i)\n", crtc->active, active); 9197 "(expected %i, found %i)\n", crtc->active, active);
@@ -8871,6 +9266,18 @@ intel_modeset_check_state(struct drm_device *dev)
8871 check_shared_dpll_state(dev); 9266 check_shared_dpll_state(dev);
8872} 9267}
8873 9268
9269void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
9270 int dotclock)
9271{
9272 /*
9273 * FDI already provided one idea for the dotclock.
9274 * Yell if the encoder disagrees.
9275 */
9276 WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
9277 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
9278 pipe_config->adjusted_mode.crtc_clock, dotclock);
9279}
9280
8874static int __intel_set_mode(struct drm_crtc *crtc, 9281static int __intel_set_mode(struct drm_crtc *crtc,
8875 struct drm_display_mode *mode, 9282 struct drm_display_mode *mode,
8876 int x, int y, struct drm_framebuffer *fb) 9283 int x, int y, struct drm_framebuffer *fb)
@@ -8883,7 +9290,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
8883 unsigned disable_pipes, prepare_pipes, modeset_pipes; 9290 unsigned disable_pipes, prepare_pipes, modeset_pipes;
8884 int ret = 0; 9291 int ret = 0;
8885 9292
8886 saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL); 9293 saved_mode = kcalloc(2, sizeof(*saved_mode), GFP_KERNEL);
8887 if (!saved_mode) 9294 if (!saved_mode)
8888 return -ENOMEM; 9295 return -ENOMEM;
8889 saved_hwmode = saved_mode + 1; 9296 saved_hwmode = saved_mode + 1;
@@ -9422,7 +9829,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
9422 struct intel_crtc *intel_crtc; 9829 struct intel_crtc *intel_crtc;
9423 int i; 9830 int i;
9424 9831
9425 intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); 9832 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
9426 if (intel_crtc == NULL) 9833 if (intel_crtc == NULL)
9427 return; 9834 return;
9428 9835
@@ -9573,7 +9980,13 @@ static void intel_setup_outputs(struct drm_device *dev)
9573 if (I915_READ(PCH_DP_D) & DP_DETECTED) 9980 if (I915_READ(PCH_DP_D) & DP_DETECTED)
9574 intel_dp_init(dev, PCH_DP_D, PORT_D); 9981 intel_dp_init(dev, PCH_DP_D, PORT_D);
9575 } else if (IS_VALLEYVIEW(dev)) { 9982 } else if (IS_VALLEYVIEW(dev)) {
9576 /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ 9983 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
9984 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
9985 PORT_B);
9986 if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
9987 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
9988 }
9989
9577 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) { 9990 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
9578 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, 9991 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
9579 PORT_C); 9992 PORT_C);
@@ -9582,12 +9995,7 @@ static void intel_setup_outputs(struct drm_device *dev)
9582 PORT_C); 9995 PORT_C);
9583 } 9996 }
9584 9997
9585 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) { 9998 intel_dsi_init(dev);
9586 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
9587 PORT_B);
9588 if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
9589 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
9590 }
9591 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { 9999 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
9592 bool found = false; 10000 bool found = false;
9593 10001
@@ -9643,6 +10051,7 @@ static void intel_setup_outputs(struct drm_device *dev)
9643void intel_framebuffer_fini(struct intel_framebuffer *fb) 10051void intel_framebuffer_fini(struct intel_framebuffer *fb)
9644{ 10052{
9645 drm_framebuffer_cleanup(&fb->base); 10053 drm_framebuffer_cleanup(&fb->base);
10054 WARN_ON(!fb->obj->framebuffer_references--);
9646 drm_gem_object_unreference_unlocked(&fb->obj->base); 10055 drm_gem_object_unreference_unlocked(&fb->obj->base);
9647} 10056}
9648 10057
@@ -9674,9 +10083,12 @@ int intel_framebuffer_init(struct drm_device *dev,
9674 struct drm_mode_fb_cmd2 *mode_cmd, 10083 struct drm_mode_fb_cmd2 *mode_cmd,
9675 struct drm_i915_gem_object *obj) 10084 struct drm_i915_gem_object *obj)
9676{ 10085{
10086 int aligned_height, tile_height;
9677 int pitch_limit; 10087 int pitch_limit;
9678 int ret; 10088 int ret;
9679 10089
10090 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
10091
9680 if (obj->tiling_mode == I915_TILING_Y) { 10092 if (obj->tiling_mode == I915_TILING_Y) {
9681 DRM_DEBUG("hardware does not support tiling Y\n"); 10093 DRM_DEBUG("hardware does not support tiling Y\n");
9682 return -EINVAL; 10094 return -EINVAL;
@@ -9765,8 +10177,16 @@ int intel_framebuffer_init(struct drm_device *dev,
9765 if (mode_cmd->offsets[0] != 0) 10177 if (mode_cmd->offsets[0] != 0)
9766 return -EINVAL; 10178 return -EINVAL;
9767 10179
10180 tile_height = IS_GEN2(dev) ? 16 : 8;
10181 aligned_height = ALIGN(mode_cmd->height,
10182 obj->tiling_mode ? tile_height : 1);
10183 /* FIXME drm helper for size checks (especially planar formats)? */
10184 if (obj->base.size < aligned_height * mode_cmd->pitches[0])
10185 return -EINVAL;
10186
9768 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); 10187 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
9769 intel_fb->obj = obj; 10188 intel_fb->obj = obj;
10189 intel_fb->obj->framebuffer_references++;
9770 10190
9771 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); 10191 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
9772 if (ret) { 10192 if (ret) {
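
The new size check rejects a framebuffer whose backing object is too small once tiling is accounted for: an X-tiled surface must cover whole tile rows (8 scanlines per tile, 16 on gen2), so the height is rounded up before it is multiplied by the stride. A hedged sketch of the arithmetic (parameter names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))  /* a: power of two */

    static bool fb_fits_object(uint64_t obj_size, int height, int pitch,
                               bool tiled, bool is_gen2)
    {
            int tile_height = is_gen2 ? 16 : 8;
            int aligned_height = ALIGN(height, tiled ? tile_height : 1);

            return obj_size >= (uint64_t)aligned_height * pitch;
    }

For a 1366x768 X-tiled buffer with a 4096-byte stride, 768 is already a multiple of 8, so the object must be at least 768 * 4096 = 3145728 bytes.
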
@@ -9792,9 +10212,15 @@ intel_user_framebuffer_create(struct drm_device *dev,
9792 return intel_framebuffer_create(dev, mode_cmd, obj); 10212 return intel_framebuffer_create(dev, mode_cmd, obj);
9793} 10213}
9794 10214
10215#ifndef CONFIG_DRM_I915_FBDEV
10216static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
10217{
10218}
10219#endif
10220
9795static const struct drm_mode_config_funcs intel_mode_funcs = { 10221static const struct drm_mode_config_funcs intel_mode_funcs = {
9796 .fb_create = intel_user_framebuffer_create, 10222 .fb_create = intel_user_framebuffer_create,
9797 .output_poll_changed = intel_fb_output_poll_changed, 10223 .output_poll_changed = intel_fbdev_output_poll_changed,
9798}; 10224};
9799 10225
9800/* Set up chip specific display functions */ 10226/* Set up chip specific display functions */
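
With fbdev emulation now selectable via CONFIG_DRM_I915_FBDEV, the #ifndef block above supplies a no-op so intel_mode_funcs can reference intel_fbdev_output_poll_changed unconditionally. The pattern in isolation (all names here are placeholders):

    struct dev;                                     /* opaque device type */

    #ifdef CONFIG_MY_FEATURE
    void my_feature_poll_changed(struct dev *d);    /* real version compiled in */
    #else
    static inline void my_feature_poll_changed(struct dev *d)
    {
            (void)d;                                /* feature compiled out: no-op */
    }
    #endif
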
@@ -9820,7 +10246,6 @@ static void intel_init_display(struct drm_device *dev)
9820 dev_priv->display.update_plane = ironlake_update_plane; 10246 dev_priv->display.update_plane = ironlake_update_plane;
9821 } else if (HAS_PCH_SPLIT(dev)) { 10247 } else if (HAS_PCH_SPLIT(dev)) {
9822 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 10248 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
9823 dev_priv->display.get_clock = ironlake_crtc_clock_get;
9824 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 10249 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
9825 dev_priv->display.crtc_enable = ironlake_crtc_enable; 10250 dev_priv->display.crtc_enable = ironlake_crtc_enable;
9826 dev_priv->display.crtc_disable = ironlake_crtc_disable; 10251 dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -9828,7 +10253,6 @@ static void intel_init_display(struct drm_device *dev)
9828 dev_priv->display.update_plane = ironlake_update_plane; 10253 dev_priv->display.update_plane = ironlake_update_plane;
9829 } else if (IS_VALLEYVIEW(dev)) { 10254 } else if (IS_VALLEYVIEW(dev)) {
9830 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 10255 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
9831 dev_priv->display.get_clock = i9xx_crtc_clock_get;
9832 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 10256 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
9833 dev_priv->display.crtc_enable = valleyview_crtc_enable; 10257 dev_priv->display.crtc_enable = valleyview_crtc_enable;
9834 dev_priv->display.crtc_disable = i9xx_crtc_disable; 10258 dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9836,7 +10260,6 @@ static void intel_init_display(struct drm_device *dev)
9836 dev_priv->display.update_plane = i9xx_update_plane; 10260 dev_priv->display.update_plane = i9xx_update_plane;
9837 } else { 10261 } else {
9838 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 10262 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
9839 dev_priv->display.get_clock = i9xx_crtc_clock_get;
9840 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 10263 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
9841 dev_priv->display.crtc_enable = i9xx_crtc_enable; 10264 dev_priv->display.crtc_enable = i9xx_crtc_enable;
9842 dev_priv->display.crtc_disable = i9xx_crtc_disable; 10265 dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -10012,8 +10435,7 @@ static struct intel_quirk intel_quirks[] = {
10012 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ 10435 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
10013 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, 10436 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
10014 10437
10015 /* 830/845 need to leave pipe A & dpll A up */ 10438 /* 830 needs to leave pipe A & dpll A up */
10016 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
10017 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, 10439 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
10018 10440
10019 /* Lenovo U160 cannot use SSC on LVDS */ 10441 /* Lenovo U160 cannot use SSC on LVDS */
@@ -10022,20 +10444,11 @@ static struct intel_quirk intel_quirks[] = {
10022 /* Sony Vaio Y cannot use SSC on LVDS */ 10444 /* Sony Vaio Y cannot use SSC on LVDS */
10023 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, 10445 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
10024 10446
10025 /* Acer Aspire 5734Z must invert backlight brightness */ 10447 /*
10026 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, 10448 * All GM45 Acer (and its brands eMachines and Packard Bell) laptops
10027 10449 * seem to use inverted backlight PWM.
10028 /* Acer/eMachines G725 */ 10450 */
10029 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, 10451 { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
10030
10031 /* Acer/eMachines e725 */
10032 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
10033
10034 /* Acer/Packard Bell NCL20 */
10035 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
10036
10037 /* Acer Aspire 4736Z */
10038 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
10039 10452
10040 /* Dell XPS13 HD Sandy Bridge */ 10453 /* Dell XPS13 HD Sandy Bridge */
10041 { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable }, 10454 { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
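
Folding the four Acer entries into one row works because the quirk match treats PCI_ANY_ID as a wildcard on the subsystem IDs. A compact userspace sketch of the matching loop (field names mirror the driver loosely; the PCI_ANY_ID value here is illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PCI_ANY_ID 0xffffu      /* illustrative wildcard value */

    struct quirk {
            uint16_t device, subsystem_vendor, subsystem_device;
            void (*hook)(void);
    };

    static void quirk_invert_brightness(void)
    {
            puts("inverting backlight PWM");
    }

    static const struct quirk quirks[] = {
            /* one wildcard row covers every 0x2a42/0x1025 subsystem device */
            { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
    };

    static void apply_quirks(uint16_t dev, uint16_t sv, uint16_t sd)
    {
            for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
                    const struct quirk *q = &quirks[i];

                    if (q->device == dev &&
                        (q->subsystem_vendor == PCI_ANY_ID || q->subsystem_vendor == sv) &&
                        (q->subsystem_device == PCI_ANY_ID || q->subsystem_device == sd))
                            q->hook();
            }
    }
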
@@ -10084,12 +10497,19 @@ static void i915_disable_vga(struct drm_device *dev)
10084 10497
10085void intel_modeset_init_hw(struct drm_device *dev) 10498void intel_modeset_init_hw(struct drm_device *dev)
10086{ 10499{
10087 intel_init_power_well(dev); 10500 struct drm_i915_private *dev_priv = dev->dev_private;
10088 10501
10089 intel_prepare_ddi(dev); 10502 intel_prepare_ddi(dev);
10090 10503
10091 intel_init_clock_gating(dev); 10504 intel_init_clock_gating(dev);
10092 10505
10506 /* Enable the CRI clock source so we can get at the display */
10507 if (IS_VALLEYVIEW(dev))
10508 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
10509 DPLL_INTEGRATED_CRI_CLK_VLV);
10510
10511 intel_init_dpio(dev);
10512
10093 mutex_lock(&dev->struct_mutex); 10513 mutex_lock(&dev->struct_mutex);
10094 intel_enable_gt_powersave(dev); 10514 intel_enable_gt_powersave(dev);
10095 mutex_unlock(&dev->struct_mutex); 10515 mutex_unlock(&dev->struct_mutex);
@@ -10357,7 +10777,7 @@ void i915_redisable_vga(struct drm_device *dev)
10357 (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0) 10777 (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
10358 return; 10778 return;
10359 10779
10360 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { 10780 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
10361 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 10781 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
10362 i915_disable_vga(dev); 10782 i915_disable_vga(dev);
10363 } 10783 }
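
The small-looking change above is a real bug fix: VGA_DISP_DISABLE is a single bit (bit 31 in i915_reg.h), and comparing the whole register against it reports "enabled" as soon as any unrelated bit is also set. Testing the bit itself is the correct form:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define VGA_DISP_DISABLE (1u << 31)

    /* buggy: whole-register compare */
    static bool vga_enabled_old(uint32_t reg) { return reg != VGA_DISP_DISABLE; }
    /* fixed: test only the disable bit */
    static bool vga_enabled_new(uint32_t reg) { return !(reg & VGA_DISP_DISABLE); }

    int main(void)
    {
            uint32_t reg = VGA_DISP_DISABLE | 0x1;  /* disabled, plus a stray bit */

            assert(vga_enabled_old(reg));           /* false positive */
            assert(!vga_enabled_new(reg));          /* correctly "disabled" */
            return 0;
    }
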
@@ -10380,6 +10800,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
10380 &crtc->config); 10800 &crtc->config);
10381 10801
10382 crtc->base.enabled = crtc->active; 10802 crtc->base.enabled = crtc->active;
10803 crtc->primary_enabled = crtc->active;
10383 10804
10384 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", 10805 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
10385 crtc->base.base.id, 10806 crtc->base.base.id,
@@ -10420,20 +10841,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
10420 } 10841 }
10421 10842
10422 encoder->connectors_active = false; 10843 encoder->connectors_active = false;
10423 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n", 10844 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
10424 encoder->base.base.id, 10845 encoder->base.base.id,
10425 drm_get_encoder_name(&encoder->base), 10846 drm_get_encoder_name(&encoder->base),
10426 encoder->base.crtc ? "enabled" : "disabled", 10847 encoder->base.crtc ? "enabled" : "disabled",
10427 pipe); 10848 pipe_name(pipe));
10428 }
10429
10430 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
10431 base.head) {
10432 if (!crtc->active)
10433 continue;
10434 if (dev_priv->display.get_clock)
10435 dev_priv->display.get_clock(crtc,
10436 &crtc->config);
10437 } 10849 }
10438 10850
10439 list_for_each_entry(connector, &dev->mode_config.connector_list, 10851 list_for_each_entry(connector, &dev->mode_config.connector_list,
@@ -10460,7 +10872,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
10460{ 10872{
10461 struct drm_i915_private *dev_priv = dev->dev_private; 10873 struct drm_i915_private *dev_priv = dev->dev_private;
10462 enum pipe pipe; 10874 enum pipe pipe;
10463 struct drm_plane *plane;
10464 struct intel_crtc *crtc; 10875 struct intel_crtc *crtc;
10465 struct intel_encoder *encoder; 10876 struct intel_encoder *encoder;
10466 int i; 10877 int i;
@@ -10507,7 +10918,12 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
10507 pll->on = false; 10918 pll->on = false;
10508 } 10919 }
10509 10920
10921 if (IS_HASWELL(dev))
10922 ilk_wm_get_hw_state(dev);
10923
10510 if (force_restore) { 10924 if (force_restore) {
10925 i915_redisable_vga(dev);
10926
10511 /* 10927 /*
10512 * We need to use raw interfaces for restoring state to avoid 10928 * We need to use raw interfaces for restoring state to avoid
10513 * checking (bogus) intermediate states. 10929 * checking (bogus) intermediate states.
@@ -10519,10 +10935,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
10519 __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, 10935 __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
10520 crtc->fb); 10936 crtc->fb);
10521 } 10937 }
10522 list_for_each_entry(plane, &dev->mode_config.plane_list, head)
10523 intel_plane_restore(plane);
10524
10525 i915_redisable_vga(dev);
10526 } else { 10938 } else {
10527 intel_modeset_update_staged_output_state(dev); 10939 intel_modeset_update_staged_output_state(dev);
10528 } 10940 }
@@ -10545,6 +10957,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
10545{ 10957{
10546 struct drm_i915_private *dev_priv = dev->dev_private; 10958 struct drm_i915_private *dev_priv = dev->dev_private;
10547 struct drm_crtc *crtc; 10959 struct drm_crtc *crtc;
10960 struct drm_connector *connector;
10548 10961
10549 /* 10962 /*
10550 * Interrupts and polling as the first thing to avoid creating havoc. 10963 * Interrupts and polling as the first thing to avoid creating havoc.
@@ -10585,6 +10998,10 @@ void intel_modeset_cleanup(struct drm_device *dev)
10585 /* destroy backlight, if any, before the connectors */ 10998 /* destroy backlight, if any, before the connectors */
10586 intel_panel_destroy_backlight(dev); 10999 intel_panel_destroy_backlight(dev);
10587 11000
11001 /* destroy the sysfs files before encoders/connectors */
11002 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
11003 drm_sysfs_connector_remove(connector);
11004
10588 drm_mode_config_cleanup(dev); 11005 drm_mode_config_cleanup(dev);
10589 11006
10590 intel_cleanup_overlay(dev); 11007 intel_cleanup_overlay(dev);
@@ -10680,7 +11097,7 @@ intel_display_capture_error_state(struct drm_device *dev)
10680 if (INTEL_INFO(dev)->num_pipes == 0) 11097 if (INTEL_INFO(dev)->num_pipes == 0)
10681 return NULL; 11098 return NULL;
10682 11099
10683 error = kmalloc(sizeof(*error), GFP_ATOMIC); 11100 error = kzalloc(sizeof(*error), GFP_ATOMIC);
10684 if (error == NULL) 11101 if (error == NULL)
10685 return NULL; 11102 return NULL;
10686 11103
@@ -10688,6 +11105,9 @@ intel_display_capture_error_state(struct drm_device *dev)
10688 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); 11105 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
10689 11106
10690 for_each_pipe(i) { 11107 for_each_pipe(i) {
11108 if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE(i)))
11109 continue;
11110
10691 if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) { 11111 if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
10692 error->cursor[i].control = I915_READ(CURCNTR(i)); 11112 error->cursor[i].control = I915_READ(CURCNTR(i));
10693 error->cursor[i].position = I915_READ(CURPOS(i)); 11113 error->cursor[i].position = I915_READ(CURPOS(i));
@@ -10721,6 +11141,10 @@ intel_display_capture_error_state(struct drm_device *dev)
10721 for (i = 0; i < error->num_transcoders; i++) { 11141 for (i = 0; i < error->num_transcoders; i++) {
10722 enum transcoder cpu_transcoder = transcoders[i]; 11142 enum transcoder cpu_transcoder = transcoders[i];
10723 11143
11144 if (!intel_display_power_enabled(dev,
11145 POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
11146 continue;
11147
10724 error->transcoder[i].cpu_transcoder = cpu_transcoder; 11148 error->transcoder[i].cpu_transcoder = cpu_transcoder;
10725 11149
10726 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); 11150 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
@@ -10732,12 +11156,6 @@ intel_display_capture_error_state(struct drm_device *dev)
10732 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); 11156 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
10733 } 11157 }
10734 11158
10735 /* In the code above we read the registers without checking if the power
10736 * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
10737 * prevent the next I915_WRITE from detecting it and printing an error
10738 * message. */
10739 intel_uncore_clear_errors(dev);
10740
10741 return error; 11159 return error;
10742} 11160}
10743 11161
@@ -10782,7 +11200,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
10782 } 11200 }
10783 11201
10784 for (i = 0; i < error->num_transcoders; i++) { 11202 for (i = 0; i < error->num_transcoders; i++) {
10785 err_printf(m, " CPU transcoder: %c\n", 11203 err_printf(m, "CPU transcoder: %c\n",
10786 transcoder_name(error->transcoder[i].cpu_transcoder)); 11204 transcoder_name(error->transcoder[i].cpu_transcoder));
10787 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); 11205 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
10788 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); 11206 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1a431377d83b..7619eae35b25 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -38,6 +38,32 @@
38 38
39#define DP_LINK_CHECK_TIMEOUT (10 * 1000) 39#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
40 40
41struct dp_link_dpll {
42 int link_bw;
43 struct dpll dpll;
44};
45
46static const struct dp_link_dpll gen4_dpll[] = {
47 { DP_LINK_BW_1_62,
48 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
49 { DP_LINK_BW_2_7,
50 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
51};
52
53static const struct dp_link_dpll pch_dpll[] = {
54 { DP_LINK_BW_1_62,
55 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
56 { DP_LINK_BW_2_7,
57 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
58};
59
60static const struct dp_link_dpll vlv_dpll[] = {
61 { DP_LINK_BW_1_62,
62 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
63 { DP_LINK_BW_2_7,
64 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
65};
66
41/** 67/**
42 * is_edp - is the given port attached to an eDP panel (either CPU or PCH) 68 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
43 * @intel_dp: DP struct 69 * @intel_dp: DP struct
@@ -211,24 +237,77 @@ intel_hrawclk(struct drm_device *dev)
211 } 237 }
212} 238}
213 239
240static void
241intel_dp_init_panel_power_sequencer(struct drm_device *dev,
242 struct intel_dp *intel_dp,
243 struct edp_power_seq *out);
244static void
245intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
246 struct intel_dp *intel_dp,
247 struct edp_power_seq *out);
248
249static enum pipe
250vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
251{
252 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
253 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
254 struct drm_device *dev = intel_dig_port->base.base.dev;
255 struct drm_i915_private *dev_priv = dev->dev_private;
256 enum port port = intel_dig_port->port;
257 enum pipe pipe;
258
259 /* modeset should have pipe */
260 if (crtc)
261 return to_intel_crtc(crtc)->pipe;
262
263 /* init time, try to find a pipe with this port selected */
264 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
265 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
266 PANEL_PORT_SELECT_MASK;
267 if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
268 return pipe;
269 if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
270 return pipe;
271 }
272
273 /* shrug */
274 return PIPE_A;
275}
276
277static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
278{
279 struct drm_device *dev = intel_dp_to_dev(intel_dp);
280
281 if (HAS_PCH_SPLIT(dev))
282 return PCH_PP_CONTROL;
283 else
284 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
285}
286
287static u32 _pp_stat_reg(struct intel_dp *intel_dp)
288{
289 struct drm_device *dev = intel_dp_to_dev(intel_dp);
290
291 if (HAS_PCH_SPLIT(dev))
292 return PCH_PP_STATUS;
293 else
294 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
295}
296
214static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) 297static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
215{ 298{
216 struct drm_device *dev = intel_dp_to_dev(intel_dp); 299 struct drm_device *dev = intel_dp_to_dev(intel_dp);
217 struct drm_i915_private *dev_priv = dev->dev_private; 300 struct drm_i915_private *dev_priv = dev->dev_private;
218 u32 pp_stat_reg;
219 301
220 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; 302 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
221 return (I915_READ(pp_stat_reg) & PP_ON) != 0;
222} 303}
223 304
224static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) 305static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
225{ 306{
226 struct drm_device *dev = intel_dp_to_dev(intel_dp); 307 struct drm_device *dev = intel_dp_to_dev(intel_dp);
227 struct drm_i915_private *dev_priv = dev->dev_private; 308 struct drm_i915_private *dev_priv = dev->dev_private;
228 u32 pp_ctrl_reg;
229 309
230 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 310 return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
231 return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
232} 311}
233 312
234static void 313static void
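
The _pp_ctrl_reg()/_pp_stat_reg() helpers introduced above centralize the panel-power register selection; the later hunks in this file that used to open-code the IS_VALLEYVIEW ternary now call them, and on VLV the pipe is resolved through vlv_power_sequencer_pipe(). The shape of the refactor, with made-up offsets:

    #include <stdbool.h>
    #include <stdint.h>

    enum pipe { PIPE_A, PIPE_B };

    #define PCH_PP_CONTROL            0x10000u      /* placeholder offset */
    #define VLV_PIPE_PP_CONTROL(pipe) (0x20000u + (uint32_t)(pipe) * 0x100)

    struct dp { bool is_vlv; enum pipe pps_pipe; };

    /* single point of truth replacing per-call-site ternaries */
    static uint32_t pp_ctrl_reg(const struct dp *dp)
    {
            return dp->is_vlv ? VLV_PIPE_PP_CONTROL(dp->pps_pipe)
                              : PCH_PP_CONTROL;
    }
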
@@ -236,19 +315,15 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
236{ 315{
237 struct drm_device *dev = intel_dp_to_dev(intel_dp); 316 struct drm_device *dev = intel_dp_to_dev(intel_dp);
238 struct drm_i915_private *dev_priv = dev->dev_private; 317 struct drm_i915_private *dev_priv = dev->dev_private;
239 u32 pp_stat_reg, pp_ctrl_reg;
240 318
241 if (!is_edp(intel_dp)) 319 if (!is_edp(intel_dp))
242 return; 320 return;
243 321
244 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
245 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
246
247 if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) { 322 if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
248 WARN(1, "eDP powered off while attempting aux channel communication.\n"); 323 WARN(1, "eDP powered off while attempting aux channel communication.\n");
249 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", 324 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
250 I915_READ(pp_stat_reg), 325 I915_READ(_pp_stat_reg(intel_dp)),
251 I915_READ(pp_ctrl_reg)); 326 I915_READ(_pp_ctrl_reg(intel_dp)));
252 } 327 }
253} 328}
254 329
@@ -361,6 +436,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
361 goto out; 436 goto out;
362 } 437 }
363 438
439 /* Only 5 data registers! */
440 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
441 ret = -E2BIG;
442 goto out;
443 }
444
364 while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) { 445 while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
365 /* Must try at least 3 times according to DP spec */ 446 /* Must try at least 3 times according to DP spec */
366 for (try = 0; try < 5; try++) { 447 for (try = 0; try < 5; try++) {
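
The new WARN_ON guard encodes a hardware limit: the AUX channel has five 32-bit data registers, so one transfer moves at most 5 * 4 = 20 bytes. The 16- and 19-byte caps added to the native write/read paths below follow from the same 20: a write spends 4 bytes on the command header, and a read reply spends 1 byte on the status code. A sketch of the check:

    #include <errno.h>
    #include <stdio.h>

    #define AUX_DATA_REGS  5
    #define AUX_MAX_BYTES  (AUX_DATA_REGS * 4)      /* 20 */

    static int check_aux_len(int send_bytes, int recv_size)
    {
            if (send_bytes > AUX_MAX_BYTES || recv_size > AUX_MAX_BYTES) {
                    fprintf(stderr, "aux message too large\n");  /* WARN_ON upstream */
                    return -E2BIG;
            }
            return 0;
    }
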
@@ -451,9 +532,10 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
451 int msg_bytes; 532 int msg_bytes;
452 uint8_t ack; 533 uint8_t ack;
453 534
535 if (WARN_ON(send_bytes > 16))
536 return -E2BIG;
537
454 intel_dp_check_edp(intel_dp); 538 intel_dp_check_edp(intel_dp);
455 if (send_bytes > 16)
456 return -1;
457 msg[0] = AUX_NATIVE_WRITE << 4; 539 msg[0] = AUX_NATIVE_WRITE << 4;
458 msg[1] = address >> 8; 540 msg[1] = address >> 8;
459 msg[2] = address & 0xff; 541 msg[2] = address & 0xff;
@@ -494,6 +576,9 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
494 uint8_t ack; 576 uint8_t ack;
495 int ret; 577 int ret;
496 578
579 if (WARN_ON(recv_bytes > 19))
580 return -E2BIG;
581
497 intel_dp_check_edp(intel_dp); 582 intel_dp_check_edp(intel_dp);
498 msg[0] = AUX_NATIVE_READ << 4; 583 msg[0] = AUX_NATIVE_READ << 4;
499 msg[1] = address >> 8; 584 msg[1] = address >> 8;
@@ -538,6 +623,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
538 int reply_bytes; 623 int reply_bytes;
539 int ret; 624 int ret;
540 625
626 ironlake_edp_panel_vdd_on(intel_dp);
541 intel_dp_check_edp(intel_dp); 627 intel_dp_check_edp(intel_dp);
542 /* Set up the command byte */ 628 /* Set up the command byte */
543 if (mode & MODE_I2C_READ) 629 if (mode & MODE_I2C_READ)
@@ -569,13 +655,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
569 break; 655 break;
570 } 656 }
571 657
572 for (retry = 0; retry < 5; retry++) { 658 /*
659 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
660 * required to retry at least seven times upon receiving AUX_DEFER
661 * before giving up the AUX transaction.
662 */
663 for (retry = 0; retry < 7; retry++) {
573 ret = intel_dp_aux_ch(intel_dp, 664 ret = intel_dp_aux_ch(intel_dp,
574 msg, msg_bytes, 665 msg, msg_bytes,
575 reply, reply_bytes); 666 reply, reply_bytes);
576 if (ret < 0) { 667 if (ret < 0) {
577 DRM_DEBUG_KMS("aux_ch failed %d\n", ret); 668 DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
578 return ret; 669 goto out;
579 } 670 }
580 671
581 switch (reply[0] & AUX_NATIVE_REPLY_MASK) { 672 switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
@@ -586,7 +677,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
586 break; 677 break;
587 case AUX_NATIVE_REPLY_NACK: 678 case AUX_NATIVE_REPLY_NACK:
588 DRM_DEBUG_KMS("aux_ch native nack\n"); 679 DRM_DEBUG_KMS("aux_ch native nack\n");
589 return -EREMOTEIO; 680 ret = -EREMOTEIO;
681 goto out;
590 case AUX_NATIVE_REPLY_DEFER: 682 case AUX_NATIVE_REPLY_DEFER:
591 /* 683 /*
592 * For now, just give more slack to branch devices. We 684 * For now, just give more slack to branch devices. We
@@ -604,7 +696,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
604 default: 696 default:
605 DRM_ERROR("aux_ch invalid native reply 0x%02x\n", 697 DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
606 reply[0]); 698 reply[0]);
607 return -EREMOTEIO; 699 ret = -EREMOTEIO;
700 goto out;
608 } 701 }
609 702
610 switch (reply[0] & AUX_I2C_REPLY_MASK) { 703 switch (reply[0] & AUX_I2C_REPLY_MASK) {
@@ -612,22 +705,29 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
612 if (mode == MODE_I2C_READ) { 705 if (mode == MODE_I2C_READ) {
613 *read_byte = reply[1]; 706 *read_byte = reply[1];
614 } 707 }
615 return reply_bytes - 1; 708 ret = reply_bytes - 1;
709 goto out;
616 case AUX_I2C_REPLY_NACK: 710 case AUX_I2C_REPLY_NACK:
617 DRM_DEBUG_KMS("aux_i2c nack\n"); 711 DRM_DEBUG_KMS("aux_i2c nack\n");
618 return -EREMOTEIO; 712 ret = -EREMOTEIO;
713 goto out;
619 case AUX_I2C_REPLY_DEFER: 714 case AUX_I2C_REPLY_DEFER:
620 DRM_DEBUG_KMS("aux_i2c defer\n"); 715 DRM_DEBUG_KMS("aux_i2c defer\n");
621 udelay(100); 716 udelay(100);
622 break; 717 break;
623 default: 718 default:
624 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); 719 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
625 return -EREMOTEIO; 720 ret = -EREMOTEIO;
721 goto out;
626 } 722 }
627 } 723 }
628 724
629 DRM_ERROR("too many retries, giving up\n"); 725 DRM_ERROR("too many retries, giving up\n");
630 return -EREMOTEIO; 726 ret = -EREMOTEIO;
727
728out:
729 ironlake_edp_panel_vdd_off(intel_dp, false);
730 return ret;
631} 731}
632 732
633static int 733static int
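
Two intertwined changes land in intel_dp_i2c_aux_ch() above: the retry budget grows from five to the seven AUX_DEFER retries DP 1.2 mandates, and every early return is rewritten as goto out so the VDD reference taken at entry is always dropped. A control-flow miniature (the helpers are stand-ins):

    #include <stdio.h>

    enum reply { REPLY_ACK, REPLY_NACK, REPLY_DEFER };

    static void vdd_on(void)  { puts("vdd on"); }
    static void vdd_off(void) { puts("vdd off"); }
    static enum reply do_transfer(int attempt)
    {
            return attempt < 2 ? REPLY_DEFER : REPLY_ACK;   /* canned replies */
    }

    static int aux_transaction(void)
    {
            int ret = -1;

            vdd_on();                               /* taken once, at entry */
            for (int retry = 0; retry < 7; retry++) {       /* DP 1.2: >= 7 */
                    switch (do_transfer(retry)) {
                    case REPLY_ACK:
                            ret = 0;
                            goto out;
                    case REPLY_NACK:
                            ret = -1;
                            goto out;
                    case REPLY_DEFER:
                            continue;               /* sink busy, try again */
                    }
            }
            /* seven defers in a row: give up with ret still -1 */
    out:
            vdd_off();                              /* balanced on every path */
            return ret;
    }
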
@@ -649,9 +749,7 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
649 intel_dp->adapter.algo_data = &intel_dp->algo; 749 intel_dp->adapter.algo_data = &intel_dp->algo;
650 intel_dp->adapter.dev.parent = &intel_connector->base.kdev; 750 intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
651 751
652 ironlake_edp_panel_vdd_on(intel_dp);
653 ret = i2c_dp_aux_add_bus(&intel_dp->adapter); 752 ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
654 ironlake_edp_panel_vdd_off(intel_dp, false);
655 return ret; 753 return ret;
656} 754}
657 755
@@ -660,41 +758,30 @@ intel_dp_set_clock(struct intel_encoder *encoder,
660 struct intel_crtc_config *pipe_config, int link_bw) 758 struct intel_crtc_config *pipe_config, int link_bw)
661{ 759{
662 struct drm_device *dev = encoder->base.dev; 760 struct drm_device *dev = encoder->base.dev;
761 const struct dp_link_dpll *divisor = NULL;
762 int i, count = 0;
663 763
664 if (IS_G4X(dev)) { 764 if (IS_G4X(dev)) {
665 if (link_bw == DP_LINK_BW_1_62) { 765 divisor = gen4_dpll;
666 pipe_config->dpll.p1 = 2; 766 count = ARRAY_SIZE(gen4_dpll);
667 pipe_config->dpll.p2 = 10;
668 pipe_config->dpll.n = 2;
669 pipe_config->dpll.m1 = 23;
670 pipe_config->dpll.m2 = 8;
671 } else {
672 pipe_config->dpll.p1 = 1;
673 pipe_config->dpll.p2 = 10;
674 pipe_config->dpll.n = 1;
675 pipe_config->dpll.m1 = 14;
676 pipe_config->dpll.m2 = 2;
677 }
678 pipe_config->clock_set = true;
679 } else if (IS_HASWELL(dev)) { 767 } else if (IS_HASWELL(dev)) {
680 /* Haswell has special-purpose DP DDI clocks. */ 768 /* Haswell has special-purpose DP DDI clocks. */
681 } else if (HAS_PCH_SPLIT(dev)) { 769 } else if (HAS_PCH_SPLIT(dev)) {
682 if (link_bw == DP_LINK_BW_1_62) { 770 divisor = pch_dpll;
683 pipe_config->dpll.n = 1; 771 count = ARRAY_SIZE(pch_dpll);
684 pipe_config->dpll.p1 = 2;
685 pipe_config->dpll.p2 = 10;
686 pipe_config->dpll.m1 = 12;
687 pipe_config->dpll.m2 = 9;
688 } else {
689 pipe_config->dpll.n = 2;
690 pipe_config->dpll.p1 = 1;
691 pipe_config->dpll.p2 = 10;
692 pipe_config->dpll.m1 = 14;
693 pipe_config->dpll.m2 = 8;
694 }
695 pipe_config->clock_set = true;
696 } else if (IS_VALLEYVIEW(dev)) { 772 } else if (IS_VALLEYVIEW(dev)) {
697 /* FIXME: Need to figure out optimized DP clocks for vlv. */ 773 divisor = vlv_dpll;
774 count = ARRAY_SIZE(vlv_dpll);
775 }
776
777 if (divisor && count) {
778 for (i = 0; i < count; i++) {
779 if (link_bw == divisor[i].link_bw) {
780 pipe_config->dpll = divisor[i].dpll;
781 pipe_config->clock_set = true;
782 break;
783 }
784 }
698 } 785 }
699} 786}
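
The rewrite of intel_dp_set_clock() above is a textbook table-driven refactor: three per-platform if/else ladders collapse into static arrays keyed by link rate plus one generic search loop. A compilable miniature using the gen4 values from the diff (the DP_LINK_BW_* encodings are the DPCD link-rate codes, 0x06 for 1.62 Gbps and 0x0a for 2.7 Gbps):

    #include <stdbool.h>
    #include <stddef.h>

    #define DP_LINK_BW_1_62 0x06
    #define DP_LINK_BW_2_7  0x0a

    struct dpll { int p1, p2, n, m1, m2; };
    struct dp_link_dpll { int link_bw; struct dpll dpll; };

    static const struct dp_link_dpll gen4_dpll[] = {
            { DP_LINK_BW_1_62, { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
            { DP_LINK_BW_2_7,  { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } },
    };

    /* returns true when a divisor was found, mirroring clock_set */
    static bool dp_set_clock(int link_bw, struct dpll *out,
                             const struct dp_link_dpll *table, size_t count)
    {
            for (size_t i = 0; i < count; i++) {
                    if (table[i].link_bw == link_bw) {
                            *out = table[i].dpll;
                            return true;
                    }
            }
            return false;
    }

Supporting a new platform now means adding an array, not another branch in the ladder.
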
700 787
@@ -737,19 +824,22 @@ intel_dp_compute_config(struct intel_encoder *encoder,
737 824
738 DRM_DEBUG_KMS("DP link computation with max lane count %i " 825 DRM_DEBUG_KMS("DP link computation with max lane count %i "
739 "max bw %02x pixel clock %iKHz\n", 826 "max bw %02x pixel clock %iKHz\n",
740 max_lane_count, bws[max_clock], adjusted_mode->clock); 827 max_lane_count, bws[max_clock],
828 adjusted_mode->crtc_clock);
741 829
742 /* Walk through all bpp values. Luckily they're all nicely spaced with 2 830 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
743 * bpc in between. */ 831 * bpc in between. */
744 bpp = pipe_config->pipe_bpp; 832 bpp = pipe_config->pipe_bpp;
745 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) { 833 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
834 dev_priv->vbt.edp_bpp < bpp) {
746 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", 835 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
747 dev_priv->vbt.edp_bpp); 836 dev_priv->vbt.edp_bpp);
748 bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp); 837 bpp = dev_priv->vbt.edp_bpp;
749 } 838 }
750 839
751 for (; bpp >= 6*3; bpp -= 2*3) { 840 for (; bpp >= 6*3; bpp -= 2*3) {
752 mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); 841 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
842 bpp);
753 843
754 for (clock = 0; clock <= max_clock; clock++) { 844 for (clock = 0; clock <= max_clock; clock++) {
755 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 845 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
@@ -794,7 +884,8 @@ found:
794 mode_rate, link_avail); 884 mode_rate, link_avail);
795 885
796 intel_link_compute_m_n(bpp, lane_count, 886 intel_link_compute_m_n(bpp, lane_count,
797 adjusted_mode->clock, pipe_config->port_clock, 887 adjusted_mode->crtc_clock,
888 pipe_config->port_clock,
798 &pipe_config->dp_m_n); 889 &pipe_config->dp_m_n);
799 890
800 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); 891 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
@@ -802,21 +893,6 @@ found:
802 return true; 893 return true;
803} 894}
804 895
805void intel_dp_init_link_config(struct intel_dp *intel_dp)
806{
807 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
808 intel_dp->link_configuration[0] = intel_dp->link_bw;
809 intel_dp->link_configuration[1] = intel_dp->lane_count;
810 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
811 /*
812 * Check for DPCD version > 1.1 and enhanced framing support
813 */
814 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
815 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
816 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
817 }
818}
819
820static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp) 896static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
821{ 897{
822 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 898 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -889,8 +965,6 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
889 intel_write_eld(&encoder->base, adjusted_mode); 965 intel_write_eld(&encoder->base, adjusted_mode);
890 } 966 }
891 967
892 intel_dp_init_link_config(intel_dp);
893
894 /* Split out the IBX/CPU vs CPT settings */ 968 /* Split out the IBX/CPU vs CPT settings */
895 969
896 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { 970 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
@@ -900,7 +974,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
900 intel_dp->DP |= DP_SYNC_VS_HIGH; 974 intel_dp->DP |= DP_SYNC_VS_HIGH;
901 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 975 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
902 976
903 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 977 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
904 intel_dp->DP |= DP_ENHANCED_FRAMING; 978 intel_dp->DP |= DP_ENHANCED_FRAMING;
905 979
906 intel_dp->DP |= crtc->pipe << 29; 980 intel_dp->DP |= crtc->pipe << 29;
@@ -914,7 +988,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
914 intel_dp->DP |= DP_SYNC_VS_HIGH; 988 intel_dp->DP |= DP_SYNC_VS_HIGH;
915 intel_dp->DP |= DP_LINK_TRAIN_OFF; 989 intel_dp->DP |= DP_LINK_TRAIN_OFF;
916 990
917 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 991 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
918 intel_dp->DP |= DP_ENHANCED_FRAMING; 992 intel_dp->DP |= DP_ENHANCED_FRAMING;
919 993
920 if (crtc->pipe == 1) 994 if (crtc->pipe == 1)
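
Both branches above now ask drm_dp_enhanced_frame_cap() instead of consulting the cached link_configuration[] byte that an earlier hunk deleted; the helper reads straight from the sink's DPCD. Its check, restated locally with the standard DPCD offsets:

    #include <stdbool.h>
    #include <stdint.h>

    #define DP_DPCD_REV           0x000
    #define DP_MAX_LANE_COUNT     0x002
    #define DP_ENHANCED_FRAME_CAP (1 << 7)
    #define DP_RECEIVER_CAP_SIZE  0xf

    /* enhanced framing needs DPCD rev 1.1+ plus the capability bit
     * advertised in MAX_LANE_COUNT */
    static bool enhanced_frame_cap(const uint8_t dpcd[DP_RECEIVER_CAP_SIZE])
    {
            return dpcd[DP_DPCD_REV] >= 0x11 &&
                   (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
    }
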
@@ -944,8 +1018,8 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
944 struct drm_i915_private *dev_priv = dev->dev_private; 1018 struct drm_i915_private *dev_priv = dev->dev_private;
945 u32 pp_stat_reg, pp_ctrl_reg; 1019 u32 pp_stat_reg, pp_ctrl_reg;
946 1020
947 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; 1021 pp_stat_reg = _pp_stat_reg(intel_dp);
948 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 1022 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
949 1023
950 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", 1024 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
951 mask, value, 1025 mask, value,
@@ -987,11 +1061,8 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
987 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1061 struct drm_device *dev = intel_dp_to_dev(intel_dp);
988 struct drm_i915_private *dev_priv = dev->dev_private; 1062 struct drm_i915_private *dev_priv = dev->dev_private;
989 u32 control; 1063 u32 control;
990 u32 pp_ctrl_reg;
991
992 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
993 control = I915_READ(pp_ctrl_reg);
994 1064
1065 control = I915_READ(_pp_ctrl_reg(intel_dp));
995 control &= ~PANEL_UNLOCK_MASK; 1066 control &= ~PANEL_UNLOCK_MASK;
996 control |= PANEL_UNLOCK_REGS; 1067 control |= PANEL_UNLOCK_REGS;
997 return control; 1068 return control;
@@ -1006,17 +1077,16 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
1006 1077
1007 if (!is_edp(intel_dp)) 1078 if (!is_edp(intel_dp))
1008 return; 1079 return;
1009 DRM_DEBUG_KMS("Turn eDP VDD on\n");
1010 1080
1011 WARN(intel_dp->want_panel_vdd, 1081 WARN(intel_dp->want_panel_vdd,
1012 "eDP VDD already requested on\n"); 1082 "eDP VDD already requested on\n");
1013 1083
1014 intel_dp->want_panel_vdd = true; 1084 intel_dp->want_panel_vdd = true;
1015 1085
1016 if (ironlake_edp_have_panel_vdd(intel_dp)) { 1086 if (ironlake_edp_have_panel_vdd(intel_dp))
1017 DRM_DEBUG_KMS("eDP VDD already on\n");
1018 return; 1087 return;
1019 } 1088
1089 DRM_DEBUG_KMS("Turning eDP VDD on\n");
1020 1090
1021 if (!ironlake_edp_have_panel_power(intel_dp)) 1091 if (!ironlake_edp_have_panel_power(intel_dp))
1022 ironlake_wait_panel_power_cycle(intel_dp); 1092 ironlake_wait_panel_power_cycle(intel_dp);
@@ -1024,8 +1094,8 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
1024 pp = ironlake_get_pp_control(intel_dp); 1094 pp = ironlake_get_pp_control(intel_dp);
1025 pp |= EDP_FORCE_VDD; 1095 pp |= EDP_FORCE_VDD;
1026 1096
1027 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; 1097 pp_stat_reg = _pp_stat_reg(intel_dp);
1028 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 1098 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1029 1099
1030 I915_WRITE(pp_ctrl_reg, pp); 1100 I915_WRITE(pp_ctrl_reg, pp);
1031 POSTING_READ(pp_ctrl_reg); 1101 POSTING_READ(pp_ctrl_reg);
@@ -1050,11 +1120,13 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
1050 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 1120 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
1051 1121
1052 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { 1122 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
1123 DRM_DEBUG_KMS("Turning eDP VDD off\n");
1124
1053 pp = ironlake_get_pp_control(intel_dp); 1125 pp = ironlake_get_pp_control(intel_dp);
1054 pp &= ~EDP_FORCE_VDD; 1126 pp &= ~EDP_FORCE_VDD;
1055 1127
1056 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; 1128 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1057 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 1129 pp_stat_reg = _pp_stat_reg(intel_dp);
1058 1130
1059 I915_WRITE(pp_ctrl_reg, pp); 1131 I915_WRITE(pp_ctrl_reg, pp);
1060 POSTING_READ(pp_ctrl_reg); 1132 POSTING_READ(pp_ctrl_reg);
@@ -1082,7 +1154,6 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1082 if (!is_edp(intel_dp)) 1154 if (!is_edp(intel_dp))
1083 return; 1155 return;
1084 1156
1085 DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
1086 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); 1157 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
1087 1158
1088 intel_dp->want_panel_vdd = false; 1159 intel_dp->want_panel_vdd = false;
@@ -1119,20 +1190,19 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1119 1190
1120 ironlake_wait_panel_power_cycle(intel_dp); 1191 ironlake_wait_panel_power_cycle(intel_dp);
1121 1192
1193 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1122 pp = ironlake_get_pp_control(intel_dp); 1194 pp = ironlake_get_pp_control(intel_dp);
1123 if (IS_GEN5(dev)) { 1195 if (IS_GEN5(dev)) {
1124 /* ILK workaround: disable reset around power sequence */ 1196 /* ILK workaround: disable reset around power sequence */
1125 pp &= ~PANEL_POWER_RESET; 1197 pp &= ~PANEL_POWER_RESET;
1126 I915_WRITE(PCH_PP_CONTROL, pp); 1198 I915_WRITE(pp_ctrl_reg, pp);
1127 POSTING_READ(PCH_PP_CONTROL); 1199 POSTING_READ(pp_ctrl_reg);
1128 } 1200 }
1129 1201
1130 pp |= POWER_TARGET_ON; 1202 pp |= POWER_TARGET_ON;
1131 if (!IS_GEN5(dev)) 1203 if (!IS_GEN5(dev))
1132 pp |= PANEL_POWER_RESET; 1204 pp |= PANEL_POWER_RESET;
1133 1205
1134 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
1135
1136 I915_WRITE(pp_ctrl_reg, pp); 1206 I915_WRITE(pp_ctrl_reg, pp);
1137 POSTING_READ(pp_ctrl_reg); 1207 POSTING_READ(pp_ctrl_reg);
1138 1208
@@ -1140,8 +1210,8 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1140 1210
1141 if (IS_GEN5(dev)) { 1211 if (IS_GEN5(dev)) {
1142 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 1212 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1143 I915_WRITE(PCH_PP_CONTROL, pp); 1213 I915_WRITE(pp_ctrl_reg, pp);
1144 POSTING_READ(PCH_PP_CONTROL); 1214 POSTING_READ(pp_ctrl_reg);
1145 } 1215 }
1146} 1216}
1147 1217
@@ -1164,7 +1234,7 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
1164 * panels get very unhappy and cease to work. */ 1234 * panels get very unhappy and cease to work. */
1165 pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); 1235 pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
1166 1236
1167 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 1237 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1168 1238
1169 I915_WRITE(pp_ctrl_reg, pp); 1239 I915_WRITE(pp_ctrl_reg, pp);
1170 POSTING_READ(pp_ctrl_reg); 1240 POSTING_READ(pp_ctrl_reg);
@@ -1197,7 +1267,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
1197 pp = ironlake_get_pp_control(intel_dp); 1267 pp = ironlake_get_pp_control(intel_dp);
1198 pp |= EDP_BLC_ENABLE; 1268 pp |= EDP_BLC_ENABLE;
1199 1269
1200 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 1270 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1201 1271
1202 I915_WRITE(pp_ctrl_reg, pp); 1272 I915_WRITE(pp_ctrl_reg, pp);
1203 POSTING_READ(pp_ctrl_reg); 1273 POSTING_READ(pp_ctrl_reg);
@@ -1221,7 +1291,7 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
1221 pp = ironlake_get_pp_control(intel_dp); 1291 pp = ironlake_get_pp_control(intel_dp);
1222 pp &= ~EDP_BLC_ENABLE; 1292 pp &= ~EDP_BLC_ENABLE;
1223 1293
1224 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 1294 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1225 1295
1226 I915_WRITE(pp_ctrl_reg, pp); 1296 I915_WRITE(pp_ctrl_reg, pp);
1227 POSTING_READ(pp_ctrl_reg); 1297 POSTING_READ(pp_ctrl_reg);
@@ -1368,6 +1438,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
1368 struct drm_i915_private *dev_priv = dev->dev_private; 1438 struct drm_i915_private *dev_priv = dev->dev_private;
1369 enum port port = dp_to_dig_port(intel_dp)->port; 1439 enum port port = dp_to_dig_port(intel_dp)->port;
1370 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 1440 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1441 int dotclock;
1371 1442
1372 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) { 1443 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
1373 tmp = I915_READ(intel_dp->output_reg); 1444 tmp = I915_READ(intel_dp->output_reg);
@@ -1395,13 +1466,25 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
1395 1466
1396 pipe_config->adjusted_mode.flags |= flags; 1467 pipe_config->adjusted_mode.flags |= flags;
1397 1468
1398 if (dp_to_dig_port(intel_dp)->port == PORT_A) { 1469 pipe_config->has_dp_encoder = true;
1470
1471 intel_dp_get_m_n(crtc, pipe_config);
1472
1473 if (port == PORT_A) {
1399 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ) 1474 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
1400 pipe_config->port_clock = 162000; 1475 pipe_config->port_clock = 162000;
1401 else 1476 else
1402 pipe_config->port_clock = 270000; 1477 pipe_config->port_clock = 270000;
1403 } 1478 }
1404 1479
1480 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
1481 &pipe_config->dp_m_n);
1482
1483 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
1484 ironlake_check_encoder_dotclock(pipe_config, dotclock);
1485
1486 pipe_config->adjusted_mode.crtc_clock = dotclock;
1487
1405 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp && 1488 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
1406 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) { 1489 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
1407 /* 1490 /*
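
The get_config additions above recover the pixel clock from hardware state: on port A the link clock is either 162 or 270 MHz depending on the DP_A PLL bit, and the dot clock then follows from the link M/N ratio the hardware was programmed with, dotclock = link_clock * M / N. A sketch of that arithmetic (truncating division; whether the kernel helper rounds instead is not asserted here):

    #include <stdint.h>

    static int dotclock_calculate(int link_freq, uint32_t link_m, uint32_t link_n)
    {
            if (link_n == 0)
                    return 0;
            /* e.g. 270000 kHz * 2400 / 4000 = 162000 kHz */
            return (int)(((uint64_t)link_freq * link_m) / link_n);
    }
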
@@ -1423,20 +1506,21 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
1423 } 1506 }
1424} 1507}
1425 1508
1426static bool is_edp_psr(struct intel_dp *intel_dp) 1509static bool is_edp_psr(struct drm_device *dev)
1427{ 1510{
1428 return is_edp(intel_dp) && 1511 struct drm_i915_private *dev_priv = dev->dev_private;
1429 intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED; 1512
1513 return dev_priv->psr.sink_support;
1430} 1514}
1431 1515
1432static bool intel_edp_is_psr_enabled(struct drm_device *dev) 1516static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1433{ 1517{
1434 struct drm_i915_private *dev_priv = dev->dev_private; 1518 struct drm_i915_private *dev_priv = dev->dev_private;
1435 1519
1436 if (!IS_HASWELL(dev)) 1520 if (!HAS_PSR(dev))
1437 return false; 1521 return false;
1438 1522
1439 return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; 1523 return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1440} 1524}
1441 1525
1442static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp, 1526static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
@@ -1486,7 +1570,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
1486 intel_edp_psr_write_vsc(intel_dp, &psr_vsc); 1570 intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
1487 1571
1488 /* Avoid continuous PSR exit by masking memup and hpd */ 1572 /* Avoid continuous PSR exit by masking memup and hpd */
1489 I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP | 1573 I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
1490 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP); 1574 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
1491 1575
1492 intel_dp->psr_setup_done = true; 1576 intel_dp->psr_setup_done = true;
@@ -1511,9 +1595,9 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
1511 DP_PSR_MAIN_LINK_ACTIVE); 1595 DP_PSR_MAIN_LINK_ACTIVE);
1512 1596
1513 /* Setup AUX registers */ 1597 /* Setup AUX registers */
1514 I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND); 1598 I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
1515 I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION); 1599 I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
1516 I915_WRITE(EDP_PSR_AUX_CTL, 1600 I915_WRITE(EDP_PSR_AUX_CTL(dev),
1517 DP_AUX_CH_CTL_TIME_OUT_400us | 1601 DP_AUX_CH_CTL_TIME_OUT_400us |
1518 (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 1602 (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1519 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 1603 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
@@ -1536,7 +1620,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
1536 } else 1620 } else
1537 val |= EDP_PSR_LINK_DISABLE; 1621 val |= EDP_PSR_LINK_DISABLE;
1538 1622
1539 I915_WRITE(EDP_PSR_CTL, val | 1623 I915_WRITE(EDP_PSR_CTL(dev), val |
1540 EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES | 1624 EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
1541 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | 1625 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
1542 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | 1626 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
@@ -1553,42 +1637,33 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1553 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj; 1637 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
1554 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; 1638 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
1555 1639
1556 if (!IS_HASWELL(dev)) { 1640 dev_priv->psr.source_ok = false;
1641
1642 if (!HAS_PSR(dev)) {
1557 DRM_DEBUG_KMS("PSR not supported on this platform\n"); 1643 DRM_DEBUG_KMS("PSR not supported on this platform\n");
1558 dev_priv->no_psr_reason = PSR_NO_SOURCE;
1559 return false; 1644 return false;
1560 } 1645 }
1561 1646
1562 if ((intel_encoder->type != INTEL_OUTPUT_EDP) || 1647 if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
1563 (dig_port->port != PORT_A)) { 1648 (dig_port->port != PORT_A)) {
1564 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); 1649 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
1565 dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
1566 return false;
1567 }
1568
1569 if (!is_edp_psr(intel_dp)) {
1570 DRM_DEBUG_KMS("PSR not supported by this panel\n");
1571 dev_priv->no_psr_reason = PSR_NO_SINK;
1572 return false; 1650 return false;
1573 } 1651 }
1574 1652
1575 if (!i915_enable_psr) { 1653 if (!i915_enable_psr) {
1576 DRM_DEBUG_KMS("PSR disable by flag\n"); 1654 DRM_DEBUG_KMS("PSR disable by flag\n");
1577 dev_priv->no_psr_reason = PSR_MODULE_PARAM;
1578 return false; 1655 return false;
1579 } 1656 }
1580 1657
1581 crtc = dig_port->base.base.crtc; 1658 crtc = dig_port->base.base.crtc;
1582 if (crtc == NULL) { 1659 if (crtc == NULL) {
1583 DRM_DEBUG_KMS("crtc not active for PSR\n"); 1660 DRM_DEBUG_KMS("crtc not active for PSR\n");
1584 dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
1585 return false; 1661 return false;
1586 } 1662 }
1587 1663
1588 intel_crtc = to_intel_crtc(crtc); 1664 intel_crtc = to_intel_crtc(crtc);
1589 if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) { 1665 if (!intel_crtc_active(crtc)) {
1590 DRM_DEBUG_KMS("crtc not active for PSR\n"); 1666 DRM_DEBUG_KMS("crtc not active for PSR\n");
1591 dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
1592 return false; 1667 return false;
1593 } 1668 }
1594 1669
@@ -1596,29 +1671,26 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1596 if (obj->tiling_mode != I915_TILING_X || 1671 if (obj->tiling_mode != I915_TILING_X ||
1597 obj->fence_reg == I915_FENCE_REG_NONE) { 1672 obj->fence_reg == I915_FENCE_REG_NONE) {
1598 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n"); 1673 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
1599 dev_priv->no_psr_reason = PSR_NOT_TILED;
1600 return false; 1674 return false;
1601 } 1675 }
1602 1676
1603 if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) { 1677 if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
1604 DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n"); 1678 DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
1605 dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
1606 return false; 1679 return false;
1607 } 1680 }
1608 1681
1609 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) & 1682 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
1610 S3D_ENABLE) { 1683 S3D_ENABLE) {
1611 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); 1684 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
1612 dev_priv->no_psr_reason = PSR_S3D_ENABLED;
1613 return false; 1685 return false;
1614 } 1686 }
1615 1687
1616 if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) { 1688 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
1617 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); 1689 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
1618 dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
1619 return false; 1690 return false;
1620 } 1691 }
1621 1692
1693 dev_priv->psr.source_ok = true;
1622 return true; 1694 return true;
1623} 1695}
1624 1696
@@ -1657,10 +1729,11 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
1657 if (!intel_edp_is_psr_enabled(dev)) 1729 if (!intel_edp_is_psr_enabled(dev))
1658 return; 1730 return;
1659 1731
1660 I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE); 1732 I915_WRITE(EDP_PSR_CTL(dev),
1733 I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
1661 1734
1662 /* Wait till PSR is idle */ 1735 /* Wait till PSR is idle */
1663 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) & 1736 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
1664 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) 1737 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
1665 DRM_ERROR("Timed out waiting for PSR Idle State\n"); 1738 DRM_ERROR("Timed out waiting for PSR Idle State\n");
1666} 1739}
@@ -1674,7 +1747,7 @@ void intel_edp_psr_update(struct drm_device *dev)
1674 if (encoder->type == INTEL_OUTPUT_EDP) { 1747 if (encoder->type == INTEL_OUTPUT_EDP) {
1675 intel_dp = enc_to_intel_dp(&encoder->base); 1748 intel_dp = enc_to_intel_dp(&encoder->base);
1676 1749
1677 if (!is_edp_psr(intel_dp)) 1750 if (!is_edp_psr(dev))
1678 return; 1751 return;
1679 1752
1680 if (!intel_edp_psr_match_conditions(intel_dp)) 1753 if (!intel_edp_psr_match_conditions(intel_dp))
@@ -1733,14 +1806,24 @@ static void intel_enable_dp(struct intel_encoder *encoder)
1733 ironlake_edp_panel_vdd_off(intel_dp, true); 1806 ironlake_edp_panel_vdd_off(intel_dp, true);
1734 intel_dp_complete_link_train(intel_dp); 1807 intel_dp_complete_link_train(intel_dp);
1735 intel_dp_stop_link_train(intel_dp); 1808 intel_dp_stop_link_train(intel_dp);
1809}
1810
1811static void g4x_enable_dp(struct intel_encoder *encoder)
1812{
1813 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1814
1815 intel_enable_dp(encoder);
1736 ironlake_edp_backlight_on(intel_dp); 1816 ironlake_edp_backlight_on(intel_dp);
1737} 1817}
1738 1818
1739static void vlv_enable_dp(struct intel_encoder *encoder) 1819static void vlv_enable_dp(struct intel_encoder *encoder)
1740{ 1820{
1821 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1822
1823 ironlake_edp_backlight_on(intel_dp);
1741} 1824}
1742 1825
1743static void intel_pre_enable_dp(struct intel_encoder *encoder) 1826static void g4x_pre_enable_dp(struct intel_encoder *encoder)
1744{ 1827{
1745 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1828 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1746 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 1829 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
@@ -1758,53 +1841,59 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
1758 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 1841 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
1759 int port = vlv_dport_to_channel(dport); 1842 int port = vlv_dport_to_channel(dport);
1760 int pipe = intel_crtc->pipe; 1843 int pipe = intel_crtc->pipe;
1844 struct edp_power_seq power_seq;
1761 u32 val; 1845 u32 val;
1762 1846
1763 mutex_lock(&dev_priv->dpio_lock); 1847 mutex_lock(&dev_priv->dpio_lock);
1764 1848
1765 val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port)); 1849 val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
1766 val = 0; 1850 val = 0;
1767 if (pipe) 1851 if (pipe)
1768 val |= (1<<21); 1852 val |= (1<<21);
1769 else 1853 else
1770 val &= ~(1<<21); 1854 val &= ~(1<<21);
1771 val |= 0x001000c4; 1855 val |= 0x001000c4;
1772 vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val); 1856 vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
1773 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018); 1857 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
1774 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888); 1858 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
1775 1859
1776 mutex_unlock(&dev_priv->dpio_lock); 1860 mutex_unlock(&dev_priv->dpio_lock);
1777 1861
1862 /* init power sequencer on this pipe and port */
1863 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
1864 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
1865 &power_seq);
1866
1778 intel_enable_dp(encoder); 1867 intel_enable_dp(encoder);
1779 1868
1780 vlv_wait_port_ready(dev_priv, port); 1869 vlv_wait_port_ready(dev_priv, port);
1781} 1870}
1782 1871
1783static void intel_dp_pre_pll_enable(struct intel_encoder *encoder) 1872static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
1784{ 1873{
1785 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1874 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1786 struct drm_device *dev = encoder->base.dev; 1875 struct drm_device *dev = encoder->base.dev;
1787 struct drm_i915_private *dev_priv = dev->dev_private; 1876 struct drm_i915_private *dev_priv = dev->dev_private;
1877 struct intel_crtc *intel_crtc =
1878 to_intel_crtc(encoder->base.crtc);
1788 int port = vlv_dport_to_channel(dport); 1879 int port = vlv_dport_to_channel(dport);
1789 1880 int pipe = intel_crtc->pipe;
1790 if (!IS_VALLEYVIEW(dev))
1791 return;
1792 1881
1793 /* Program Tx lane resets to default */ 1882 /* Program Tx lane resets to default */
1794 mutex_lock(&dev_priv->dpio_lock); 1883 mutex_lock(&dev_priv->dpio_lock);
1795 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 1884 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
1796 DPIO_PCS_TX_LANE2_RESET | 1885 DPIO_PCS_TX_LANE2_RESET |
1797 DPIO_PCS_TX_LANE1_RESET); 1886 DPIO_PCS_TX_LANE1_RESET);
1798 vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 1887 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
1799 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | 1888 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1800 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | 1889 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1801 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | 1890 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1802 DPIO_PCS_CLK_SOFT_RESET); 1891 DPIO_PCS_CLK_SOFT_RESET);
1803 1892
1804 /* Fix up inter-pair skew failure */ 1893 /* Fix up inter-pair skew failure */
1805 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00); 1894 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
1806 vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500); 1895 vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
1807 vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000); 1896 vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
1808 mutex_unlock(&dev_priv->dpio_lock); 1897 mutex_unlock(&dev_priv->dpio_lock);
1809} 1898}
1810 1899
@@ -1939,10 +2028,13 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
1939 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2028 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1940 struct drm_i915_private *dev_priv = dev->dev_private; 2029 struct drm_i915_private *dev_priv = dev->dev_private;
1941 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 2030 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2031 struct intel_crtc *intel_crtc =
2032 to_intel_crtc(dport->base.base.crtc);
1942 unsigned long demph_reg_value, preemph_reg_value, 2033 unsigned long demph_reg_value, preemph_reg_value,
1943 uniqtranscale_reg_value; 2034 uniqtranscale_reg_value;
1944 uint8_t train_set = intel_dp->train_set[0]; 2035 uint8_t train_set = intel_dp->train_set[0];
1945 int port = vlv_dport_to_channel(dport); 2036 int port = vlv_dport_to_channel(dport);
2037 int pipe = intel_crtc->pipe;
1946 2038
1947 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 2039 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
1948 case DP_TRAIN_PRE_EMPHASIS_0: 2040 case DP_TRAIN_PRE_EMPHASIS_0:
@@ -2018,21 +2110,22 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2018 } 2110 }
2019 2111
2020 mutex_lock(&dev_priv->dpio_lock); 2112 mutex_lock(&dev_priv->dpio_lock);
2021 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000); 2113 vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000);
2022 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value); 2114 vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value);
2023 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port), 2115 vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
2024 uniqtranscale_reg_value); 2116 uniqtranscale_reg_value);
2025 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040); 2117 vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040);
2026 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000); 2118 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
2027 vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value); 2119 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
2028 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000); 2120 vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000);
2029 mutex_unlock(&dev_priv->dpio_lock); 2121 mutex_unlock(&dev_priv->dpio_lock);
2030 2122
2031 return 0; 2123 return 0;
2032} 2124}
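
intel_vlv_signal_levels() picks the demph/preemph/uniqtranscale values from the requested voltage swing and pre-emphasis, which the switch statements elided above decode from train_set[0]. The two bit-fields follow the DPCD TRAINING_LANEx_SET layout; a self-contained decode, with hypothetical names:

#include <stdint.h>

/* DPCD TRAINING_LANEx_SET layout: bits 1:0 carry the voltage-swing
 * level, bits 4:3 the pre-emphasis level. */
static void ex_decode_train_set(uint8_t train_set,
				uint8_t *vswing, uint8_t *preemph)
{
	*vswing  = train_set & 0x03;		/* DP_TRAIN_VOLTAGE_SWING_MASK */
	*preemph = (train_set & 0x18) >> 3;	/* DP_TRAIN_PRE_EMPHASIS_MASK */
}
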
2033 2125
2034static void 2126static void
2035intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 2127intel_get_adjust_train(struct intel_dp *intel_dp,
2128 const uint8_t link_status[DP_LINK_STATUS_SIZE])
2036{ 2129{
2037 uint8_t v = 0; 2130 uint8_t v = 0;
2038 uint8_t p = 0; 2131 uint8_t p = 0;
@@ -2227,14 +2320,15 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
2227 2320
2228static bool 2321static bool
2229intel_dp_set_link_train(struct intel_dp *intel_dp, 2322intel_dp_set_link_train(struct intel_dp *intel_dp,
2230 uint32_t dp_reg_value, 2323 uint32_t *DP,
2231 uint8_t dp_train_pat) 2324 uint8_t dp_train_pat)
2232{ 2325{
2233 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2326 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2234 struct drm_device *dev = intel_dig_port->base.base.dev; 2327 struct drm_device *dev = intel_dig_port->base.base.dev;
2235 struct drm_i915_private *dev_priv = dev->dev_private; 2328 struct drm_i915_private *dev_priv = dev->dev_private;
2236 enum port port = intel_dig_port->port; 2329 enum port port = intel_dig_port->port;
2237 int ret; 2330 uint8_t buf[sizeof(intel_dp->train_set) + 1];
2331 int ret, len;
2238 2332
2239 if (HAS_DDI(dev)) { 2333 if (HAS_DDI(dev)) {
2240 uint32_t temp = I915_READ(DP_TP_CTL(port)); 2334 uint32_t temp = I915_READ(DP_TP_CTL(port));
@@ -2263,62 +2357,93 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
2263 I915_WRITE(DP_TP_CTL(port), temp); 2357 I915_WRITE(DP_TP_CTL(port), temp);
2264 2358
2265 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) { 2359 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2266 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; 2360 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2267 2361
2268 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 2362 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2269 case DP_TRAINING_PATTERN_DISABLE: 2363 case DP_TRAINING_PATTERN_DISABLE:
2270 dp_reg_value |= DP_LINK_TRAIN_OFF_CPT; 2364 *DP |= DP_LINK_TRAIN_OFF_CPT;
2271 break; 2365 break;
2272 case DP_TRAINING_PATTERN_1: 2366 case DP_TRAINING_PATTERN_1:
2273 dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT; 2367 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2274 break; 2368 break;
2275 case DP_TRAINING_PATTERN_2: 2369 case DP_TRAINING_PATTERN_2:
2276 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; 2370 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2277 break; 2371 break;
2278 case DP_TRAINING_PATTERN_3: 2372 case DP_TRAINING_PATTERN_3:
2279 DRM_ERROR("DP training pattern 3 not supported\n"); 2373 DRM_ERROR("DP training pattern 3 not supported\n");
2280 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; 2374 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2281 break; 2375 break;
2282 } 2376 }
2283 2377
2284 } else { 2378 } else {
2285 dp_reg_value &= ~DP_LINK_TRAIN_MASK; 2379 *DP &= ~DP_LINK_TRAIN_MASK;
2286 2380
2287 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 2381 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2288 case DP_TRAINING_PATTERN_DISABLE: 2382 case DP_TRAINING_PATTERN_DISABLE:
2289 dp_reg_value |= DP_LINK_TRAIN_OFF; 2383 *DP |= DP_LINK_TRAIN_OFF;
2290 break; 2384 break;
2291 case DP_TRAINING_PATTERN_1: 2385 case DP_TRAINING_PATTERN_1:
2292 dp_reg_value |= DP_LINK_TRAIN_PAT_1; 2386 *DP |= DP_LINK_TRAIN_PAT_1;
2293 break; 2387 break;
2294 case DP_TRAINING_PATTERN_2: 2388 case DP_TRAINING_PATTERN_2:
2295 dp_reg_value |= DP_LINK_TRAIN_PAT_2; 2389 *DP |= DP_LINK_TRAIN_PAT_2;
2296 break; 2390 break;
2297 case DP_TRAINING_PATTERN_3: 2391 case DP_TRAINING_PATTERN_3:
2298 DRM_ERROR("DP training pattern 3 not supported\n"); 2392 DRM_ERROR("DP training pattern 3 not supported\n");
2299 dp_reg_value |= DP_LINK_TRAIN_PAT_2; 2393 *DP |= DP_LINK_TRAIN_PAT_2;
2300 break; 2394 break;
2301 } 2395 }
2302 } 2396 }
2303 2397
2304 I915_WRITE(intel_dp->output_reg, dp_reg_value); 2398 I915_WRITE(intel_dp->output_reg, *DP);
2305 POSTING_READ(intel_dp->output_reg); 2399 POSTING_READ(intel_dp->output_reg);
2306 2400
2307 intel_dp_aux_native_write_1(intel_dp, 2401 buf[0] = dp_train_pat;
2308 DP_TRAINING_PATTERN_SET, 2402 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
2309 dp_train_pat);
2310
2311 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
2312 DP_TRAINING_PATTERN_DISABLE) { 2403 DP_TRAINING_PATTERN_DISABLE) {
2313 ret = intel_dp_aux_native_write(intel_dp, 2404 /* don't write DP_TRAINING_LANEx_SET on disable */
2314 DP_TRAINING_LANE0_SET, 2405 len = 1;
2315 intel_dp->train_set, 2406 } else {
2316 intel_dp->lane_count); 2407 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
2317 if (ret != intel_dp->lane_count) 2408 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
2318 return false; 2409 len = intel_dp->lane_count + 1;
2319 } 2410 }
2320 2411
2321 return true; 2412 ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET,
2413 buf, len);
2414
2415 return ret == len;
2416}
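
The rewritten intel_dp_set_link_train() merges what used to be two AUX transactions (DP_TRAINING_PATTERN_SET, then DP_TRAINING_LANE0_SET onwards) into a single burst, which works because the DPCD addresses are adjacent: DP_TRAINING_PATTERN_SET is 0x102 and DP_TRAINING_LANE0_SET starts at 0x103. It also takes uint32_t *DP so the caller's register shadow stays in sync. A self-contained sketch of the packing (helper name and EX_* macros are illustrative):

#include <stdint.h>
#include <string.h>

#define EX_TRAINING_PATTERN_MASK	0x03
#define EX_TRAINING_PATTERN_DISABLE	0x00

/* Pack one AUX burst starting at DPCD 0x102: the pattern byte, then one
 * drive-setting byte per lane. Returns the number of bytes to send. */
static int ex_pack_training_burst(uint8_t *buf, uint8_t dp_train_pat,
				  const uint8_t *train_set, int lane_count)
{
	buf[0] = dp_train_pat;
	if ((dp_train_pat & EX_TRAINING_PATTERN_MASK) ==
	    EX_TRAINING_PATTERN_DISABLE)
		return 1;	/* no lane settings when disabling training */

	memcpy(buf + 1, train_set, lane_count);
	return lane_count + 1;
}
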
2417
2418static bool
2419intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2420 uint8_t dp_train_pat)
2421{
2422 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
2423 intel_dp_set_signal_levels(intel_dp, DP);
2424 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
2425}
2426
2427static bool
2428intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2429 const uint8_t link_status[DP_LINK_STATUS_SIZE])
2430{
2431 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2432 struct drm_device *dev = intel_dig_port->base.base.dev;
2433 struct drm_i915_private *dev_priv = dev->dev_private;
2434 int ret;
2435
2436 intel_get_adjust_train(intel_dp, link_status);
2437 intel_dp_set_signal_levels(intel_dp, DP);
2438
2439 I915_WRITE(intel_dp->output_reg, *DP);
2440 POSTING_READ(intel_dp->output_reg);
2441
2442 ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
2443 intel_dp->train_set,
2444 intel_dp->lane_count);
2445
2446 return ret == intel_dp->lane_count;
2322} 2447}
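
The two new helpers carve up logic the training loops used to open-code: intel_dp_reset_link_train() clears the cached drive settings before programming a pattern, and intel_dp_update_link_train() folds intel_get_adjust_train() plus the DP_TRAINING_LANE0_SET write into one call. The intended call shape, sketched (error paths trimmed; not the patch verbatim):

	/* start clock recovery from zeroed drive settings */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE))
		return;

	for (;;) {
		/* ... read link_status, test drm_dp_clock_recovery_ok() ... */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status))
			break;	/* AUX write failed */
	}
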
2323 2448
2324static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) 2449static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
@@ -2362,32 +2487,37 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2362 uint8_t voltage; 2487 uint8_t voltage;
2363 int voltage_tries, loop_tries; 2488 int voltage_tries, loop_tries;
2364 uint32_t DP = intel_dp->DP; 2489 uint32_t DP = intel_dp->DP;
2490 uint8_t link_config[2];
2365 2491
2366 if (HAS_DDI(dev)) 2492 if (HAS_DDI(dev))
2367 intel_ddi_prepare_link_retrain(encoder); 2493 intel_ddi_prepare_link_retrain(encoder);
2368 2494
2369 /* Write the link configuration data */ 2495 /* Write the link configuration data */
2370 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, 2496 link_config[0] = intel_dp->link_bw;
2371 intel_dp->link_configuration, 2497 link_config[1] = intel_dp->lane_count;
2372 DP_LINK_CONFIGURATION_SIZE); 2498 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2499 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
2500 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);
2501
2502 link_config[0] = 0;
2503 link_config[1] = DP_SET_ANSI_8B10B;
2504 intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);
2373 2505
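
The per-connector intel_dp->link_configuration cache (9 bytes, removed from intel_drv.h below) is replaced by two 2-byte writes built on the spot. Both rely on adjacent DPCD addresses: DP_LINK_BW_SET (0x100) is followed by DP_LANE_COUNT_SET (0x101), and DP_DOWNSPREAD_CTRL (0x107) by DP_MAIN_LINK_CHANNEL_CODING_SET (0x108), which is where DP_SET_ANSI_8B10B lands. Annotated sketch of the first burst:

	link_config[0] = intel_dp->link_bw;	/* DPCD 0x100, e.g. 0x0a = 2.7 GHz */
	link_config[1] = intel_dp->lane_count;	/* DPCD 0x101, bits 4:0 */
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;	/* bit 7 */
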
2374 DP |= DP_PORT_EN; 2506 DP |= DP_PORT_EN;
2375 2507
2376 memset(intel_dp->train_set, 0, 4); 2508 /* clock recovery */
2509 if (!intel_dp_reset_link_train(intel_dp, &DP,
2510 DP_TRAINING_PATTERN_1 |
2511 DP_LINK_SCRAMBLING_DISABLE)) {
2512 DRM_ERROR("failed to enable link training\n");
2513 return;
2514 }
2515
2377 voltage = 0xff; 2516 voltage = 0xff;
2378 voltage_tries = 0; 2517 voltage_tries = 0;
2379 loop_tries = 0; 2518 loop_tries = 0;
2380 for (;;) { 2519 for (;;) {
2381 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 2520 uint8_t link_status[DP_LINK_STATUS_SIZE];
2382 uint8_t link_status[DP_LINK_STATUS_SIZE];
2383
2384 intel_dp_set_signal_levels(intel_dp, &DP);
2385
2386 /* Set training pattern 1 */
2387 if (!intel_dp_set_link_train(intel_dp, DP,
2388 DP_TRAINING_PATTERN_1 |
2389 DP_LINK_SCRAMBLING_DISABLE))
2390 break;
2391 2521
2392 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); 2522 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
2393 if (!intel_dp_get_link_status(intel_dp, link_status)) { 2523 if (!intel_dp_get_link_status(intel_dp, link_status)) {
@@ -2407,10 +2537,12 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2407 if (i == intel_dp->lane_count) { 2537 if (i == intel_dp->lane_count) {
2408 ++loop_tries; 2538 ++loop_tries;
2409 if (loop_tries == 5) { 2539 if (loop_tries == 5) {
2410 DRM_DEBUG_KMS("too many full retries, give up\n"); 2540 DRM_ERROR("too many full retries, give up\n");
2411 break; 2541 break;
2412 } 2542 }
2413 memset(intel_dp->train_set, 0, 4); 2543 intel_dp_reset_link_train(intel_dp, &DP,
2544 DP_TRAINING_PATTERN_1 |
2545 DP_LINK_SCRAMBLING_DISABLE);
2414 voltage_tries = 0; 2546 voltage_tries = 0;
2415 continue; 2547 continue;
2416 } 2548 }
@@ -2419,15 +2551,18 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2419 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 2551 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
2420 ++voltage_tries; 2552 ++voltage_tries;
2421 if (voltage_tries == 5) { 2553 if (voltage_tries == 5) {
2422 DRM_DEBUG_KMS("too many voltage retries, give up\n"); 2554 DRM_ERROR("too many voltage retries, give up\n");
2423 break; 2555 break;
2424 } 2556 }
2425 } else 2557 } else
2426 voltage_tries = 0; 2558 voltage_tries = 0;
2427 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 2559 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
2428 2560
2429 /* Compute new intel_dp->train_set as requested by target */ 2561 /* Update training set as requested by target */
2430 intel_get_adjust_train(intel_dp, link_status); 2562 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
2563 DRM_ERROR("failed to update link training\n");
2564 break;
2565 }
2431 } 2566 }
2432 2567
2433 intel_dp->DP = DP; 2568 intel_dp->DP = DP;
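
The clock-recovery loop keeps two retry budgets: voltage_tries counts consecutive attempts stuck at the same voltage swing, and loop_tries counts full restarts taken after every lane failed CR; both give up at five, now via DRM_ERROR rather than a debug message, since an aborted link train is worth a log entry. A compact, self-contained model of the policy (names illustrative; 'same_voltage' stands in for the train_set[0] comparison above):

#include <stdbool.h>

static bool ex_cr_give_up(int *voltage_tries, int *loop_tries,
			  bool all_lanes_failed, bool same_voltage)
{
	if (all_lanes_failed)
		return ++*loop_tries == 5;	/* too many full retries */

	if (same_voltage)
		return ++*voltage_tries == 5;	/* too many voltage retries */

	*voltage_tries = 0;	/* voltage moved; reset that budget */
	return false;
}
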
@@ -2441,11 +2576,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2441 uint32_t DP = intel_dp->DP; 2576 uint32_t DP = intel_dp->DP;
2442 2577
2443 /* channel equalization */ 2578 /* channel equalization */
2579 if (!intel_dp_set_link_train(intel_dp, &DP,
2580 DP_TRAINING_PATTERN_2 |
2581 DP_LINK_SCRAMBLING_DISABLE)) {
2582 DRM_ERROR("failed to start channel equalization\n");
2583 return;
2584 }
2585
2444 tries = 0; 2586 tries = 0;
2445 cr_tries = 0; 2587 cr_tries = 0;
2446 channel_eq = false; 2588 channel_eq = false;
2447 for (;;) { 2589 for (;;) {
2448 uint8_t link_status[DP_LINK_STATUS_SIZE]; 2590 uint8_t link_status[DP_LINK_STATUS_SIZE];
2449 2591
2450 if (cr_tries > 5) { 2592 if (cr_tries > 5) {
2451 DRM_ERROR("failed to train DP, aborting\n"); 2593 DRM_ERROR("failed to train DP, aborting\n");
@@ -2453,21 +2595,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2453 break; 2595 break;
2454 } 2596 }
2455 2597
2456 intel_dp_set_signal_levels(intel_dp, &DP);
2457
2458 /* channel eq pattern */
2459 if (!intel_dp_set_link_train(intel_dp, DP,
2460 DP_TRAINING_PATTERN_2 |
2461 DP_LINK_SCRAMBLING_DISABLE))
2462 break;
2463
2464 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); 2598 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
2465 if (!intel_dp_get_link_status(intel_dp, link_status)) 2599 if (!intel_dp_get_link_status(intel_dp, link_status)) {
2600 DRM_ERROR("failed to get link status\n");
2466 break; 2601 break;
2602 }
2467 2603
2468 /* Make sure clock is still ok */ 2604 /* Make sure clock is still ok */
2469 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 2605 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
2470 intel_dp_start_link_train(intel_dp); 2606 intel_dp_start_link_train(intel_dp);
2607 intel_dp_set_link_train(intel_dp, &DP,
2608 DP_TRAINING_PATTERN_2 |
2609 DP_LINK_SCRAMBLING_DISABLE);
2471 cr_tries++; 2610 cr_tries++;
2472 continue; 2611 continue;
2473 } 2612 }
@@ -2481,13 +2620,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2481 if (tries > 5) { 2620 if (tries > 5) {
2482 intel_dp_link_down(intel_dp); 2621 intel_dp_link_down(intel_dp);
2483 intel_dp_start_link_train(intel_dp); 2622 intel_dp_start_link_train(intel_dp);
2623 intel_dp_set_link_train(intel_dp, &DP,
2624 DP_TRAINING_PATTERN_2 |
2625 DP_LINK_SCRAMBLING_DISABLE);
2484 tries = 0; 2626 tries = 0;
2485 cr_tries++; 2627 cr_tries++;
2486 continue; 2628 continue;
2487 } 2629 }
2488 2630
2489 /* Compute new intel_dp->train_set as requested by target */ 2631 /* Update training set as requested by target */
2490 intel_get_adjust_train(intel_dp, link_status); 2632 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
2633 DRM_ERROR("failed to update link training\n");
2634 break;
2635 }
2491 ++tries; 2636 ++tries;
2492 } 2637 }
2493 2638
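
Channel equalization now enters training pattern 2 once, before the loop, so every path that falls back to clock recovery (CR lost mid-EQ, or more than five EQ attempts) has to re-select pattern 2 after intel_dp_start_link_train() returns; that is what the two added intel_dp_set_link_train() calls do, with cr_tries capping the fallbacks at five. The restart path, sketched:

	/* EQ failed repeatedly: drop the link, redo CR, re-enter pattern 2 */
	intel_dp_link_down(intel_dp);
	intel_dp_start_link_train(intel_dp);
	intel_dp_set_link_train(intel_dp, &DP,
				DP_TRAINING_PATTERN_2 |
				DP_LINK_SCRAMBLING_DISABLE);
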
@@ -2502,7 +2647,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2502 2647
2503void intel_dp_stop_link_train(struct intel_dp *intel_dp) 2648void intel_dp_stop_link_train(struct intel_dp *intel_dp)
2504{ 2649{
2505 intel_dp_set_link_train(intel_dp, intel_dp->DP, 2650 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2506 DP_TRAINING_PATTERN_DISABLE); 2651 DP_TRAINING_PATTERN_DISABLE);
2507} 2652}
2508 2653
@@ -2589,6 +2734,10 @@ intel_dp_link_down(struct intel_dp *intel_dp)
2589static bool 2734static bool
2590intel_dp_get_dpcd(struct intel_dp *intel_dp) 2735intel_dp_get_dpcd(struct intel_dp *intel_dp)
2591{ 2736{
2737 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2738 struct drm_device *dev = dig_port->base.base.dev;
2739 struct drm_i915_private *dev_priv = dev->dev_private;
2740
2592 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; 2741 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
2593 2742
2594 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, 2743 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
@@ -2604,11 +2753,16 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
2604 2753
2605 /* Check if the panel supports PSR */ 2754 /* Check if the panel supports PSR */
2606 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd)); 2755 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
2607 intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT, 2756 if (is_edp(intel_dp)) {
2608 intel_dp->psr_dpcd, 2757 intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
2609 sizeof(intel_dp->psr_dpcd)); 2758 intel_dp->psr_dpcd,
2610 if (is_edp_psr(intel_dp)) 2759 sizeof(intel_dp->psr_dpcd));
2611 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); 2760 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
2761 dev_priv->psr.sink_support = true;
2762 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
2763 }
2764 }
2765
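
PSR support is only meaningful for eDP panels, so the DPCD probe at DP_PSR_SUPPORT (0x070) is now gated on is_edp(), and a positive result is latched in dev_priv->psr.sink_support so the PSR paths can check a cached flag instead of re-reading DPCD. The test itself, as a self-contained sketch (the EX_* constant mirrors DP_PSR_IS_SUPPORTED):

#include <stdbool.h>
#include <stdint.h>

#define EX_PSR_IS_SUPPORTED	0x01	/* bit 0 of DPCD 0x070 */

static bool ex_sink_supports_psr(const uint8_t *psr_dpcd)
{
	return (psr_dpcd[0] & EX_PSR_IS_SUPPORTED) != 0;
}
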
2612 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 2766 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2613 DP_DWN_STRM_PORT_PRESENT)) 2767 DP_DWN_STRM_PORT_PRESENT))
2614 return true; /* native DP sink */ 2768 return true; /* native DP sink */
@@ -2728,7 +2882,6 @@ static enum drm_connector_status
2728intel_dp_detect_dpcd(struct intel_dp *intel_dp) 2882intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2729{ 2883{
2730 uint8_t *dpcd = intel_dp->dpcd; 2884 uint8_t *dpcd = intel_dp->dpcd;
2731 bool hpd;
2732 uint8_t type; 2885 uint8_t type;
2733 2886
2734 if (!intel_dp_get_dpcd(intel_dp)) 2887 if (!intel_dp_get_dpcd(intel_dp))
@@ -2739,8 +2892,8 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2739 return connector_status_connected; 2892 return connector_status_connected;
2740 2893
2741 /* If we're HPD-aware, SINK_COUNT changes dynamically */ 2894 /* If we're HPD-aware, SINK_COUNT changes dynamically */
2742 hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD); 2895 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
2743 if (hpd) { 2896 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
2744 uint8_t reg; 2897 uint8_t reg;
2745 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT, 2898 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
2746 &reg, 1)) 2899 &reg, 1))
@@ -2754,9 +2907,18 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2754 return connector_status_connected; 2907 return connector_status_connected;
2755 2908
2756 /* Well we tried, say unknown for unreliable port types */ 2909 /* Well we tried, say unknown for unreliable port types */
2757 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 2910 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
2758 if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID) 2911 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
2759 return connector_status_unknown; 2912 if (type == DP_DS_PORT_TYPE_VGA ||
2913 type == DP_DS_PORT_TYPE_NON_EDID)
2914 return connector_status_unknown;
2915 } else {
2916 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2917 DP_DWN_STRM_PORT_TYPE_MASK;
2918 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
2919 type == DP_DWN_STRM_PORT_TYPE_OTHER)
2920 return connector_status_unknown;
2921 }
2760 2922
2761 /* Anything else is out of spec, warn and ignore */ 2923 /* Anything else is out of spec, warn and ignore */
2762 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); 2924 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
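
Sinks with a DPCD revision below 1.1 have no per-port descriptors at DP_DOWNSTREAM_PORT_0 (0x80), so for them the branch-device type has to come from the coarse field in DP_DOWNSTREAMPORT_PRESENT (0x005); analog and "other" ports cannot be probed reliably either way, hence connector_status_unknown. A self-contained classifier sketch (the EX_* values mirror the drm_dp_helper.h definitions; the function itself is hypothetical):

#include <stdbool.h>
#include <stdint.h>

#define EX_DS_PORT_TYPE_MASK		0x07	/* DPCD 0x80, rev >= 1.1 */
#define EX_DS_PORT_TYPE_VGA		1
#define EX_DS_PORT_TYPE_NON_EDID	4
#define EX_DWN_STRM_PORT_TYPE_MASK	0x06	/* DPCD 0x05, rev 1.0 */
#define EX_DWN_STRM_PORT_TYPE_ANALOG	(1 << 1)
#define EX_DWN_STRM_PORT_TYPE_OTHER	(3 << 1)

static bool ex_port_type_unreliable(uint8_t dpcd_rev, uint8_t ds_port0,
				    uint8_t dwn_strm_present)
{
	uint8_t type;

	if (dpcd_rev >= 0x11) {
		type = ds_port0 & EX_DS_PORT_TYPE_MASK;
		return type == EX_DS_PORT_TYPE_VGA ||
		       type == EX_DS_PORT_TYPE_NON_EDID;
	}

	type = dwn_strm_present & EX_DWN_STRM_PORT_TYPE_MASK;
	return type == EX_DWN_STRM_PORT_TYPE_ANALOG ||
	       type == EX_DWN_STRM_PORT_TYPE_OTHER;
}
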
@@ -2830,19 +2992,11 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
2830 2992
2831 /* use cached edid if we have one */ 2993 /* use cached edid if we have one */
2832 if (intel_connector->edid) { 2994 if (intel_connector->edid) {
2833 struct edid *edid;
2834 int size;
2835
2836 /* invalid edid */ 2995 /* invalid edid */
2837 if (IS_ERR(intel_connector->edid)) 2996 if (IS_ERR(intel_connector->edid))
2838 return NULL; 2997 return NULL;
2839 2998
2840 size = (intel_connector->edid->extensions + 1) * EDID_LENGTH; 2999 return drm_edid_duplicate(intel_connector->edid);
2841 edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
2842 if (!edid)
2843 return NULL;
2844
2845 return edid;
2846 } 3000 }
2847 3001
2848 return drm_get_edid(connector, adapter); 3002 return drm_get_edid(connector, adapter);
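
The open-coded duplication of the cached EDID moves into the new drm_edid_duplicate() helper added elsewhere in this series. The size computation it hides is exactly the deleted one: an EDID is one 128-byte base block (EDID_LENGTH) plus one 128-byte block per extension. Roughly what the helper does internally, based on the removed lines:

	/* equivalent of the removed open-coded copy */
	size_t size = (edid->extensions + 1) * EDID_LENGTH;	/* 128 * blocks */
	struct edid *copy = kmemdup(edid, size, GFP_KERNEL);	/* NULL on OOM */
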
@@ -3050,7 +3204,6 @@ intel_dp_connector_destroy(struct drm_connector *connector)
3050 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 3204 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3051 intel_panel_fini(&intel_connector->panel); 3205 intel_panel_fini(&intel_connector->panel);
3052 3206
3053 drm_sysfs_connector_remove(connector);
3054 drm_connector_cleanup(connector); 3207 drm_connector_cleanup(connector);
3055 kfree(connector); 3208 kfree(connector);
3056} 3209}
@@ -3121,7 +3274,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
3121bool intel_dpd_is_edp(struct drm_device *dev) 3274bool intel_dpd_is_edp(struct drm_device *dev)
3122{ 3275{
3123 struct drm_i915_private *dev_priv = dev->dev_private; 3276 struct drm_i915_private *dev_priv = dev->dev_private;
3124 struct child_device_config *p_child; 3277 union child_device_config *p_child;
3125 int i; 3278 int i;
3126 3279
3127 if (!dev_priv->vbt.child_dev_num) 3280 if (!dev_priv->vbt.child_dev_num)
@@ -3130,8 +3283,8 @@ bool intel_dpd_is_edp(struct drm_device *dev)
3130 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { 3283 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
3131 p_child = dev_priv->vbt.child_dev + i; 3284 p_child = dev_priv->vbt.child_dev + i;
3132 3285
3133 if (p_child->dvo_port == PORT_IDPD && 3286 if (p_child->common.dvo_port == PORT_IDPD &&
3134 p_child->device_type == DEVICE_TYPE_eDP) 3287 p_child->common.device_type == DEVICE_TYPE_eDP)
3135 return true; 3288 return true;
3136 } 3289 }
3137 return false; 3290 return false;
@@ -3164,24 +3317,26 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
3164 struct drm_i915_private *dev_priv = dev->dev_private; 3317 struct drm_i915_private *dev_priv = dev->dev_private;
3165 struct edp_power_seq cur, vbt, spec, final; 3318 struct edp_power_seq cur, vbt, spec, final;
3166 u32 pp_on, pp_off, pp_div, pp; 3319 u32 pp_on, pp_off, pp_div, pp;
3167 int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg; 3320 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
3168 3321
3169 if (HAS_PCH_SPLIT(dev)) { 3322 if (HAS_PCH_SPLIT(dev)) {
3170 pp_control_reg = PCH_PP_CONTROL; 3323 pp_ctrl_reg = PCH_PP_CONTROL;
3171 pp_on_reg = PCH_PP_ON_DELAYS; 3324 pp_on_reg = PCH_PP_ON_DELAYS;
3172 pp_off_reg = PCH_PP_OFF_DELAYS; 3325 pp_off_reg = PCH_PP_OFF_DELAYS;
3173 pp_div_reg = PCH_PP_DIVISOR; 3326 pp_div_reg = PCH_PP_DIVISOR;
3174 } else { 3327 } else {
3175 pp_control_reg = PIPEA_PP_CONTROL; 3328 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
3176 pp_on_reg = PIPEA_PP_ON_DELAYS; 3329
3177 pp_off_reg = PIPEA_PP_OFF_DELAYS; 3330 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
3178 pp_div_reg = PIPEA_PP_DIVISOR; 3331 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
3332 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
3333 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
3179 } 3334 }
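
Valleyview has an independent panel-power sequencer per pipe instead of the single PCH block, so the fixed PIPEA_PP_* registers give way to the VLV_PIPE_PP_*(pipe) variants, with vlv_power_sequencer_pipe() resolving which pipe this eDP port's sequencer is bound to. The selection, condensed (mirrors the hunk; no new behavior):

	if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;		/* one PCH instance */
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);	/* per pipe */
	}
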
3180 3335
3181 /* Workaround: Need to write PP_CONTROL with the unlock key as 3336 /* Workaround: Need to write PP_CONTROL with the unlock key as
3182 * the very first thing. */ 3337 * the very first thing. */
3183 pp = ironlake_get_pp_control(intel_dp); 3338 pp = ironlake_get_pp_control(intel_dp);
3184 I915_WRITE(pp_control_reg, pp); 3339 I915_WRITE(pp_ctrl_reg, pp);
3185 3340
3186 pp_on = I915_READ(pp_on_reg); 3341 pp_on = I915_READ(pp_on_reg);
3187 pp_off = I915_READ(pp_off_reg); 3342 pp_off = I915_READ(pp_off_reg);
@@ -3269,9 +3424,11 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
3269 pp_off_reg = PCH_PP_OFF_DELAYS; 3424 pp_off_reg = PCH_PP_OFF_DELAYS;
3270 pp_div_reg = PCH_PP_DIVISOR; 3425 pp_div_reg = PCH_PP_DIVISOR;
3271 } else { 3426 } else {
3272 pp_on_reg = PIPEA_PP_ON_DELAYS; 3427 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
3273 pp_off_reg = PIPEA_PP_OFF_DELAYS; 3428
3274 pp_div_reg = PIPEA_PP_DIVISOR; 3429 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
3430 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
3431 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
3275 } 3432 }
3276 3433
3277 /* And finally store the new values in the power sequencer. */ 3434 /* And finally store the new values in the power sequencer. */
@@ -3288,12 +3445,15 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
3288 /* Haswell doesn't have any port selection bits for the panel 3445 /* Haswell doesn't have any port selection bits for the panel
3289 * power sequencer any more. */ 3446 * power sequencer any more. */
3290 if (IS_VALLEYVIEW(dev)) { 3447 if (IS_VALLEYVIEW(dev)) {
3291 port_sel = I915_READ(pp_on_reg) & 0xc0000000; 3448 if (dp_to_dig_port(intel_dp)->port == PORT_B)
3449 port_sel = PANEL_PORT_SELECT_DPB_VLV;
3450 else
3451 port_sel = PANEL_PORT_SELECT_DPC_VLV;
3292 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 3452 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
3293 if (dp_to_dig_port(intel_dp)->port == PORT_A) 3453 if (dp_to_dig_port(intel_dp)->port == PORT_A)
3294 port_sel = PANEL_POWER_PORT_DP_A; 3454 port_sel = PANEL_PORT_SELECT_DPA;
3295 else 3455 else
3296 port_sel = PANEL_POWER_PORT_DP_D; 3456 port_sel = PANEL_PORT_SELECT_DPD;
3297 } 3457 }
3298 3458
3299 pp_on |= port_sel; 3459 pp_on |= port_sel;
@@ -3346,7 +3506,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3346 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 3506 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
3347 &power_seq); 3507 &power_seq);
3348 3508
3349 ironlake_edp_panel_vdd_on(intel_dp);
3350 edid = drm_get_edid(connector, &intel_dp->adapter); 3509 edid = drm_get_edid(connector, &intel_dp->adapter);
3351 if (edid) { 3510 if (edid) {
3352 if (drm_add_edid_modes(connector, edid)) { 3511 if (drm_add_edid_modes(connector, edid)) {
@@ -3378,8 +3537,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3378 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 3537 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
3379 } 3538 }
3380 3539
3381 ironlake_edp_panel_vdd_off(intel_dp, false);
3382
3383 intel_panel_init(&intel_connector->panel, fixed_mode); 3540 intel_panel_init(&intel_connector->panel, fixed_mode);
3384 intel_panel_setup_backlight(connector); 3541 intel_panel_setup_backlight(connector);
3385 3542
@@ -3536,11 +3693,11 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3536 struct drm_encoder *encoder; 3693 struct drm_encoder *encoder;
3537 struct intel_connector *intel_connector; 3694 struct intel_connector *intel_connector;
3538 3695
3539 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); 3696 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
3540 if (!intel_dig_port) 3697 if (!intel_dig_port)
3541 return; 3698 return;
3542 3699
3543 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 3700 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
3544 if (!intel_connector) { 3701 if (!intel_connector) {
3545 kfree(intel_dig_port); 3702 kfree(intel_dig_port);
3546 return; 3703 return;
@@ -3559,12 +3716,12 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3559 intel_encoder->get_hw_state = intel_dp_get_hw_state; 3716 intel_encoder->get_hw_state = intel_dp_get_hw_state;
3560 intel_encoder->get_config = intel_dp_get_config; 3717 intel_encoder->get_config = intel_dp_get_config;
3561 if (IS_VALLEYVIEW(dev)) { 3718 if (IS_VALLEYVIEW(dev)) {
3562 intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable; 3719 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
3563 intel_encoder->pre_enable = vlv_pre_enable_dp; 3720 intel_encoder->pre_enable = vlv_pre_enable_dp;
3564 intel_encoder->enable = vlv_enable_dp; 3721 intel_encoder->enable = vlv_enable_dp;
3565 } else { 3722 } else {
3566 intel_encoder->pre_enable = intel_pre_enable_dp; 3723 intel_encoder->pre_enable = g4x_pre_enable_dp;
3567 intel_encoder->enable = intel_enable_dp; 3724 intel_encoder->enable = g4x_enable_dp;
3568 } 3725 }
3569 3726
3570 intel_dig_port->port = port; 3727 intel_dig_port->port = port;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 7f2b384ac939..9d2624fd92c2 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -77,7 +77,6 @@
77/* the i915, i945 have a single sDVO i2c bus - which is different */ 77/* the i915, i945 have a single sDVO i2c bus - which is different */
78#define MAX_OUTPUTS 6 78#define MAX_OUTPUTS 6
79/* maximum connectors per crtcs in the mode set */ 79/* maximum connectors per crtcs in the mode set */
80#define INTELFB_CONN_LIMIT 4
81 80
82#define INTEL_I2C_BUS_DVO 1 81#define INTEL_I2C_BUS_DVO 1
83#define INTEL_I2C_BUS_SDVO 2 82#define INTEL_I2C_BUS_SDVO 2
@@ -93,13 +92,17 @@
93#define INTEL_OUTPUT_HDMI 6 92#define INTEL_OUTPUT_HDMI 6
94#define INTEL_OUTPUT_DISPLAYPORT 7 93#define INTEL_OUTPUT_DISPLAYPORT 7
95#define INTEL_OUTPUT_EDP 8 94#define INTEL_OUTPUT_EDP 8
96#define INTEL_OUTPUT_UNKNOWN 9 95#define INTEL_OUTPUT_DSI 9
96#define INTEL_OUTPUT_UNKNOWN 10
97 97
98#define INTEL_DVO_CHIP_NONE 0 98#define INTEL_DVO_CHIP_NONE 0
99#define INTEL_DVO_CHIP_LVDS 1 99#define INTEL_DVO_CHIP_LVDS 1
100#define INTEL_DVO_CHIP_TMDS 2 100#define INTEL_DVO_CHIP_TMDS 2
101#define INTEL_DVO_CHIP_TVOUT 4 101#define INTEL_DVO_CHIP_TVOUT 4
102 102
103#define INTEL_DSI_COMMAND_MODE 0
104#define INTEL_DSI_VIDEO_MODE 1
105
103struct intel_framebuffer { 106struct intel_framebuffer {
104 struct drm_framebuffer base; 107 struct drm_framebuffer base;
105 struct drm_i915_gem_object *obj; 108 struct drm_i915_gem_object *obj;
@@ -207,8 +210,21 @@ struct intel_crtc_config {
207#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ 210#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
208 unsigned long quirks; 211 unsigned long quirks;
209 212
213 /* User requested mode, only valid as a starting point to
214 * compute adjusted_mode, except in the case of (S)DVO where
215 * it's also for the output timings of the (S)DVO chip.
216 * adjusted_mode will then correspond to the S(DVO) chip's
217 * preferred input timings. */
210 struct drm_display_mode requested_mode; 218 struct drm_display_mode requested_mode;
219 /* Actual pipe timings ie. what we program into the pipe timing
220 * registers. adjusted_mode.crtc_clock is the pipe pixel clock. */
211 struct drm_display_mode adjusted_mode; 221 struct drm_display_mode adjusted_mode;
222
223 /* Pipe source size (ie. panel fitter input size)
224 * All planes will be positioned inside this space,
225 * and get clipped at the edges. */
226 int pipe_src_w, pipe_src_h;
227
212 /* Whether to set up the PCH/FDI. Note that we never allow sharing 228 /* Whether to set up the PCH/FDI. Note that we never allow sharing
213 * between pch encoders and cpu encoders. */ 229 * between pch encoders and cpu encoders. */
214 bool has_pch_encoder; 230 bool has_pch_encoder;
@@ -262,7 +278,8 @@ struct intel_crtc_config {
262 278
263 /* 279 /*
264 * Frequence the dpll for the port should run at. Differs from the 280 * Frequence the dpll for the port should run at. Differs from the
265 * adjusted dotclock e.g. for DP or 12bpc hdmi mode. 281 * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
282 * already multiplied by pixel_multiplier.
266 */ 283 */
266 */ 283 */
267 int port_clock; 284 int port_clock;
268 285
@@ -288,6 +305,14 @@ struct intel_crtc_config {
288 struct intel_link_m_n fdi_m_n; 305 struct intel_link_m_n fdi_m_n;
289 306
290 bool ips_enabled; 307 bool ips_enabled;
308
309 bool double_wide;
310};
311
312struct intel_pipe_wm {
313 struct intel_wm_level wm[5];
314 uint32_t linetime;
315 bool fbc_wm_enabled;
291}; 316};
292 317
293struct intel_crtc { 318struct intel_crtc {
@@ -301,8 +326,9 @@ struct intel_crtc {
301 * some outputs connected to this crtc. 326 * some outputs connected to this crtc.
302 */ 327 */
303 bool active; 328 bool active;
329 unsigned long enabled_power_domains;
304 bool eld_vld; 330 bool eld_vld;
305 bool primary_disabled; /* is the crtc obscured by a plane? */ 331 bool primary_enabled; /* is the primary plane (partially) visible? */
306 bool lowfreq_avail; 332 bool lowfreq_avail;
307 struct intel_overlay *overlay; 333 struct intel_overlay *overlay;
308 struct intel_unpin_work *unpin_work; 334 struct intel_unpin_work *unpin_work;
@@ -330,6 +356,12 @@ struct intel_crtc {
330 /* Access to these should be protected by dev_priv->irq_lock. */ 356 /* Access to these should be protected by dev_priv->irq_lock. */
331 bool cpu_fifo_underrun_disabled; 357 bool cpu_fifo_underrun_disabled;
332 bool pch_fifo_underrun_disabled; 358 bool pch_fifo_underrun_disabled;
359
360 /* per-pipe watermark state */
361 struct {
362 /* watermarks currently being used */
363 struct intel_pipe_wm active;
364 } wm;
333}; 365};
334 366
335struct intel_plane_wm_parameters { 367struct intel_plane_wm_parameters {
@@ -417,13 +449,11 @@ struct intel_hdmi {
417}; 449};
418 450
419#define DP_MAX_DOWNSTREAM_PORTS 0x10 451#define DP_MAX_DOWNSTREAM_PORTS 0x10
420#define DP_LINK_CONFIGURATION_SIZE 9
421 452
422struct intel_dp { 453struct intel_dp {
423 uint32_t output_reg; 454 uint32_t output_reg;
424 uint32_t aux_ch_ctl_reg; 455 uint32_t aux_ch_ctl_reg;
425 uint32_t DP; 456 uint32_t DP;
426 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
427 bool has_audio; 457 bool has_audio;
428 enum hdmi_force_audio force_audio; 458 enum hdmi_force_audio force_audio;
429 uint32_t color_range; 459 uint32_t color_range;
@@ -495,80 +525,6 @@ struct intel_unpin_work {
495 bool enable_stall_check; 525 bool enable_stall_check;
496}; 526};
497 527
498int intel_pch_rawclk(struct drm_device *dev);
499
500int intel_connector_update_modes(struct drm_connector *connector,
501 struct edid *edid);
502int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
503
504extern void intel_attach_force_audio_property(struct drm_connector *connector);
505extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
506
507extern bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
508extern void intel_crt_init(struct drm_device *dev);
509extern void intel_hdmi_init(struct drm_device *dev,
510 int hdmi_reg, enum port port);
511extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
512 struct intel_connector *intel_connector);
513extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
514extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
515 struct intel_crtc_config *pipe_config);
516extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
517 bool is_sdvob);
518extern void intel_dvo_init(struct drm_device *dev);
519extern void intel_tv_init(struct drm_device *dev);
520extern void intel_mark_busy(struct drm_device *dev);
521extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
522 struct intel_ring_buffer *ring);
523extern void intel_mark_idle(struct drm_device *dev);
524extern void intel_lvds_init(struct drm_device *dev);
525extern bool intel_is_dual_link_lvds(struct drm_device *dev);
526extern void intel_dp_init(struct drm_device *dev, int output_reg,
527 enum port port);
528extern bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
529 struct intel_connector *intel_connector);
530extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
531extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
532extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
533extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
534extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
535extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
536extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
537extern bool intel_dp_compute_config(struct intel_encoder *encoder,
538 struct intel_crtc_config *pipe_config);
539extern bool intel_dpd_is_edp(struct drm_device *dev);
540extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
541extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
542extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
543extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
544extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
545extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
546extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
547extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
548 enum plane plane);
549
550/* intel_panel.c */
551extern int intel_panel_init(struct intel_panel *panel,
552 struct drm_display_mode *fixed_mode);
553extern void intel_panel_fini(struct intel_panel *panel);
554
555extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
556 struct drm_display_mode *adjusted_mode);
557extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
558 struct intel_crtc_config *pipe_config,
559 int fitting_mode);
560extern void intel_gmch_panel_fitting(struct intel_crtc *crtc,
561 struct intel_crtc_config *pipe_config,
562 int fitting_mode);
563extern void intel_panel_set_backlight(struct drm_device *dev,
564 u32 level, u32 max);
565extern int intel_panel_setup_backlight(struct drm_connector *connector);
566extern void intel_panel_enable_backlight(struct drm_device *dev,
567 enum pipe pipe);
568extern void intel_panel_disable_backlight(struct drm_device *dev);
569extern void intel_panel_destroy_backlight(struct drm_device *dev);
570extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
571
572struct intel_set_config { 528struct intel_set_config {
573 struct drm_encoder **save_connector_encoders; 529 struct drm_encoder **save_connector_encoders;
574 struct drm_crtc **save_encoder_crtcs; 530 struct drm_crtc **save_encoder_crtcs;
@@ -577,18 +533,14 @@ struct intel_set_config {
577 bool mode_changed; 533 bool mode_changed;
578}; 534};
579 535
580extern void intel_crtc_restore_mode(struct drm_crtc *crtc); 536struct intel_load_detect_pipe {
581extern void intel_crtc_load_lut(struct drm_crtc *crtc); 537 struct drm_framebuffer *release_fb;
582extern void intel_crtc_update_dpms(struct drm_crtc *crtc); 538 bool load_detect_temp;
583extern void intel_encoder_destroy(struct drm_encoder *encoder); 539 int dpms_mode;
584extern void intel_connector_dpms(struct drm_connector *, int mode); 540};
585extern bool intel_connector_get_hw_state(struct intel_connector *connector);
586extern void intel_modeset_check_state(struct drm_device *dev);
587extern void intel_plane_restore(struct drm_plane *plane);
588extern void intel_plane_disable(struct drm_plane *plane);
589
590 541
591static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) 542static inline struct intel_encoder *
543intel_attached_encoder(struct drm_connector *connector)
592{ 544{
593 return to_intel_connector(connector)->encoder; 545 return to_intel_connector(connector)->encoder;
594} 546}
@@ -616,73 +568,94 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
616 return container_of(intel_hdmi, struct intel_digital_port, hdmi); 568 return container_of(intel_hdmi, struct intel_digital_port, hdmi);
617} 569}
618 570
571
572/* i915_irq.c */
573bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
574 enum pipe pipe, bool enable);
575bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
576 enum transcoder pch_transcoder,
577 bool enable);
578void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
579void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
580void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
581void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
582void hsw_pc8_disable_interrupts(struct drm_device *dev);
583void hsw_pc8_restore_interrupts(struct drm_device *dev);
584
585
586/* intel_crt.c */
587void intel_crt_init(struct drm_device *dev);
588
589
590/* intel_ddi.c */
591void intel_prepare_ddi(struct drm_device *dev);
592void hsw_fdi_link_train(struct drm_crtc *crtc);
593void intel_ddi_init(struct drm_device *dev, enum port port);
594enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
595bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
596int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
597void intel_ddi_pll_init(struct drm_device *dev);
598void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
599void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
600 enum transcoder cpu_transcoder);
601void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
602void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
603void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
604bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
605void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
606void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
607void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
608bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
609void intel_ddi_fdi_disable(struct drm_crtc *crtc);
610void intel_ddi_get_config(struct intel_encoder *encoder,
611 struct intel_crtc_config *pipe_config);
612
613
614/* intel_display.c */
615int intel_pch_rawclk(struct drm_device *dev);
616void intel_mark_busy(struct drm_device *dev);
617void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
618 struct intel_ring_buffer *ring);
619void intel_mark_idle(struct drm_device *dev);
620void intel_crtc_restore_mode(struct drm_crtc *crtc);
621void intel_crtc_update_dpms(struct drm_crtc *crtc);
622void intel_encoder_destroy(struct drm_encoder *encoder);
623void intel_connector_dpms(struct drm_connector *, int mode);
624bool intel_connector_get_hw_state(struct intel_connector *connector);
625void intel_modeset_check_state(struct drm_device *dev);
619bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, 626bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
620 struct intel_digital_port *port); 627 struct intel_digital_port *port);
621 628void intel_connector_attach_encoder(struct intel_connector *connector,
622extern void intel_connector_attach_encoder(struct intel_connector *connector, 629 struct intel_encoder *encoder);
623 struct intel_encoder *encoder); 630struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
624extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); 631struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
625 632 struct drm_crtc *crtc);
626extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
627 struct drm_crtc *crtc);
628int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 633int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
629 struct drm_file *file_priv); 634 struct drm_file *file_priv);
630extern enum transcoder 635enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
631intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 636 enum pipe pipe);
632 enum pipe pipe); 637void intel_wait_for_vblank(struct drm_device *dev, int pipe);
633extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); 638void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
634extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); 639int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
635extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); 640void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
636extern void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port); 641bool intel_get_load_detect_pipe(struct drm_connector *connector,
637 642 struct drm_display_mode *mode,
638struct intel_load_detect_pipe { 643 struct intel_load_detect_pipe *old);
639 struct drm_framebuffer *release_fb; 644void intel_release_load_detect_pipe(struct drm_connector *connector,
640 bool load_detect_temp; 645 struct intel_load_detect_pipe *old);
641 int dpms_mode; 646int intel_pin_and_fence_fb_obj(struct drm_device *dev,
642}; 647 struct drm_i915_gem_object *obj,
643extern bool intel_get_load_detect_pipe(struct drm_connector *connector, 648 struct intel_ring_buffer *pipelined);
644 struct drm_display_mode *mode, 649void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
645 struct intel_load_detect_pipe *old); 650int intel_framebuffer_init(struct drm_device *dev,
646extern void intel_release_load_detect_pipe(struct drm_connector *connector, 651 struct intel_framebuffer *ifb,
647 struct intel_load_detect_pipe *old); 652 struct drm_mode_fb_cmd2 *mode_cmd,
648 653 struct drm_i915_gem_object *obj);
649extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 654void intel_framebuffer_fini(struct intel_framebuffer *fb);
650 u16 blue, int regno); 655void intel_prepare_page_flip(struct drm_device *dev, int plane);
651extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 656void intel_finish_page_flip(struct drm_device *dev, int pipe);
652 u16 *blue, int regno); 657void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
653 658struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
654extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
655 struct drm_i915_gem_object *obj,
656 struct intel_ring_buffer *pipelined);
657extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
658
659extern int intel_framebuffer_init(struct drm_device *dev,
660 struct intel_framebuffer *ifb,
661 struct drm_mode_fb_cmd2 *mode_cmd,
662 struct drm_i915_gem_object *obj);
663extern void intel_framebuffer_fini(struct intel_framebuffer *fb);
664extern int intel_fbdev_init(struct drm_device *dev);
665extern void intel_fbdev_initial_config(struct drm_device *dev);
666extern void intel_fbdev_fini(struct drm_device *dev);
667extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
668extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
669extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
670extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
671
672extern void intel_setup_overlay(struct drm_device *dev);
673extern void intel_cleanup_overlay(struct drm_device *dev);
674extern int intel_overlay_switch_off(struct intel_overlay *overlay);
675extern int intel_overlay_put_image(struct drm_device *dev, void *data,
676 struct drm_file *file_priv);
677extern int intel_overlay_attrs(struct drm_device *dev, void *data,
678 struct drm_file *file_priv);
679
680extern void intel_fb_output_poll_changed(struct drm_device *dev);
681extern void intel_fb_restore_mode(struct drm_device *dev);
682
683struct intel_shared_dpll *
684intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
685
686void assert_shared_dpll(struct drm_i915_private *dev_priv, 659void assert_shared_dpll(struct drm_i915_private *dev_priv,
687 struct intel_shared_dpll *pll, 660 struct intel_shared_dpll *pll,
688 bool state); 661 bool state);
@@ -696,104 +669,198 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
696 enum pipe pipe, bool state); 669 enum pipe pipe, bool state);
697#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true) 670#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
698#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false) 671#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
699extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, 672void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
700 bool state);
701#define assert_pipe_enabled(d, p) assert_pipe(d, p, true) 673#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
702#define assert_pipe_disabled(d, p) assert_pipe(d, p, false) 674#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
675void intel_write_eld(struct drm_encoder *encoder,
676 struct drm_display_mode *mode);
677unsigned long intel_gen4_compute_page_offset(int *x, int *y,
678 unsigned int tiling_mode,
679 unsigned int bpp,
680 unsigned int pitch);
681void intel_display_handle_reset(struct drm_device *dev);
682void hsw_enable_pc8_work(struct work_struct *__work);
683void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
684void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
685void intel_dp_get_m_n(struct intel_crtc *crtc,
686 struct intel_crtc_config *pipe_config);
687int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
688void
689ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
690 int dotclock);
691bool intel_crtc_active(struct drm_crtc *crtc);
692void i915_disable_vga_mem(struct drm_device *dev);
693void hsw_enable_ips(struct intel_crtc *crtc);
694void hsw_disable_ips(struct intel_crtc *crtc);
695void intel_display_set_init_power(struct drm_device *dev, bool enable);
696
697
698/* intel_dp.c */
699void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
700bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
701 struct intel_connector *intel_connector);
702void intel_dp_start_link_train(struct intel_dp *intel_dp);
703void intel_dp_complete_link_train(struct intel_dp *intel_dp);
704void intel_dp_stop_link_train(struct intel_dp *intel_dp);
705void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
706void intel_dp_encoder_destroy(struct drm_encoder *encoder);
707void intel_dp_check_link_status(struct intel_dp *intel_dp);
708bool intel_dp_compute_config(struct intel_encoder *encoder,
709 struct intel_crtc_config *pipe_config);
710bool intel_dpd_is_edp(struct drm_device *dev);
711void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
712void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
713void ironlake_edp_panel_on(struct intel_dp *intel_dp);
714void ironlake_edp_panel_off(struct intel_dp *intel_dp);
715void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
716void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
717void intel_edp_psr_enable(struct intel_dp *intel_dp);
718void intel_edp_psr_disable(struct intel_dp *intel_dp);
719void intel_edp_psr_update(struct drm_device *dev);
720
721
722/* intel_dsi.c */
723bool intel_dsi_init(struct drm_device *dev);
724
725
726/* intel_dvo.c */
727void intel_dvo_init(struct drm_device *dev);
728
729
730/* legacy fbdev emulation in intel_fbdev.c */
731#ifdef CONFIG_DRM_I915_FBDEV
732extern int intel_fbdev_init(struct drm_device *dev);
733extern void intel_fbdev_initial_config(struct drm_device *dev);
734extern void intel_fbdev_fini(struct drm_device *dev);
735extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
736extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
737extern void intel_fbdev_restore_mode(struct drm_device *dev);
738#else
739static inline int intel_fbdev_init(struct drm_device *dev)
740{
741 return 0;
742}
703 743
704extern void intel_init_clock_gating(struct drm_device *dev); 744static inline void intel_fbdev_initial_config(struct drm_device *dev)
705extern void intel_suspend_hw(struct drm_device *dev); 745{
706extern void intel_write_eld(struct drm_encoder *encoder, 746}
707 struct drm_display_mode *mode); 747
708extern void intel_prepare_ddi(struct drm_device *dev); 748static inline void intel_fbdev_fini(struct drm_device *dev)
709extern void hsw_fdi_link_train(struct drm_crtc *crtc); 749{
710extern void intel_ddi_init(struct drm_device *dev, enum port port); 750}
711 751
712/* For use by IVB LP watermark workaround in intel_sprite.c */ 752static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state)
713extern void intel_update_watermarks(struct drm_device *dev); 753{
714extern void intel_update_sprite_watermarks(struct drm_plane *plane, 754}
715 struct drm_crtc *crtc, 755
716 uint32_t sprite_width, int pixel_size, 756static inline void intel_fbdev_restore_mode(struct drm_device *dev)
717 bool enabled, bool scaled); 757{
718 758}
719extern unsigned long intel_gen4_compute_page_offset(int *x, int *y, 759#endif
720 unsigned int tiling_mode, 760
721 unsigned int bpp, 761/* intel_hdmi.c */
722 unsigned int pitch); 762void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
723 763void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
724extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data, 764 struct intel_connector *intel_connector);
725 struct drm_file *file_priv); 765struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
726extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, 766bool intel_hdmi_compute_config(struct intel_encoder *encoder,
727 struct drm_file *file_priv); 767 struct intel_crtc_config *pipe_config);
728 768
729/* Power-related functions, located in intel_pm.c */ 769
730extern void intel_init_pm(struct drm_device *dev); 770/* intel_lvds.c */
731/* FBC */ 771void intel_lvds_init(struct drm_device *dev);
732extern bool intel_fbc_enabled(struct drm_device *dev); 772bool intel_is_dual_link_lvds(struct drm_device *dev);
733extern void intel_update_fbc(struct drm_device *dev); 773
734/* IPS */ 774
735extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 775/* intel_modes.c */
736extern void intel_gpu_ips_teardown(void); 776int intel_connector_update_modes(struct drm_connector *connector,
737 777 struct edid *edid);
738/* Power well */ 778int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
739extern int i915_init_power_well(struct drm_device *dev); 779void intel_attach_force_audio_property(struct drm_connector *connector);
740extern void i915_remove_power_well(struct drm_device *dev); 780void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
741 781
742extern bool intel_display_power_enabled(struct drm_device *dev, 782
743 enum intel_display_power_domain domain); 783/* intel_overlay.c */
744extern void intel_init_power_well(struct drm_device *dev); 784void intel_setup_overlay(struct drm_device *dev);
745extern void intel_set_power_well(struct drm_device *dev, bool enable); 785void intel_cleanup_overlay(struct drm_device *dev);
746extern void intel_enable_gt_powersave(struct drm_device *dev); 786int intel_overlay_switch_off(struct intel_overlay *overlay);
747extern void intel_disable_gt_powersave(struct drm_device *dev); 787int intel_overlay_put_image(struct drm_device *dev, void *data,
748extern void ironlake_teardown_rc6(struct drm_device *dev); 788 struct drm_file *file_priv);
789int intel_overlay_attrs(struct drm_device *dev, void *data,
790 struct drm_file *file_priv);
791
792
793/* intel_panel.c */
794int intel_panel_init(struct intel_panel *panel,
795 struct drm_display_mode *fixed_mode);
796void intel_panel_fini(struct intel_panel *panel);
797void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
798 struct drm_display_mode *adjusted_mode);
799void intel_pch_panel_fitting(struct intel_crtc *crtc,
800 struct intel_crtc_config *pipe_config,
801 int fitting_mode);
802void intel_gmch_panel_fitting(struct intel_crtc *crtc,
803 struct intel_crtc_config *pipe_config,
804 int fitting_mode);
805void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max);
806int intel_panel_setup_backlight(struct drm_connector *connector);
807void intel_panel_enable_backlight(struct drm_device *dev, enum pipe pipe);
808void intel_panel_disable_backlight(struct drm_device *dev);
809void intel_panel_destroy_backlight(struct drm_device *dev);
810enum drm_connector_status intel_panel_detect(struct drm_device *dev);
811
812
813/* intel_pm.c */
814void intel_init_clock_gating(struct drm_device *dev);
815void intel_suspend_hw(struct drm_device *dev);
816void intel_update_watermarks(struct drm_crtc *crtc);
817void intel_update_sprite_watermarks(struct drm_plane *plane,
818 struct drm_crtc *crtc,
819 uint32_t sprite_width, int pixel_size,
820 bool enabled, bool scaled);
821void intel_init_pm(struct drm_device *dev);
822bool intel_fbc_enabled(struct drm_device *dev);
823void intel_update_fbc(struct drm_device *dev);
824void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
825void intel_gpu_ips_teardown(void);
826int intel_power_domains_init(struct drm_device *dev);
827void intel_power_domains_remove(struct drm_device *dev);
828bool intel_display_power_enabled(struct drm_device *dev,
829 enum intel_display_power_domain domain);
830void intel_display_power_get(struct drm_device *dev,
831 enum intel_display_power_domain domain);
832void intel_display_power_put(struct drm_device *dev,
833 enum intel_display_power_domain domain);
834void intel_power_domains_init_hw(struct drm_device *dev);
835void intel_set_power_well(struct drm_device *dev, bool enable);
836void intel_enable_gt_powersave(struct drm_device *dev);
837void intel_disable_gt_powersave(struct drm_device *dev);
838void ironlake_teardown_rc6(struct drm_device *dev);
749void gen6_update_ring_freq(struct drm_device *dev); 839void gen6_update_ring_freq(struct drm_device *dev);
840void gen6_rps_idle(struct drm_i915_private *dev_priv);
841void gen6_rps_boost(struct drm_i915_private *dev_priv);
842void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
843void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
844void ilk_wm_get_hw_state(struct drm_device *dev);
845
846
847/* intel_sdvo.c */
848bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
849
850
851/* intel_sprite.c */
852int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
853void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
854 enum plane plane);
855void intel_plane_restore(struct drm_plane *plane);
856void intel_plane_disable(struct drm_plane *plane);
857int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
858 struct drm_file *file_priv);
859int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
860 struct drm_file *file_priv);
861
862
863/* intel_tv.c */
864void intel_tv_init(struct drm_device *dev);
751extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
752				   enum pipe *pipe);
753extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
754extern void intel_ddi_pll_init(struct drm_device *dev);
755extern void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
756extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
757 enum transcoder cpu_transcoder);
758extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
759extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
760extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
761extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
762extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
763extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
764extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
765extern bool
766intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
767extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
768extern void intel_ddi_get_config(struct intel_encoder *encoder,
769 struct intel_crtc_config *pipe_config);
770
771extern void intel_display_handle_reset(struct drm_device *dev);
772extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
773 enum pipe pipe,
774 bool enable);
775extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
776 enum transcoder pch_transcoder,
777 bool enable);
778
779extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
780extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
781extern void intel_edp_psr_update(struct drm_device *dev);
782extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
783 bool switch_to_fclk, bool allow_power_down);
784extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
785extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
786extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
787 uint32_t mask);
788extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
789extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv,
790 uint32_t mask);
791extern void hsw_enable_pc8_work(struct work_struct *__work);
792extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
793extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
794extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
795extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
796extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
797extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
865
866#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
new file mode 100644
index 000000000000..d257b093ca68
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -0,0 +1,620 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Jani Nikula <jani.nikula@intel.com>
24 */
25
26#include <drm/drmP.h>
27#include <drm/drm_crtc.h>
28#include <drm/drm_edid.h>
29#include <drm/i915_drm.h>
30#include <linux/slab.h>
31#include "i915_drv.h"
32#include "intel_drv.h"
33#include "intel_dsi.h"
34#include "intel_dsi_cmd.h"
35
36/* the sub-encoders aka panel drivers */
37static const struct intel_dsi_device intel_dsi_devices[] = {
38};
39
40
41static void vlv_cck_modify(struct drm_i915_private *dev_priv, u32 reg, u32 val,
42 u32 mask)
43{
44 u32 tmp = vlv_cck_read(dev_priv, reg);
45 tmp &= ~mask;
46 tmp |= val;
47 vlv_cck_write(dev_priv, reg, tmp);
48}
49
50static void band_gap_wa(struct drm_i915_private *dev_priv)
51{
52 mutex_lock(&dev_priv->dpio_lock);
53
54 /* Enable bandgap fix in GOP driver */
55 vlv_cck_modify(dev_priv, 0x6D, 0x00010000, 0x00030000);
56 msleep(20);
57 vlv_cck_modify(dev_priv, 0x6E, 0x00010000, 0x00030000);
58 msleep(20);
59 vlv_cck_modify(dev_priv, 0x6F, 0x00010000, 0x00030000);
60 msleep(20);
61 vlv_cck_modify(dev_priv, 0x00, 0x00008000, 0x00008000);
62 msleep(20);
63 vlv_cck_modify(dev_priv, 0x00, 0x00000000, 0x00008000);
64 msleep(20);
65
66 /* Turn Display Trunk on */
67 vlv_cck_modify(dev_priv, 0x6B, 0x00020000, 0x00030000);
68 msleep(20);
69
70 vlv_cck_modify(dev_priv, 0x6C, 0x00020000, 0x00030000);
71 msleep(20);
72
73 vlv_cck_modify(dev_priv, 0x6D, 0x00020000, 0x00030000);
74 msleep(20);
75 vlv_cck_modify(dev_priv, 0x6E, 0x00020000, 0x00030000);
76 msleep(20);
77 vlv_cck_modify(dev_priv, 0x6F, 0x00020000, 0x00030000);
78
79 mutex_unlock(&dev_priv->dpio_lock);
80
81	/* A long delay is needed here, otherwise the clock is not stable */
82 msleep(100);
83}
84
85static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
86{
87 return container_of(intel_attached_encoder(connector),
88 struct intel_dsi, base);
89}
90
91static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
92{
93 return intel_dsi->dev.type == INTEL_DSI_VIDEO_MODE;
94}
95
96static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
97{
98 return intel_dsi->dev.type == INTEL_DSI_COMMAND_MODE;
99}
100
101static void intel_dsi_hot_plug(struct intel_encoder *encoder)
102{
103 DRM_DEBUG_KMS("\n");
104}
105
106static bool intel_dsi_compute_config(struct intel_encoder *encoder,
107 struct intel_crtc_config *config)
108{
109 struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
110 base);
111 struct intel_connector *intel_connector = intel_dsi->attached_connector;
112 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
113 struct drm_display_mode *adjusted_mode = &config->adjusted_mode;
114 struct drm_display_mode *mode = &config->requested_mode;
115
116 DRM_DEBUG_KMS("\n");
117
118 if (fixed_mode)
119 intel_fixed_panel_mode(fixed_mode, adjusted_mode);
120
121 if (intel_dsi->dev.dev_ops->mode_fixup)
122 return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
123 mode, adjusted_mode);
124
125 return true;
126}
127
128static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
129{
130 DRM_DEBUG_KMS("\n");
131
132 vlv_enable_dsi_pll(encoder);
133}
134
135static void intel_dsi_pre_enable(struct intel_encoder *encoder)
136{
137 DRM_DEBUG_KMS("\n");
138}
139
140static void intel_dsi_enable(struct intel_encoder *encoder)
141{
142 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
143 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
144 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
145 int pipe = intel_crtc->pipe;
146 u32 temp;
147
148 DRM_DEBUG_KMS("\n");
149
150 temp = I915_READ(MIPI_DEVICE_READY(pipe));
151 if ((temp & DEVICE_READY) == 0) {
152 temp &= ~ULPS_STATE_MASK;
153 I915_WRITE(MIPI_DEVICE_READY(pipe), temp | DEVICE_READY);
154 } else if (temp & ULPS_STATE_MASK) {
155 temp &= ~ULPS_STATE_MASK;
156 I915_WRITE(MIPI_DEVICE_READY(pipe), temp | ULPS_STATE_EXIT);
157 /*
158 * We need to ensure that there is a minimum of 1 ms time
159	 * available before clearing the ULPS exit state.
160 */
161 msleep(2);
162 I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
163 }
164
165 if (is_cmd_mode(intel_dsi))
166 I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
167
168 if (is_vid_mode(intel_dsi)) {
169 msleep(20); /* XXX */
170 dpi_send_cmd(intel_dsi, TURN_ON);
171 msleep(100);
172
173 /* assert ip_tg_enable signal */
174 temp = I915_READ(MIPI_PORT_CTRL(pipe));
175 I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
176 POSTING_READ(MIPI_PORT_CTRL(pipe));
177 }
178
179 intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
180}
181
182static void intel_dsi_disable(struct intel_encoder *encoder)
183{
184 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
185 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
186 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
187 int pipe = intel_crtc->pipe;
188 u32 temp;
189
190 DRM_DEBUG_KMS("\n");
191
192 intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
193
194 if (is_vid_mode(intel_dsi)) {
195 dpi_send_cmd(intel_dsi, SHUTDOWN);
196 msleep(10);
197
198 /* de-assert ip_tg_enable signal */
199 temp = I915_READ(MIPI_PORT_CTRL(pipe));
200 I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
201 POSTING_READ(MIPI_PORT_CTRL(pipe));
202
203 msleep(2);
204 }
205
206 temp = I915_READ(MIPI_DEVICE_READY(pipe));
207 if (temp & DEVICE_READY) {
208 temp &= ~DEVICE_READY;
209 temp &= ~ULPS_STATE_MASK;
210 I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
211 }
212}
213
214static void intel_dsi_post_disable(struct intel_encoder *encoder)
215{
216 DRM_DEBUG_KMS("\n");
217
218 vlv_disable_dsi_pll(encoder);
219}
220
221static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
222 enum pipe *pipe)
223{
224 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
225 u32 port, func;
226 enum pipe p;
227
228 DRM_DEBUG_KMS("\n");
229
230 /* XXX: this only works for one DSI output */
231 for (p = PIPE_A; p <= PIPE_B; p++) {
232 port = I915_READ(MIPI_PORT_CTRL(p));
233 func = I915_READ(MIPI_DSI_FUNC_PRG(p));
234
235 if ((port & DPI_ENABLE) || (func & CMD_MODE_DATA_WIDTH_MASK)) {
236 if (I915_READ(MIPI_DEVICE_READY(p)) & DEVICE_READY) {
237 *pipe = p;
238 return true;
239 }
240 }
241 }
242
243 return false;
244}
245
246static void intel_dsi_get_config(struct intel_encoder *encoder,
247 struct intel_crtc_config *pipe_config)
248{
249 DRM_DEBUG_KMS("\n");
250
251 /* XXX: read flags, set to adjusted_mode */
252}
253
254static int intel_dsi_mode_valid(struct drm_connector *connector,
255 struct drm_display_mode *mode)
256{
257 struct intel_connector *intel_connector = to_intel_connector(connector);
258 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
259 struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
260
261 DRM_DEBUG_KMS("\n");
262
263 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
264 DRM_DEBUG_KMS("MODE_NO_DBLESCAN\n");
265 return MODE_NO_DBLESCAN;
266 }
267
268 if (fixed_mode) {
269 if (mode->hdisplay > fixed_mode->hdisplay)
270 return MODE_PANEL;
271 if (mode->vdisplay > fixed_mode->vdisplay)
272 return MODE_PANEL;
273 }
274
275 return intel_dsi->dev.dev_ops->mode_valid(&intel_dsi->dev, mode);
276}
277
278/* return txclkesc cycles in terms of divider and duration in us */
279static u16 txclkesc(u32 divider, unsigned int us)
280{
281 switch (divider) {
282 case ESCAPE_CLOCK_DIVIDER_1:
283 default:
284 return 20 * us;
285 case ESCAPE_CLOCK_DIVIDER_2:
286 return 10 * us;
287 case ESCAPE_CLOCK_DIVIDER_4:
288 return 5 * us;
289 }
290}
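/*
 * Worked example (editor's sketch, illustrative values): with
 * ESCAPE_CLOCK_DIVIDER_1 the escape clock runs at 20 MHz, i.e. 20 cycles
 * per microsecond, so txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100) = 2000 cycles,
 * which is what the MIPI_INIT_COUNT programming further down works out to.
 */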
291
292/* return pixels in terms of txbyteclkhs */
293static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count)
294{
295 return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count);
296}
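/*
 * Worked example (editor's sketch, illustrative values): 1920 pixels at
 * 24 bpp over 4 lanes gives DIV_ROUND_UP(1920 * 24, 8) = 5760 bytes, and
 * DIV_ROUND_UP(5760, 4) = 1440 txbyteclkhs cycles.
 */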
297
298static void set_dsi_timings(struct drm_encoder *encoder,
299 const struct drm_display_mode *mode)
300{
301 struct drm_device *dev = encoder->dev;
302 struct drm_i915_private *dev_priv = dev->dev_private;
303 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
304 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
305 int pipe = intel_crtc->pipe;
306 unsigned int bpp = intel_crtc->config.pipe_bpp;
307 unsigned int lane_count = intel_dsi->lane_count;
308
309 u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
310
311 hactive = mode->hdisplay;
312 hfp = mode->hsync_start - mode->hdisplay;
313 hsync = mode->hsync_end - mode->hsync_start;
314 hbp = mode->htotal - mode->hsync_end;
315
316 vfp = mode->vsync_start - mode->vdisplay;
317 vsync = mode->vsync_end - mode->vsync_start;
318 vbp = mode->vtotal - mode->vsync_end;
319
320 /* horizontal values are in terms of high speed byte clock */
321 hactive = txbyteclkhs(hactive, bpp, lane_count);
322 hfp = txbyteclkhs(hfp, bpp, lane_count);
323 hsync = txbyteclkhs(hsync, bpp, lane_count);
324 hbp = txbyteclkhs(hbp, bpp, lane_count);
325
326 I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
327 I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
328
329	/* only meaningful for video mode with non-burst sync pulse; can be
330	 * zero for non-burst sync events and burst modes */
331 I915_WRITE(MIPI_HSYNC_PADDING_COUNT(pipe), hsync);
332 I915_WRITE(MIPI_HBP_COUNT(pipe), hbp);
333
334 /* vertical values are in terms of lines */
335 I915_WRITE(MIPI_VFP_COUNT(pipe), vfp);
336 I915_WRITE(MIPI_VSYNC_PADDING_COUNT(pipe), vsync);
337 I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
338}
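/*
 * Worked example (editor's sketch, illustrative values): a 48 pixel
 * horizontal front porch at 24 bpp over 4 lanes becomes
 * txbyteclkhs(48, 24, 4) = DIV_ROUND_UP(DIV_ROUND_UP(48 * 24, 8), 4) = 36
 * byte clocks in MIPI_HFP_COUNT.
 */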
339
340static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
341{
342 struct drm_encoder *encoder = &intel_encoder->base;
343 struct drm_device *dev = encoder->dev;
344 struct drm_i915_private *dev_priv = dev->dev_private;
345 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
346 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
347 struct drm_display_mode *adjusted_mode =
348 &intel_crtc->config.adjusted_mode;
349 int pipe = intel_crtc->pipe;
350 unsigned int bpp = intel_crtc->config.pipe_bpp;
351 u32 val, tmp;
352
353 DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
354
355 /* Update the DSI PLL */
356 vlv_enable_dsi_pll(intel_encoder);
357
358 /* XXX: Location of the call */
359 band_gap_wa(dev_priv);
360
361	/* Escape clock divider, 20 MHz, shared between pipes A and C. Device
362	 * ready must be off when changing this! txclkesc? */
363 tmp = I915_READ(MIPI_CTRL(0));
364 tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
365 I915_WRITE(MIPI_CTRL(0), tmp | ESCAPE_CLOCK_DIVIDER_1);
366
367 /* read request priority is per pipe */
368 tmp = I915_READ(MIPI_CTRL(pipe));
369 tmp &= ~READ_REQUEST_PRIORITY_MASK;
370 I915_WRITE(MIPI_CTRL(pipe), tmp | READ_REQUEST_PRIORITY_HIGH);
371
372 /* XXX: why here, why like this? handling in irq handler?! */
373 I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
374 I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
375
376 I915_WRITE(MIPI_DPHY_PARAM(pipe),
377 0x3c << EXIT_ZERO_COUNT_SHIFT |
378 0x1f << TRAIL_COUNT_SHIFT |
379 0xc5 << CLK_ZERO_COUNT_SHIFT |
380 0x1f << PREPARE_COUNT_SHIFT);
381
382 I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
383 adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
384 adjusted_mode->hdisplay << HORIZONTAL_ADDRESS_SHIFT);
385
386 set_dsi_timings(encoder, adjusted_mode);
387
388 val = intel_dsi->lane_count << DATA_LANES_PRG_REG_SHIFT;
389 if (is_cmd_mode(intel_dsi)) {
390 val |= intel_dsi->channel << CMD_MODE_CHANNEL_NUMBER_SHIFT;
391 val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
392 } else {
393 val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;
394
395 /* XXX: cross-check bpp vs. pixel format? */
396 val |= intel_dsi->pixel_format;
397 }
398 I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val);
399
400	/* Timeouts for recovery, roughly one frame IIUC. If the counter
401	 * expires, send EOT and enter the stop state. */
402
403	/*
404	 * In burst mode, the timeout must be greater than one DPI line time
405	 * in byte clocks (txbyteclkhs); programming one more than that value
406	 * is recommended.
407	 *
408	 * In non-burst mode, the timeout must be greater than one DPI frame
409	 * time in byte clocks (txbyteclkhs); again, one more than that value
410	 * is recommended.
411	 *
412	 * In DBI-only mode, the timeout must be greater than one DBI frame
413	 * time in byte clocks (txbyteclkhs); again, one more than that value
414	 * is recommended.
415	 */
416
417 if (is_vid_mode(intel_dsi) &&
418 intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
419 I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
420 txbyteclkhs(adjusted_mode->htotal, bpp,
421 intel_dsi->lane_count) + 1);
422 } else {
423 I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
424 txbyteclkhs(adjusted_mode->vtotal *
425 adjusted_mode->htotal,
426 bpp, intel_dsi->lane_count) + 1);
427 }
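/*
 * Worked example (editor's sketch, illustrative values): in burst mode
 * with htotal = 2000, 24 bpp and 4 lanes, one DPI line is
 * txbyteclkhs(2000, 24, 4) = 1500 byte clocks, so the timeout above is
 * programmed to 1501.
 */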
428 I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), 8309); /* max */
429 I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), 0x14); /* max */
430 I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), 0xffff); /* max */
431
432 /* dphy stuff */
433
434 /* in terms of low power clock */
435 I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100));
436
437 /* recovery disables */
438 I915_WRITE(MIPI_EOT_DISABLE(pipe), intel_dsi->eot_disable);
439
440 /* in terms of txbyteclkhs. actual high to low switch +
441 * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
442 *
443 * XXX: write MIPI_STOP_STATE_STALL?
444 */
445 I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), 0x46);
446
447 /* XXX: low power clock equivalence in terms of byte clock. the number
448 * of byte clocks occupied in one low power clock. based on txbyteclkhs
449 * and txclkesc. txclkesc time / txbyteclk time * (105 +
450 * MIPI_STOP_STATE_STALL) / 105.???
451 */
452 I915_WRITE(MIPI_LP_BYTECLK(pipe), 4);
453
454	/* The bandwidth needed to transmit 16 long packets, each containing
455	 * 252 bytes meant for the DCS write memory command, is programmed in
456	 * this register in terms of byte clocks. The time taken to transmit
457	 * 16 long packets in a DSI stream varies with the DSI transfer rate
458	 * and the number of lanes configured. */
459 I915_WRITE(MIPI_DBI_BW_CTRL(pipe), 0x820);
460
461 I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
462 0xa << LP_HS_SSW_CNT_SHIFT |
463 0x14 << HS_LP_PWR_SW_CNT_SHIFT);
464
465 if (is_vid_mode(intel_dsi))
466 I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
467 intel_dsi->video_mode_format);
468}
469
470static enum drm_connector_status
471intel_dsi_detect(struct drm_connector *connector, bool force)
472{
473 struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
474 DRM_DEBUG_KMS("\n");
475 return intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
476}
477
478static int intel_dsi_get_modes(struct drm_connector *connector)
479{
480 struct intel_connector *intel_connector = to_intel_connector(connector);
481 struct drm_display_mode *mode;
482
483 DRM_DEBUG_KMS("\n");
484
485 if (!intel_connector->panel.fixed_mode) {
486 DRM_DEBUG_KMS("no fixed mode\n");
487 return 0;
488 }
489
490 mode = drm_mode_duplicate(connector->dev,
491 intel_connector->panel.fixed_mode);
492 if (!mode) {
493 DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
494 return 0;
495 }
496
497 drm_mode_probed_add(connector, mode);
498 return 1;
499}
500
501static void intel_dsi_destroy(struct drm_connector *connector)
502{
503 struct intel_connector *intel_connector = to_intel_connector(connector);
504
505 DRM_DEBUG_KMS("\n");
506 intel_panel_fini(&intel_connector->panel);
507 drm_connector_cleanup(connector);
508 kfree(connector);
509}
510
511static const struct drm_encoder_funcs intel_dsi_funcs = {
512 .destroy = intel_encoder_destroy,
513};
514
515static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
516 .get_modes = intel_dsi_get_modes,
517 .mode_valid = intel_dsi_mode_valid,
518 .best_encoder = intel_best_encoder,
519};
520
521static const struct drm_connector_funcs intel_dsi_connector_funcs = {
522 .dpms = intel_connector_dpms,
523 .detect = intel_dsi_detect,
524 .destroy = intel_dsi_destroy,
525 .fill_modes = drm_helper_probe_single_connector_modes,
526};
527
528bool intel_dsi_init(struct drm_device *dev)
529{
530 struct intel_dsi *intel_dsi;
531 struct intel_encoder *intel_encoder;
532 struct drm_encoder *encoder;
533 struct intel_connector *intel_connector;
534 struct drm_connector *connector;
535 struct drm_display_mode *fixed_mode = NULL;
536 const struct intel_dsi_device *dsi;
537 unsigned int i;
538
539 DRM_DEBUG_KMS("\n");
540
541 intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
542 if (!intel_dsi)
543 return false;
544
545 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
546 if (!intel_connector) {
547 kfree(intel_dsi);
548 return false;
549 }
550
551 intel_encoder = &intel_dsi->base;
552 encoder = &intel_encoder->base;
553 intel_dsi->attached_connector = intel_connector;
554
555 connector = &intel_connector->base;
556
557 drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
558
559 /* XXX: very likely not all of these are needed */
560 intel_encoder->hot_plug = intel_dsi_hot_plug;
561 intel_encoder->compute_config = intel_dsi_compute_config;
562 intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
563 intel_encoder->pre_enable = intel_dsi_pre_enable;
564 intel_encoder->enable = intel_dsi_enable;
565 intel_encoder->mode_set = intel_dsi_mode_set;
566 intel_encoder->disable = intel_dsi_disable;
567 intel_encoder->post_disable = intel_dsi_post_disable;
568 intel_encoder->get_hw_state = intel_dsi_get_hw_state;
569 intel_encoder->get_config = intel_dsi_get_config;
570
571 intel_connector->get_hw_state = intel_connector_get_hw_state;
572
573 for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
574 dsi = &intel_dsi_devices[i];
575 intel_dsi->dev = *dsi;
576
577 if (dsi->dev_ops->init(&intel_dsi->dev))
578 break;
579 }
580
581 if (i == ARRAY_SIZE(intel_dsi_devices)) {
582 DRM_DEBUG_KMS("no device found\n");
583 goto err;
584 }
585
586 intel_encoder->type = INTEL_OUTPUT_DSI;
587 intel_encoder->crtc_mask = (1 << 0); /* XXX */
588
589 intel_encoder->cloneable = false;
590 drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
591 DRM_MODE_CONNECTOR_DSI);
592
593 drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs);
594
595 connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
596 connector->interlace_allowed = false;
597 connector->doublescan_allowed = false;
598
599 intel_connector_attach_encoder(intel_connector, intel_encoder);
600
601 drm_sysfs_connector_add(connector);
602
603 fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
604 if (!fixed_mode) {
605 DRM_DEBUG_KMS("no fixed mode\n");
606 goto err;
607 }
608
609 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
610 intel_panel_init(&intel_connector->panel, fixed_mode);
611
612 return true;
613
614err:
615 drm_encoder_cleanup(&intel_encoder->base);
616 kfree(intel_dsi);
617 kfree(intel_connector);
618
619 return false;
620}
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
new file mode 100644
index 000000000000..c7765f33d524
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -0,0 +1,102 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#ifndef _INTEL_DSI_H
25#define _INTEL_DSI_H
26
27#include <drm/drmP.h>
28#include <drm/drm_crtc.h>
29#include "intel_drv.h"
30
31struct intel_dsi_device {
32 unsigned int panel_id;
33 const char *name;
34 int type;
35 const struct intel_dsi_dev_ops *dev_ops;
36 void *dev_priv;
37};
38
39struct intel_dsi_dev_ops {
40 bool (*init)(struct intel_dsi_device *dsi);
41
42 /* This callback must be able to assume DSI commands can be sent */
43 void (*enable)(struct intel_dsi_device *dsi);
44
45 /* This callback must be able to assume DSI commands can be sent */
46 void (*disable)(struct intel_dsi_device *dsi);
47
48 int (*mode_valid)(struct intel_dsi_device *dsi,
49 struct drm_display_mode *mode);
50
51 bool (*mode_fixup)(struct intel_dsi_device *dsi,
52 const struct drm_display_mode *mode,
53 struct drm_display_mode *adjusted_mode);
54
55 void (*mode_set)(struct intel_dsi_device *dsi,
56 struct drm_display_mode *mode,
57 struct drm_display_mode *adjusted_mode);
58
59 enum drm_connector_status (*detect)(struct intel_dsi_device *dsi);
60
61 bool (*get_hw_state)(struct intel_dsi_device *dev);
62
63 struct drm_display_mode *(*get_modes)(struct intel_dsi_device *dsi);
64
65 void (*destroy) (struct intel_dsi_device *dsi);
66};
67
68struct intel_dsi {
69 struct intel_encoder base;
70
71 struct intel_dsi_device dev;
72
73 struct intel_connector *attached_connector;
74
75 /* if true, use HS mode, otherwise LP */
76 bool hs;
77
78 /* virtual channel */
79 int channel;
80
81 /* number of DSI lanes */
82 unsigned int lane_count;
83
84 /* video mode pixel format for MIPI_DSI_FUNC_PRG register */
85 u32 pixel_format;
86
87 /* video mode format for MIPI_VIDEO_MODE_FORMAT register */
88 u32 video_mode_format;
89
90 /* eot for MIPI_EOT_DISABLE register */
91 u32 eot_disable;
92};
93
94static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
95{
96 return container_of(encoder, struct intel_dsi, base.base);
97}
98
99extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
100extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
101
102#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
new file mode 100644
index 000000000000..7c40f981d2c7
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -0,0 +1,427 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Jani Nikula <jani.nikula@intel.com>
24 */
25
26#include <linux/export.h>
27#include <drm/drmP.h>
28#include <drm/drm_crtc.h>
29#include <video/mipi_display.h>
30#include "i915_drv.h"
31#include "intel_drv.h"
32#include "intel_dsi.h"
33#include "intel_dsi_cmd.h"
34
35/*
36 * XXX: MIPI_DATA_ADDRESS, MIPI_DATA_LENGTH, MIPI_COMMAND_LENGTH, and
37 * MIPI_COMMAND_ADDRESS registers.
38 *
39 * Apparently these registers provide a MIPI adapter level way to send (lots of)
40 * commands and data to the receiver, without having to write the commands and
41 * data to MIPI_{HS,LP}_GEN_{CTRL,DATA} registers word by word.
42 *
43 * Presumably for anything other than MIPI_DCS_WRITE_MEMORY_START and
44 * MIPI_DCS_WRITE_MEMORY_CONTINUE (which are used to update the external
45 * framebuffer in command mode displays) these are just an optimization that can
46 * come later.
47 *
48 * For memory writes, these should probably be used for performance.
49 */
50
51static void print_stat(struct intel_dsi *intel_dsi)
52{
53 struct drm_encoder *encoder = &intel_dsi->base.base;
54 struct drm_device *dev = encoder->dev;
55 struct drm_i915_private *dev_priv = dev->dev_private;
56 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
57 enum pipe pipe = intel_crtc->pipe;
58 u32 val;
59
60 val = I915_READ(MIPI_INTR_STAT(pipe));
61
62#define STAT_BIT(val, bit) (((val) & (bit)) ? " " #bit : "")
63 DRM_DEBUG_KMS("MIPI_INTR_STAT(%d) = %08x"
64 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
65 "\n", pipe, val,
66 STAT_BIT(val, TEARING_EFFECT),
67 STAT_BIT(val, SPL_PKT_SENT_INTERRUPT),
68 STAT_BIT(val, GEN_READ_DATA_AVAIL),
69 STAT_BIT(val, LP_GENERIC_WR_FIFO_FULL),
70 STAT_BIT(val, HS_GENERIC_WR_FIFO_FULL),
71 STAT_BIT(val, RX_PROT_VIOLATION),
72 STAT_BIT(val, RX_INVALID_TX_LENGTH),
73 STAT_BIT(val, ACK_WITH_NO_ERROR),
74 STAT_BIT(val, TURN_AROUND_ACK_TIMEOUT),
75 STAT_BIT(val, LP_RX_TIMEOUT),
76 STAT_BIT(val, HS_TX_TIMEOUT),
77 STAT_BIT(val, DPI_FIFO_UNDERRUN),
78 STAT_BIT(val, LOW_CONTENTION),
79 STAT_BIT(val, HIGH_CONTENTION),
80 STAT_BIT(val, TXDSI_VC_ID_INVALID),
81 STAT_BIT(val, TXDSI_DATA_TYPE_NOT_RECOGNISED),
82 STAT_BIT(val, TXCHECKSUM_ERROR),
83 STAT_BIT(val, TXECC_MULTIBIT_ERROR),
84 STAT_BIT(val, TXECC_SINGLE_BIT_ERROR),
85 STAT_BIT(val, TXFALSE_CONTROL_ERROR),
86 STAT_BIT(val, RXDSI_VC_ID_INVALID),
87 STAT_BIT(val, RXDSI_DATA_TYPE_NOT_REGOGNISED),
88 STAT_BIT(val, RXCHECKSUM_ERROR),
89 STAT_BIT(val, RXECC_MULTIBIT_ERROR),
90 STAT_BIT(val, RXECC_SINGLE_BIT_ERROR),
91 STAT_BIT(val, RXFALSE_CONTROL_ERROR),
92 STAT_BIT(val, RXHS_RECEIVE_TIMEOUT_ERROR),
93 STAT_BIT(val, RX_LP_TX_SYNC_ERROR),
94 STAT_BIT(val, RXEXCAPE_MODE_ENTRY_ERROR),
95 STAT_BIT(val, RXEOT_SYNC_ERROR),
96 STAT_BIT(val, RXSOT_SYNC_ERROR),
97 STAT_BIT(val, RXSOT_ERROR));
98#undef STAT_BIT
99}
100
101enum dsi_type {
102 DSI_DCS,
103 DSI_GENERIC,
104};
105
106/* enable or disable command mode hs transmissions */
107void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable)
108{
109 struct drm_encoder *encoder = &intel_dsi->base.base;
110 struct drm_device *dev = encoder->dev;
111 struct drm_i915_private *dev_priv = dev->dev_private;
112 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
113 enum pipe pipe = intel_crtc->pipe;
114 u32 temp;
115 u32 mask = DBI_FIFO_EMPTY;
116
117 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
118 DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
119
120 temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(pipe));
121	temp &= ~DBI_HS_LP_MODE_MASK;
122	I915_WRITE(MIPI_HS_LP_DBI_ENABLE(pipe), temp | (enable ? DBI_HS_MODE : DBI_LP_MODE));
123
124 intel_dsi->hs = enable;
125}
126
127static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
128 u8 data_type, u16 data)
129{
130 struct drm_encoder *encoder = &intel_dsi->base.base;
131 struct drm_device *dev = encoder->dev;
132 struct drm_i915_private *dev_priv = dev->dev_private;
133 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
134 enum pipe pipe = intel_crtc->pipe;
135 u32 ctrl_reg;
136 u32 ctrl;
137 u32 mask;
138
139 DRM_DEBUG_KMS("channel %d, data_type %d, data %04x\n",
140 channel, data_type, data);
141
142 if (intel_dsi->hs) {
143 ctrl_reg = MIPI_HS_GEN_CTRL(pipe);
144 mask = HS_CTRL_FIFO_FULL;
145 } else {
146 ctrl_reg = MIPI_LP_GEN_CTRL(pipe);
147 mask = LP_CTRL_FIFO_FULL;
148 }
149
150 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50)) {
151 DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
152 print_stat(intel_dsi);
153 }
154
155 /*
156 * Note: This function is also used for long packets, with length passed
157 * as data, since SHORT_PACKET_PARAM_SHIFT ==
158 * LONG_PACKET_WORD_COUNT_SHIFT.
159 */
160 ctrl = data << SHORT_PACKET_PARAM_SHIFT |
161 channel << VIRTUAL_CHANNEL_SHIFT |
162 data_type << DATA_TYPE_SHIFT;
163
164 I915_WRITE(ctrl_reg, ctrl);
165
166 return 0;
167}
168
169static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
170 u8 data_type, const u8 *data, int len)
171{
172 struct drm_encoder *encoder = &intel_dsi->base.base;
173 struct drm_device *dev = encoder->dev;
174 struct drm_i915_private *dev_priv = dev->dev_private;
175 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
176 enum pipe pipe = intel_crtc->pipe;
177 u32 data_reg;
178 int i, j, n;
179 u32 mask;
180
181 DRM_DEBUG_KMS("channel %d, data_type %d, len %04x\n",
182 channel, data_type, len);
183
184 if (intel_dsi->hs) {
185 data_reg = MIPI_HS_GEN_DATA(pipe);
186 mask = HS_DATA_FIFO_FULL;
187 } else {
188 data_reg = MIPI_LP_GEN_DATA(pipe);
189 mask = LP_DATA_FIFO_FULL;
190 }
191
192 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50))
193 DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
194
195 for (i = 0; i < len; i += n) {
196 u32 val = 0;
197 n = min_t(int, len - i, 4);
198
199 for (j = 0; j < n; j++)
200 val |= *data++ << 8 * j;
201
202 I915_WRITE(data_reg, val);
203 /* XXX: check for data fifo full, once that is set, write 4
204 * dwords, then wait for not set, then continue. */
205 }
206
207 return dsi_vc_send_short(intel_dsi, channel, data_type, len);
208}
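/*
 * Worked example (editor's sketch, illustrative bytes): for len = 5 and
 * data { 0x28, 0x00, 0x10, 0x20, 0x30 }, the loop above packs the bytes
 * LSB first and writes 0x20100028 followed by 0x00000030 to the data
 * FIFO; the final dsi_vc_send_short() then places len = 5 in the long
 * packet word count field.
 */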
209
210static int dsi_vc_write_common(struct intel_dsi *intel_dsi,
211 int channel, const u8 *data, int len,
212 enum dsi_type type)
213{
214 int ret;
215
216 if (len == 0) {
217 BUG_ON(type == DSI_GENERIC);
218 ret = dsi_vc_send_short(intel_dsi, channel,
219 MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM,
220 0);
221 } else if (len == 1) {
222 ret = dsi_vc_send_short(intel_dsi, channel,
223 type == DSI_GENERIC ?
224 MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
225 MIPI_DSI_DCS_SHORT_WRITE, data[0]);
226 } else if (len == 2) {
227 ret = dsi_vc_send_short(intel_dsi, channel,
228 type == DSI_GENERIC ?
229 MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
230 MIPI_DSI_DCS_SHORT_WRITE_PARAM,
231 (data[1] << 8) | data[0]);
232 } else {
233 ret = dsi_vc_send_long(intel_dsi, channel,
234 type == DSI_GENERIC ?
235 MIPI_DSI_GENERIC_LONG_WRITE :
236 MIPI_DSI_DCS_LONG_WRITE, data, len);
237 }
238
239 return ret;
240}
241
242int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
243 const u8 *data, int len)
244{
245 return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS);
246}
247
248int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
249 const u8 *data, int len)
250{
251 return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC);
252}
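/*
 * Usage sketch (editor's illustration, assuming MIPI_DCS_SET_DISPLAY_ON
 * from <video/mipi_display.h> and the dsi_vc_dcs_write_0() helper from
 * intel_dsi_cmd.h): a zero-parameter DCS command on virtual channel 0,
 * e.g.
 *
 *	dsi_vc_dcs_write_0(intel_dsi, 0, MIPI_DCS_SET_DISPLAY_ON);
 *
 * is routed through dsi_vc_dcs_write() and dsi_vc_write_common() above
 * and sent as MIPI_DSI_DCS_SHORT_WRITE (len == 1).
 */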
253
254static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi,
255 int channel, u8 dcs_cmd)
256{
257 return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ,
258 dcs_cmd);
259}
260
261static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi,
262 int channel, u8 *reqdata,
263 int reqlen)
264{
265 u16 data;
266 u8 data_type;
267
268 switch (reqlen) {
269 case 0:
270 data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
271 data = 0;
272 break;
273 case 1:
274 data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
275 data = reqdata[0];
276 break;
277 case 2:
278 data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
279 data = (reqdata[1] << 8) | reqdata[0];
280 break;
281 default:
282 BUG();
283 }
284
285 return dsi_vc_send_short(intel_dsi, channel, data_type, data);
286}
287
288static int dsi_read_data_return(struct intel_dsi *intel_dsi,
289 u8 *buf, int buflen)
290{
291 struct drm_encoder *encoder = &intel_dsi->base.base;
292 struct drm_device *dev = encoder->dev;
293 struct drm_i915_private *dev_priv = dev->dev_private;
294 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
295 enum pipe pipe = intel_crtc->pipe;
296 int i, len = 0;
297 u32 data_reg, val;
298
299 if (intel_dsi->hs) {
300 data_reg = MIPI_HS_GEN_DATA(pipe);
301 } else {
302 data_reg = MIPI_LP_GEN_DATA(pipe);
303 }
304
305 while (len < buflen) {
306 val = I915_READ(data_reg);
307 for (i = 0; i < 4 && len < buflen; i++, len++)
308 buf[len] = val >> 8 * i;
309 }
310
311 return len;
312}
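/*
 * Worked example (editor's sketch): for buflen = 6 the loop above pops
 * two dwords from the read FIFO; buf[0] gets bits 7:0 of the first dword
 * and buf[4] gets bits 7:0 of the second, mirroring the LSB-first packing
 * used on the write side.
 */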
313
314int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
315 u8 *buf, int buflen)
316{
317 struct drm_encoder *encoder = &intel_dsi->base.base;
318 struct drm_device *dev = encoder->dev;
319 struct drm_i915_private *dev_priv = dev->dev_private;
320 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
321 enum pipe pipe = intel_crtc->pipe;
322 u32 mask;
323 int ret;
324
325 /*
326 * XXX: should issue multiple read requests and reads if request is
327 * longer than MIPI_MAX_RETURN_PKT_SIZE
328 */
329
330 I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
331
332 ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd);
333 if (ret)
334 return ret;
335
336 mask = GEN_READ_DATA_AVAIL;
337 if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
338 DRM_ERROR("Timeout waiting for read data.\n");
339
340 ret = dsi_read_data_return(intel_dsi, buf, buflen);
341 if (ret < 0)
342 return ret;
343
344 if (ret != buflen)
345 return -EIO;
346
347 return 0;
348}
349
350int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
351 u8 *reqdata, int reqlen, u8 *buf, int buflen)
352{
353 struct drm_encoder *encoder = &intel_dsi->base.base;
354 struct drm_device *dev = encoder->dev;
355 struct drm_i915_private *dev_priv = dev->dev_private;
356 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
357 enum pipe pipe = intel_crtc->pipe;
358 u32 mask;
359 int ret;
360
361 /*
362 * XXX: should issue multiple read requests and reads if request is
363 * longer than MIPI_MAX_RETURN_PKT_SIZE
364 */
365
366 I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
367
368 ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata,
369 reqlen);
370 if (ret)
371 return ret;
372
373 mask = GEN_READ_DATA_AVAIL;
374 if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
375 DRM_ERROR("Timeout waiting for read data.\n");
376
377 ret = dsi_read_data_return(intel_dsi, buf, buflen);
378 if (ret < 0)
379 return ret;
380
381 if (ret != buflen)
382 return -EIO;
383
384 return 0;
385}
386
387/*
388 * send a video mode command
389 *
390 * XXX: commands with data in MIPI_DPI_DATA?
391 */
392int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd)
393{
394 struct drm_encoder *encoder = &intel_dsi->base.base;
395 struct drm_device *dev = encoder->dev;
396 struct drm_i915_private *dev_priv = dev->dev_private;
397 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
398 enum pipe pipe = intel_crtc->pipe;
399 u32 mask;
400
401 /* XXX: pipe, hs */
402 if (intel_dsi->hs)
403 cmd &= ~DPI_LP_MODE;
404 else
405 cmd |= DPI_LP_MODE;
406
407 /* DPI virtual channel?! */
408
409 mask = DPI_FIFO_EMPTY;
410 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
411 DRM_ERROR("Timeout waiting for DPI FIFO empty.\n");
412
413 /* clear bit */
414 I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
415
416 /* XXX: old code skips write if control unchanged */
417 if (cmd == I915_READ(MIPI_DPI_CONTROL(pipe)))
418 DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
419
420 I915_WRITE(MIPI_DPI_CONTROL(pipe), cmd);
421
422 mask = SPL_PKT_SENT_INTERRUPT;
423 if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 100))
424 DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
425
426 return 0;
427}
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
new file mode 100644
index 000000000000..54c8a234a2e0
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h
@@ -0,0 +1,109 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Jani Nikula <jani.nikula@intel.com>
24 */
25
26#ifndef _INTEL_DSI_DSI_H
27#define _INTEL_DSI_DSI_H
28
29#include <drm/drmP.h>
30#include <drm/drm_crtc.h>
31#include <video/mipi_display.h>
32#include "i915_drv.h"
33#include "intel_drv.h"
34#include "intel_dsi.h"
35
36void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
37
38int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
39 const u8 *data, int len);
40
41int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
42 const u8 *data, int len);
43
44int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
45 u8 *buf, int buflen);
46
47int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
48 u8 *reqdata, int reqlen, u8 *buf, int buflen);
49
50int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd);
51
52/* XXX: questionable write helpers */
53static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
54 int channel, u8 dcs_cmd)
55{
56 return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1);
57}
58
59static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
60 int channel, u8 dcs_cmd, u8 param)
61{
62 u8 buf[2] = { dcs_cmd, param };
63 return dsi_vc_dcs_write(intel_dsi, channel, buf, 2);
64}
65
66static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
67 int channel)
68{
69 return dsi_vc_generic_write(intel_dsi, channel, NULL, 0);
70}
71
72static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
73 int channel, u8 param)
74{
75 return dsi_vc_generic_write(intel_dsi, channel, &param, 1);
76}
77
78static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
79 int channel, u8 param1, u8 param2)
80{
81 u8 buf[2] = { param1, param2 };
82 return dsi_vc_generic_write(intel_dsi, channel, buf, 2);
83}
84
85/* XXX: questionable read helpers */
86static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
87 int channel, u8 *buf, int buflen)
88{
89 return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen);
90}
91
92static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
93 int channel, u8 param, u8 *buf,
94 int buflen)
95{
96 return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen);
97}
98
99static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
100 int channel, u8 param1, u8 param2,
101 u8 *buf, int buflen)
102{
103 u8 req[2] = { param1, param2 };
104
105 return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen);
106}
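/*
 * Usage sketch (editor's illustration, assuming MIPI_DCS_GET_POWER_MODE
 * from <video/mipi_display.h>): reading the one-byte DCS power mode on
 * virtual channel 0 could look like
 *
 *	u8 mode;
 *	int ret = dsi_vc_dcs_read(intel_dsi, 0, MIPI_DCS_GET_POWER_MODE,
 *				  &mode, sizeof(mode));
 */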
107
108
109#endif /* _INTEL_DSI_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
new file mode 100644
index 000000000000..44279b2ade88
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -0,0 +1,317 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Shobhit Kumar <shobhit.kumar@intel.com>
25 * Yogesh Mohan Marimuthu <yogesh.mohan.marimuthu@intel.com>
26 */
27
28#include <linux/kernel.h>
29#include "intel_drv.h"
30#include "i915_drv.h"
31#include "intel_dsi.h"
32
33#define DSI_HSS_PACKET_SIZE 4
34#define DSI_HSE_PACKET_SIZE 4
35#define DSI_HSA_PACKET_EXTRA_SIZE 6
36#define DSI_HBP_PACKET_EXTRA_SIZE 6
37#define DSI_HACTIVE_PACKET_EXTRA_SIZE 6
38#define DSI_HFP_PACKET_EXTRA_SIZE 6
39#define DSI_EOTP_PACKET_SIZE 4
40
41struct dsi_mnp {
42 u32 dsi_pll_ctrl;
43 u32 dsi_pll_div;
44};
45
46static const u32 lfsr_converts[] = {
47 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
48 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
49 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
50 71, 35 /* 91 - 92 */
51};
52
53static u32 dsi_rr_formula(const struct drm_display_mode *mode,
54 int pixel_format, int video_mode_format,
55 int lane_count, bool eotp)
56{
57 u32 bpp;
58 u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
59 u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
60 u32 bytes_per_line, bytes_per_frame;
61 u32 num_frames;
62 u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
63 u32 dsi_bit_clock_hz;
64 u32 dsi_clk;
65
66 switch (pixel_format) {
67 default:
68 case VID_MODE_FORMAT_RGB888:
69 case VID_MODE_FORMAT_RGB666_LOOSE:
70 bpp = 24;
71 break;
72 case VID_MODE_FORMAT_RGB666:
73 bpp = 18;
74 break;
75 case VID_MODE_FORMAT_RGB565:
76 bpp = 16;
77 break;
78 }
79
80 hactive = mode->hdisplay;
81 vactive = mode->vdisplay;
82 hfp = mode->hsync_start - mode->hdisplay;
83 hsync = mode->hsync_end - mode->hsync_start;
84 hbp = mode->htotal - mode->hsync_end;
85
86 vfp = mode->vsync_start - mode->vdisplay;
87 vsync = mode->vsync_end - mode->vsync_start;
88 vbp = mode->vtotal - mode->vsync_end;
89
90 hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
91 hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
92 hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
93 hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);
94
95 bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
96 DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
97 hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
98 hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
99 hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;
100
101 /*
102 * XXX: Need to accurately calculate LP to HS transition timeout and add
103 * it to bytes_per_line/bytes_per_frame.
104 */
105
106 if (eotp && video_mode_format == VIDEO_MODE_BURST)
107 bytes_per_line += DSI_EOTP_PACKET_SIZE;
108
109 bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
110 vactive * bytes_per_line + vfp * bytes_per_line;
111
112 if (eotp &&
113 (video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
114 video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
115 bytes_per_frame += DSI_EOTP_PACKET_SIZE;
116
117 num_frames = drm_mode_vrefresh(mode);
118 bytes_per_x_frames = num_frames * bytes_per_frame;
119
120 bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;
121
122 /* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
123 dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
124 dsi_clk = dsi_bit_clock_hz / (1000 * 1000);
125
126 if (eotp && video_mode_format == VIDEO_MODE_BURST)
127 dsi_clk *= 2;
128
129 return dsi_clk;
130}
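/*
 * Worked example (editor's sketch, round illustrative numbers): with
 * bytes_per_line = 2500 and vtotal = 500 lines, bytes_per_frame is
 * 1,250,000; at 60 Hz that is 75,000,000 bytes/s, or 37,500,000 bytes/s
 * per lane with lane_count = 2, i.e. 300,000,000 bits/s, so the function
 * returns dsi_clk = 300 (MHz, the bottom of the supported range).
 */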
131
132#ifdef MNP_FROM_TABLE
133
134struct dsi_clock_table {
135 u32 freq;
136 u8 m;
137 u8 p;
138};
139
140static const struct dsi_clock_table dsi_clk_tbl[] = {
141 {300, 72, 6}, {313, 75, 6}, {323, 78, 6}, {333, 80, 6},
142 {343, 82, 6}, {353, 85, 6}, {363, 87, 6}, {373, 90, 6},
143 {383, 92, 6}, {390, 78, 5}, {393, 79, 5}, {400, 80, 5},
144 {401, 80, 5}, {402, 80, 5}, {403, 81, 5}, {404, 81, 5},
145 {405, 81, 5}, {406, 81, 5}, {407, 81, 5}, {408, 82, 5},
146 {409, 82, 5}, {410, 82, 5}, {411, 82, 5}, {412, 82, 5},
147 {413, 83, 5}, {414, 83, 5}, {415, 83, 5}, {416, 83, 5},
148 {417, 83, 5}, {418, 84, 5}, {419, 84, 5}, {420, 84, 5},
149 {430, 86, 5}, {440, 88, 5}, {450, 90, 5}, {460, 92, 5},
150 {470, 75, 4}, {480, 77, 4}, {490, 78, 4}, {500, 80, 4},
151 {510, 82, 4}, {520, 83, 4}, {530, 85, 4}, {540, 86, 4},
152 {550, 88, 4}, {560, 90, 4}, {570, 91, 4}, {580, 70, 3},
153 {590, 71, 3}, {600, 72, 3}, {610, 73, 3}, {620, 74, 3},
154 {630, 76, 3}, {640, 77, 3}, {650, 78, 3}, {660, 79, 3},
155 {670, 80, 3}, {680, 82, 3}, {690, 83, 3}, {700, 84, 3},
156 {710, 85, 3}, {720, 86, 3}, {730, 88, 3}, {740, 89, 3},
157 {750, 90, 3}, {760, 91, 3}, {770, 92, 3}, {780, 62, 2},
158 {790, 63, 2}, {800, 64, 2}, {880, 70, 2}, {900, 72, 2},
159	{1000, 80, 2}, /* dsi clock frequency in MHz */
160};
161
162static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
163{
164 unsigned int i;
165 u8 m;
166 u8 n;
167 u8 p;
168 u32 m_seed;
169
170 if (dsi_clk < 300 || dsi_clk > 1000)
171 return -ECHRNG;
172
173	for (i = 0; i < ARRAY_SIZE(dsi_clk_tbl) - 1; i++) {
174 if (dsi_clk_tbl[i].freq > dsi_clk)
175 break;
176 }
177
178 m = dsi_clk_tbl[i].m;
179 p = dsi_clk_tbl[i].p;
180 m_seed = lfsr_converts[m - 62];
181 n = 1;
182 dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + p - 2);
183 dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
184 m_seed << DSI_PLL_M1_DIV_SHIFT;
185
186 return 0;
187}
188
189#else
190
191static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
192{
193 u32 m, n, p;
194 u32 ref_clk;
195 u32 error;
196 u32 tmp_error;
197 u32 target_dsi_clk;
198 u32 calc_dsi_clk;
199 u32 calc_m;
200 u32 calc_p;
201 u32 m_seed;
202
203 if (dsi_clk < 300 || dsi_clk > 1150) {
204 DRM_ERROR("DSI CLK Out of Range\n");
205 return -ECHRNG;
206 }
207
208 ref_clk = 25000;
209 target_dsi_clk = dsi_clk * 1000;
210 error = 0xFFFFFFFF;
211 calc_m = 0;
212 calc_p = 0;
213
214 for (m = 62; m <= 92; m++) {
215 for (p = 2; p <= 6; p++) {
216
217 calc_dsi_clk = (m * ref_clk) / p;
218 if (calc_dsi_clk >= target_dsi_clk) {
219 tmp_error = calc_dsi_clk - target_dsi_clk;
220 if (tmp_error < error) {
221 error = tmp_error;
222 calc_m = m;
223 calc_p = p;
224 }
225 }
226 }
227 }
228
229 m_seed = lfsr_converts[calc_m - 62];
230 n = 1;
231 dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
232 dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
233 m_seed << DSI_PLL_M1_DIV_SHIFT;
234
235 return 0;
236}
237
238#endif
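/*
 * Worked example (editor's sketch): for a target dsi_clk of 500 MHz both
 * variants agree: the table lookup gives {500, 80, 4}, and the search
 * finds m = 80, p = 4 with zero error, since 25000 kHz * 80 / 4 =
 * 500000 kHz. The programmed M value is then the LFSR seed
 * lfsr_converts[80 - 62] = 213, with n = 1 and the P1 post divider bit
 * set at DSI_PLL_P1_POST_DIV_SHIFT + 2.
 */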
239
240/*
241 * XXX: The muxing and gating is hard coded for now. Need to add support for
242 * sharing PLLs with two DSI outputs.
243 */
244static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
245{
246 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
247 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
248 const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
249 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
250 int ret;
251 struct dsi_mnp dsi_mnp;
252 u32 dsi_clk;
253
254 dsi_clk = dsi_rr_formula(mode, intel_dsi->pixel_format,
255 intel_dsi->video_mode_format,
256 intel_dsi->lane_count, !intel_dsi->eot_disable);
257
258 ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
259 if (ret) {
260 DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
261 return;
262 }
263
264 dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
265
266 DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
267 dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
268
269 vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
270 vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, dsi_mnp.dsi_pll_div);
271 vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
272}
273
274void vlv_enable_dsi_pll(struct intel_encoder *encoder)
275{
276 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
277 u32 tmp;
278
279 DRM_DEBUG_KMS("\n");
280
281 mutex_lock(&dev_priv->dpio_lock);
282
283 vlv_configure_dsi_pll(encoder);
284
285 /* wait at least 0.5 us after ungating before enabling VCO */
286 usleep_range(1, 10);
287
288 tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
289 tmp |= DSI_PLL_VCO_EN;
290 vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
291
292 mutex_unlock(&dev_priv->dpio_lock);
293
294 if (wait_for(I915_READ(PIPECONF(PIPE_A)) & PIPECONF_DSI_PLL_LOCKED, 20)) {
295 DRM_ERROR("DSI PLL lock failed\n");
296 return;
297 }
298
299 DRM_DEBUG_KMS("DSI PLL locked\n");
300}
301
302void vlv_disable_dsi_pll(struct intel_encoder *encoder)
303{
304 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
305 u32 tmp;
306
307 DRM_DEBUG_KMS("\n");
308
309 mutex_lock(&dev_priv->dpio_lock);
310
311 tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
312 tmp &= ~DSI_PLL_VCO_EN;
313 tmp |= DSI_PLL_LDO_GATE;
314 vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
315
316 mutex_unlock(&dev_priv->dpio_lock);
317}
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 7fa7df546c1e..1b64145c669a 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -153,6 +153,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
153		flags |= DRM_MODE_FLAG_NVSYNC;
154
155	pipe_config->adjusted_mode.flags |= flags;
156
157	pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
158}
159
160static void intel_disable_dvo(struct intel_encoder *encoder)
@@ -267,11 +269,6 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
269		drm_mode_set_crtcinfo(adjusted_mode, 0);
270	}
271
270 if (intel_dvo->dev.dev_ops->mode_fixup)
271 return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev,
272 &pipe_config->requested_mode,
273 adjusted_mode);
274
272	return true;
273}
274
@@ -370,7 +367,6 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
367
368static void intel_dvo_destroy(struct drm_connector *connector)
369{
373 drm_sysfs_connector_remove(connector);
370	drm_connector_cleanup(connector);
371	kfree(connector);
372}
@@ -451,11 +447,11 @@ void intel_dvo_init(struct drm_device *dev)
447	int i;
448	int encoder_type = DRM_MODE_ENCODER_NONE;
449
454	intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL);
450	intel_dvo = kzalloc(sizeof(*intel_dvo), GFP_KERNEL);
451	if (!intel_dvo)
452		return;
453
458	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
454	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
455	if (!intel_connector) {
456		kfree(intel_dvo);
457		return;
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fbdev.c
index bc2100007b21..895fcb4fbd94 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -78,8 +78,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
78 mode_cmd.width = sizes->surface_width; 78 mode_cmd.width = sizes->surface_width;
79 mode_cmd.height = sizes->surface_height; 79 mode_cmd.height = sizes->surface_height;
80 80
81 mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) / 81 mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
82 8), 64); 82 DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
83 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, 83 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
84 sizes->surface_depth); 84 sizes->surface_depth);
85 85
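The pitch computation above rounds bpp up to whole bytes and then aligns the row stride to 64 bytes. A minimal standalone sketch of the same arithmetic (the helper name is ours, not the driver's):

	#include <linux/kernel.h>	/* ALIGN(), DIV_ROUND_UP() */

	/* Stride in bytes of one framebuffer row: bpp rounded up to whole
	 * bytes, row aligned to 64-byte granularity.
	 * e.g. width = 1366, bpp = 32:  ALIGN(1366 * 4, 64) = 5504. */
	static u32 fb_pitch(u32 width, u32 bpp)
	{
		return ALIGN(width * DIV_ROUND_UP(bpp, 8), 64);
	}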
@@ -184,6 +184,27 @@ out:
184 return ret; 184 return ret;
185} 185}
186 186
 187/* Sets the color ramps on behalf of RandR */
188static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
189 u16 blue, int regno)
190{
191 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
192
193 intel_crtc->lut_r[regno] = red >> 8;
194 intel_crtc->lut_g[regno] = green >> 8;
195 intel_crtc->lut_b[regno] = blue >> 8;
196}
197
198static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
199 u16 *blue, int regno)
200{
201 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
202
203 *red = intel_crtc->lut_r[regno] << 8;
204 *green = intel_crtc->lut_g[regno] << 8;
205 *blue = intel_crtc->lut_b[regno] << 8;
206}
207
187static struct drm_fb_helper_funcs intel_fb_helper_funcs = { 208static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
188 .gamma_set = intel_crtc_fb_gamma_set, 209 .gamma_set = intel_crtc_fb_gamma_set,
189 .gamma_get = intel_crtc_fb_gamma_get, 210 .gamma_get = intel_crtc_fb_gamma_get,
@@ -216,7 +237,7 @@ int intel_fbdev_init(struct drm_device *dev)
216 struct drm_i915_private *dev_priv = dev->dev_private; 237 struct drm_i915_private *dev_priv = dev->dev_private;
217 int ret; 238 int ret;
218 239
219 ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); 240 ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
220 if (!ifbdev) 241 if (!ifbdev)
221 return -ENOMEM; 242 return -ENOMEM;
222 243
@@ -225,7 +246,7 @@ int intel_fbdev_init(struct drm_device *dev)
225 246
226 ret = drm_fb_helper_init(dev, &ifbdev->helper, 247 ret = drm_fb_helper_init(dev, &ifbdev->helper,
227 INTEL_INFO(dev)->num_pipes, 248 INTEL_INFO(dev)->num_pipes,
228 INTELFB_CONN_LIMIT); 249 4);
229 if (ret) { 250 if (ret) {
230 kfree(ifbdev); 251 kfree(ifbdev);
231 return ret; 252 return ret;
@@ -278,13 +299,13 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
278 299
279MODULE_LICENSE("GPL and additional rights"); 300MODULE_LICENSE("GPL and additional rights");
280 301
281void intel_fb_output_poll_changed(struct drm_device *dev) 302void intel_fbdev_output_poll_changed(struct drm_device *dev)
282{ 303{
283 struct drm_i915_private *dev_priv = dev->dev_private; 304 struct drm_i915_private *dev_priv = dev->dev_private;
284 drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); 305 drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
285} 306}
286 307
287void intel_fb_restore_mode(struct drm_device *dev) 308void intel_fbdev_restore_mode(struct drm_device *dev)
288{ 309{
289 int ret; 310 int ret;
290 struct drm_i915_private *dev_priv = dev->dev_private; 311 struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4148cc85bf7f..51a8336dec2e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -713,6 +713,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
713 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 713 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
714 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 714 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
715 u32 tmp, flags = 0; 715 u32 tmp, flags = 0;
716 int dotclock;
716 717
717 tmp = I915_READ(intel_hdmi->hdmi_reg); 718 tmp = I915_READ(intel_hdmi->hdmi_reg);
718 719
@@ -727,6 +728,16 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
727 flags |= DRM_MODE_FLAG_NVSYNC; 728 flags |= DRM_MODE_FLAG_NVSYNC;
728 729
729 pipe_config->adjusted_mode.flags |= flags; 730 pipe_config->adjusted_mode.flags |= flags;
731
732 if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
733 dotclock = pipe_config->port_clock * 2 / 3;
734 else
735 dotclock = pipe_config->port_clock;
736
737 if (HAS_PCH_SPLIT(dev_priv->dev))
738 ironlake_check_encoder_dotclock(pipe_config, dotclock);
739
740 pipe_config->adjusted_mode.crtc_clock = dotclock;
730} 741}
731 742
732static void intel_enable_hdmi(struct intel_encoder *encoder) 743static void intel_enable_hdmi(struct intel_encoder *encoder)
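The 12bpc adjustment above follows from the TMDS link carrying 1.5x as many bits per pixel as at 8bpc, so the port clock runs at 3/2 of the dot clock. A small sketch of the mapping (helper name ours, clocks in kHz):

	/* Recover the pixel (dot) clock from the HDMI port clock.
	 * e.g. a 148500 kHz mode sent at 12bpc needs a 222750 kHz port
	 * clock, and 222750 * 2 / 3 == 148500 recovers it. */
	static int hdmi_port_to_dotclock(int port_clock, bool is_12bpc)
	{
		return is_12bpc ? port_clock * 2 / 3 : port_clock;
	}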
@@ -862,7 +873,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
862 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 873 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
863 struct drm_device *dev = encoder->base.dev; 874 struct drm_device *dev = encoder->base.dev;
864 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 875 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
865 int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2; 876 int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
866 int portclock_limit = hdmi_portclock_limit(intel_hdmi); 877 int portclock_limit = hdmi_portclock_limit(intel_hdmi);
867 int desired_bpp; 878 int desired_bpp;
868 879
@@ -904,7 +915,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
904 pipe_config->pipe_bpp = desired_bpp; 915 pipe_config->pipe_bpp = desired_bpp;
905 } 916 }
906 917
907 if (adjusted_mode->clock > portclock_limit) { 918 if (adjusted_mode->crtc_clock > portclock_limit) {
908 DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n"); 919 DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
909 return false; 920 return false;
910 } 921 }
@@ -1063,7 +1074,7 @@ done:
1063 return 0; 1074 return 0;
1064} 1075}
1065 1076
1066static void intel_hdmi_pre_enable(struct intel_encoder *encoder) 1077static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
1067{ 1078{
1068 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1079 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1069 struct drm_device *dev = encoder->base.dev; 1080 struct drm_device *dev = encoder->base.dev;
@@ -1079,35 +1090,35 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
1079 1090
1080 /* Enable clock channels for this port */ 1091 /* Enable clock channels for this port */
1081 mutex_lock(&dev_priv->dpio_lock); 1092 mutex_lock(&dev_priv->dpio_lock);
1082 val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port)); 1093 val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
1083 val = 0; 1094 val = 0;
1084 if (pipe) 1095 if (pipe)
1085 val |= (1<<21); 1096 val |= (1<<21);
1086 else 1097 else
1087 val &= ~(1<<21); 1098 val &= ~(1<<21);
1088 val |= 0x001000c4; 1099 val |= 0x001000c4;
1089 vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val); 1100 vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
1090 1101
1091 /* HDMI 1.0V-2dB */ 1102 /* HDMI 1.0V-2dB */
1092 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0); 1103 vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0);
1093 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), 1104 vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port),
1094 0x2b245f5f); 1105 0x2b245f5f);
1095 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port), 1106 vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
1096 0x5578b83a); 1107 0x5578b83a);
1097 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 1108 vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port),
1098 0x0c782040); 1109 0x0c782040);
1099 vlv_dpio_write(dev_priv, DPIO_TX3_SWING_CTL4(port), 1110 vlv_dpio_write(dev_priv, pipe, DPIO_TX3_SWING_CTL4(port),
1100 0x2b247878); 1111 0x2b247878);
1101 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000); 1112 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
1102 vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), 1113 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
1103 0x00002000); 1114 0x00002000);
1104 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 1115 vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
1105 DPIO_TX_OCALINIT_EN); 1116 DPIO_TX_OCALINIT_EN);
1106 1117
1107 /* Program lane clock */ 1118 /* Program lane clock */
1108 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 1119 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port),
1109 0x00760018); 1120 0x00760018);
1110 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 1121 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port),
1111 0x00400888); 1122 0x00400888);
1112 mutex_unlock(&dev_priv->dpio_lock); 1123 mutex_unlock(&dev_priv->dpio_lock);
1113 1124
@@ -1116,55 +1127,60 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
1116 vlv_wait_port_ready(dev_priv, port); 1127 vlv_wait_port_ready(dev_priv, port);
1117} 1128}
1118 1129
1119static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder) 1130static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1120{ 1131{
1121 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1132 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1122 struct drm_device *dev = encoder->base.dev; 1133 struct drm_device *dev = encoder->base.dev;
1123 struct drm_i915_private *dev_priv = dev->dev_private; 1134 struct drm_i915_private *dev_priv = dev->dev_private;
1135 struct intel_crtc *intel_crtc =
1136 to_intel_crtc(encoder->base.crtc);
1124 int port = vlv_dport_to_channel(dport); 1137 int port = vlv_dport_to_channel(dport);
1138 int pipe = intel_crtc->pipe;
1125 1139
1126 if (!IS_VALLEYVIEW(dev)) 1140 if (!IS_VALLEYVIEW(dev))
1127 return; 1141 return;
1128 1142
1129 /* Program Tx lane resets to default */ 1143 /* Program Tx lane resets to default */
1130 mutex_lock(&dev_priv->dpio_lock); 1144 mutex_lock(&dev_priv->dpio_lock);
1131 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 1145 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
1132 DPIO_PCS_TX_LANE2_RESET | 1146 DPIO_PCS_TX_LANE2_RESET |
1133 DPIO_PCS_TX_LANE1_RESET); 1147 DPIO_PCS_TX_LANE1_RESET);
1134 vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 1148 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
1135 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | 1149 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1136 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | 1150 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1137 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | 1151 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1138 DPIO_PCS_CLK_SOFT_RESET); 1152 DPIO_PCS_CLK_SOFT_RESET);
1139 1153
1140 /* Fix up inter-pair skew failure */ 1154 /* Fix up inter-pair skew failure */
1141 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00); 1155 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
1142 vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500); 1156 vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
1143 vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000); 1157 vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
1144 1158
1145 vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), 1159 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
1146 0x00002000); 1160 0x00002000);
1147 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 1161 vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
1148 DPIO_TX_OCALINIT_EN); 1162 DPIO_TX_OCALINIT_EN);
1149 mutex_unlock(&dev_priv->dpio_lock); 1163 mutex_unlock(&dev_priv->dpio_lock);
1150} 1164}
1151 1165
1152static void intel_hdmi_post_disable(struct intel_encoder *encoder) 1166static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
1153{ 1167{
1154 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1168 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1155 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 1169 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1170 struct intel_crtc *intel_crtc =
1171 to_intel_crtc(encoder->base.crtc);
1156 int port = vlv_dport_to_channel(dport); 1172 int port = vlv_dport_to_channel(dport);
1173 int pipe = intel_crtc->pipe;
1157 1174
1158 /* Reset lanes to avoid HDMI flicker (VLV w/a) */ 1175 /* Reset lanes to avoid HDMI flicker (VLV w/a) */
1159 mutex_lock(&dev_priv->dpio_lock); 1176 mutex_lock(&dev_priv->dpio_lock);
1160 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 0x00000000); 1177 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), 0x00000000);
1161 vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 0x00e00060); 1178 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), 0x00e00060);
1162 mutex_unlock(&dev_priv->dpio_lock); 1179 mutex_unlock(&dev_priv->dpio_lock);
1163} 1180}
1164 1181
1165static void intel_hdmi_destroy(struct drm_connector *connector) 1182static void intel_hdmi_destroy(struct drm_connector *connector)
1166{ 1183{
1167 drm_sysfs_connector_remove(connector);
1168 drm_connector_cleanup(connector); 1184 drm_connector_cleanup(connector);
1169 kfree(connector); 1185 kfree(connector);
1170} 1186}
@@ -1211,6 +1227,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1211 1227
1212 connector->interlace_allowed = 1; 1228 connector->interlace_allowed = 1;
1213 connector->doublescan_allowed = 0; 1229 connector->doublescan_allowed = 0;
1230 connector->stereo_allowed = 1;
1214 1231
1215 switch (port) { 1232 switch (port) {
1216 case PORT_B: 1233 case PORT_B:
@@ -1275,11 +1292,11 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
1275 struct intel_encoder *intel_encoder; 1292 struct intel_encoder *intel_encoder;
1276 struct intel_connector *intel_connector; 1293 struct intel_connector *intel_connector;
1277 1294
1278 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); 1295 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
1279 if (!intel_dig_port) 1296 if (!intel_dig_port)
1280 return; 1297 return;
1281 1298
1282 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 1299 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
1283 if (!intel_connector) { 1300 if (!intel_connector) {
1284 kfree(intel_dig_port); 1301 kfree(intel_dig_port);
1285 return; 1302 return;
@@ -1296,10 +1313,10 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
1296 intel_encoder->get_hw_state = intel_hdmi_get_hw_state; 1313 intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
1297 intel_encoder->get_config = intel_hdmi_get_config; 1314 intel_encoder->get_config = intel_hdmi_get_config;
1298 if (IS_VALLEYVIEW(dev)) { 1315 if (IS_VALLEYVIEW(dev)) {
1299 intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable; 1316 intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
1300 intel_encoder->pre_enable = intel_hdmi_pre_enable; 1317 intel_encoder->pre_enable = vlv_hdmi_pre_enable;
1301 intel_encoder->enable = vlv_enable_hdmi; 1318 intel_encoder->enable = vlv_enable_hdmi;
1302 intel_encoder->post_disable = intel_hdmi_post_disable; 1319 intel_encoder->post_disable = vlv_hdmi_post_disable;
1303 } else { 1320 } else {
1304 intel_encoder->enable = intel_enable_hdmi; 1321 intel_encoder->enable = intel_enable_hdmi;
1305 } 1322 }
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d1c1e0f7f262..2ca17b14b6c1 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -34,6 +34,11 @@
34#include <drm/i915_drm.h> 34#include <drm/i915_drm.h>
35#include "i915_drv.h" 35#include "i915_drv.h"
36 36
37enum disp_clk {
38 CDCLK,
39 CZCLK
40};
41
37struct gmbus_port { 42struct gmbus_port {
38 const char *name; 43 const char *name;
39 int reg; 44 int reg;
@@ -58,10 +63,69 @@ to_intel_gmbus(struct i2c_adapter *i2c)
58 return container_of(i2c, struct intel_gmbus, adapter); 63 return container_of(i2c, struct intel_gmbus, adapter);
59} 64}
60 65
66static int get_disp_clk_div(struct drm_i915_private *dev_priv,
67 enum disp_clk clk)
68{
69 u32 reg_val;
70 int clk_ratio;
71
72 reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
73
74 if (clk == CDCLK)
75 clk_ratio =
76 ((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
77 else
78 clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
79
80 return clk_ratio;
81}
82
83static void gmbus_set_freq(struct drm_i915_private *dev_priv)
84{
85 int vco_freq[] = { 800, 1600, 2000, 2400 };
86 int gmbus_freq = 0, cdclk_div, hpll_freq;
87
88 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
89
90 /* Skip setting the gmbus freq if BIOS has already programmed it */
91 if (I915_READ(GMBUSFREQ_VLV) != 0xA0)
92 return;
93
94 /* Obtain SKU information */
95 mutex_lock(&dev_priv->dpio_lock);
96 hpll_freq =
97 vlv_cck_read(dev_priv, CCK_FUSE_REG) & CCK_FUSE_HPLL_FREQ_MASK;
98 mutex_unlock(&dev_priv->dpio_lock);
99
100 /* Get the CDCLK divide ratio */
101 cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
102
103 /*
104 * Program the gmbus_freq based on the cdclk frequency.
105 * BSpec erroneously claims we should aim for 4MHz, but
106 * in fact 1MHz is the correct frequency.
107 */
108 if (cdclk_div)
109 gmbus_freq = (vco_freq[hpll_freq] << 1) / cdclk_div;
110
111 if (WARN_ON(gmbus_freq == 0))
112 return;
113
114 I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
115}
116
61void 117void
62intel_i2c_reset(struct drm_device *dev) 118intel_i2c_reset(struct drm_device *dev)
63{ 119{
64 struct drm_i915_private *dev_priv = dev->dev_private; 120 struct drm_i915_private *dev_priv = dev->dev_private;
121
122 /*
 123 * In a BIOS-less system, program the correct gmbus frequency
 124 * before reading the EDID.
125 */
126 if (IS_VALLEYVIEW(dev))
127 gmbus_set_freq(dev_priv);
128
65 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); 129 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
66 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0); 130 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
67} 131}
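To make the arithmetic in gmbus_set_freq() concrete, here is a worked example; the HPLL fuse value of 0 and the CDCLK divide ratio of 4 are illustrative assumptions, not values read from any particular board:

	/* HPLL fuse = 0 selects vco_freq[0] = 800 MHz; cdclk_div = 4:
	 *   gmbus_freq = (800 << 1) / 4 = 400
	 * i.e. GMBUSFREQ_VLV is programmed with a cdclk-derived frequency
	 * in MHz, from which the gmbus unit derives its own clock. */
	u32 example_gmbus_freq = (800 << 1) / 4;	/* == 400 */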
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b8af94a5be39..b0ef55833087 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -92,6 +92,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
92 struct drm_device *dev = encoder->base.dev; 92 struct drm_device *dev = encoder->base.dev;
93 struct drm_i915_private *dev_priv = dev->dev_private; 93 struct drm_i915_private *dev_priv = dev->dev_private;
94 u32 lvds_reg, tmp, flags = 0; 94 u32 lvds_reg, tmp, flags = 0;
95 int dotclock;
95 96
96 if (HAS_PCH_SPLIT(dev)) 97 if (HAS_PCH_SPLIT(dev))
97 lvds_reg = PCH_LVDS; 98 lvds_reg = PCH_LVDS;
@@ -116,6 +117,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
116 117
117 pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE; 118 pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
118 } 119 }
120
121 dotclock = pipe_config->port_clock;
122
123 if (HAS_PCH_SPLIT(dev_priv->dev))
124 ironlake_check_encoder_dotclock(pipe_config, dotclock);
125
126 pipe_config->adjusted_mode.crtc_clock = dotclock;
119} 127}
120 128
121/* The LVDS pin pair needs to be on before the DPLLs are enabled. 129/* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -466,7 +474,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
466 474
467 intel_panel_fini(&lvds_connector->base.panel); 475 intel_panel_fini(&lvds_connector->base.panel);
468 476
469 drm_sysfs_connector_remove(connector);
470 drm_connector_cleanup(connector); 477 drm_connector_cleanup(connector);
471 kfree(connector); 478 kfree(connector);
472} 479}
@@ -802,7 +809,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
802 return true; 809 return true;
803 810
804 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { 811 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
805 struct child_device_config *child = dev_priv->vbt.child_dev + i; 812 union child_device_config *uchild = dev_priv->vbt.child_dev + i;
813 struct old_child_dev_config *child = &uchild->old;
806 814
807 /* If the device type is not LFP, continue. 815 /* If the device type is not LFP, continue.
808 * We have to check both the new identifiers as well as the 816 * We have to check both the new identifiers as well as the
@@ -956,11 +964,11 @@ void intel_lvds_init(struct drm_device *dev)
956 } 964 }
957 } 965 }
958 966
959 lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL); 967 lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
960 if (!lvds_encoder) 968 if (!lvds_encoder)
961 return; 969 return;
962 970
963 lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL); 971 lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL);
964 if (!lvds_connector) { 972 if (!lvds_connector) {
965 kfree(lvds_encoder); 973 kfree(lvds_encoder);
966 return; 974 return;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 119771ff46ab..b82050c96f3e 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -36,8 +36,11 @@
36#include "i915_drv.h" 36#include "i915_drv.h"
37#include "intel_drv.h" 37#include "intel_drv.h"
38 38
39#define PCI_ASLE 0xe4 39#define PCI_ASLE 0xe4
40#define PCI_ASLS 0xfc 40#define PCI_ASLS 0xfc
41#define PCI_SWSCI 0xe8
42#define PCI_SWSCI_SCISEL (1 << 15)
43#define PCI_SWSCI_GSSCIE (1 << 0)
41 44
42#define OPREGION_HEADER_OFFSET 0 45#define OPREGION_HEADER_OFFSET 0
43#define OPREGION_ACPI_OFFSET 0x100 46#define OPREGION_ACPI_OFFSET 0x100
@@ -107,25 +110,38 @@ struct opregion_asle {
107 u32 epfm; /* enabled panel fitting modes */ 110 u32 epfm; /* enabled panel fitting modes */
108 u8 plut[74]; /* panel LUT and identifier */ 111 u8 plut[74]; /* panel LUT and identifier */
109 u32 pfmb; /* PWM freq and min brightness */ 112 u32 pfmb; /* PWM freq and min brightness */
110 u8 rsvd[102]; 113 u32 cddv; /* color correction default values */
114 u32 pcft; /* power conservation features */
115 u32 srot; /* supported rotation angles */
116 u32 iuer; /* IUER events */
117 u8 rsvd[86];
111} __attribute__((packed)); 118} __attribute__((packed));
112 119
113/* Driver readiness indicator */ 120/* Driver readiness indicator */
114#define ASLE_ARDY_READY (1 << 0) 121#define ASLE_ARDY_READY (1 << 0)
115#define ASLE_ARDY_NOT_READY (0 << 0) 122#define ASLE_ARDY_NOT_READY (0 << 0)
116 123
117/* ASLE irq request bits */ 124/* ASLE Interrupt Command (ASLC) bits */
118#define ASLE_SET_ALS_ILLUM (1 << 0) 125#define ASLC_SET_ALS_ILLUM (1 << 0)
119#define ASLE_SET_BACKLIGHT (1 << 1) 126#define ASLC_SET_BACKLIGHT (1 << 1)
120#define ASLE_SET_PFIT (1 << 2) 127#define ASLC_SET_PFIT (1 << 2)
121#define ASLE_SET_PWM_FREQ (1 << 3) 128#define ASLC_SET_PWM_FREQ (1 << 3)
122#define ASLE_REQ_MSK 0xf 129#define ASLC_SUPPORTED_ROTATION_ANGLES (1 << 4)
123 130#define ASLC_BUTTON_ARRAY (1 << 5)
124/* response bits of ASLE irq request */ 131#define ASLC_CONVERTIBLE_INDICATOR (1 << 6)
125#define ASLE_ALS_ILLUM_FAILED (1<<10) 132#define ASLC_DOCKING_INDICATOR (1 << 7)
126#define ASLE_BACKLIGHT_FAILED (1<<12) 133#define ASLC_ISCT_STATE_CHANGE (1 << 8)
127#define ASLE_PFIT_FAILED (1<<14) 134#define ASLC_REQ_MSK 0x1ff
128#define ASLE_PWM_FREQ_FAILED (1<<16) 135/* response bits */
136#define ASLC_ALS_ILLUM_FAILED (1 << 10)
137#define ASLC_BACKLIGHT_FAILED (1 << 12)
138#define ASLC_PFIT_FAILED (1 << 14)
139#define ASLC_PWM_FREQ_FAILED (1 << 16)
140#define ASLC_ROTATION_ANGLES_FAILED (1 << 18)
141#define ASLC_BUTTON_ARRAY_FAILED (1 << 20)
142#define ASLC_CONVERTIBLE_FAILED (1 << 22)
143#define ASLC_DOCKING_FAILED (1 << 24)
144#define ASLC_ISCT_STATE_FAILED (1 << 26)
129 145
130/* Technology enabled indicator */ 146/* Technology enabled indicator */
131#define ASLE_TCHE_ALS_EN (1 << 0) 147#define ASLE_TCHE_ALS_EN (1 << 0)
@@ -151,6 +167,60 @@ struct opregion_asle {
151 167
152#define ASLE_CBLV_VALID (1<<31) 168#define ASLE_CBLV_VALID (1<<31)
153 169
170/* IUER */
171#define ASLE_IUER_DOCKING (1 << 7)
172#define ASLE_IUER_CONVERTIBLE (1 << 6)
173#define ASLE_IUER_ROTATION_LOCK_BTN (1 << 4)
174#define ASLE_IUER_VOLUME_DOWN_BTN (1 << 3)
175#define ASLE_IUER_VOLUME_UP_BTN (1 << 2)
176#define ASLE_IUER_WINDOWS_BTN (1 << 1)
177#define ASLE_IUER_POWER_BTN (1 << 0)
178
179/* Software System Control Interrupt (SWSCI) */
180#define SWSCI_SCIC_INDICATOR (1 << 0)
181#define SWSCI_SCIC_MAIN_FUNCTION_SHIFT 1
182#define SWSCI_SCIC_MAIN_FUNCTION_MASK (0xf << 1)
183#define SWSCI_SCIC_SUB_FUNCTION_SHIFT 8
184#define SWSCI_SCIC_SUB_FUNCTION_MASK (0xff << 8)
185#define SWSCI_SCIC_EXIT_PARAMETER_SHIFT 8
186#define SWSCI_SCIC_EXIT_PARAMETER_MASK (0xff << 8)
187#define SWSCI_SCIC_EXIT_STATUS_SHIFT 5
188#define SWSCI_SCIC_EXIT_STATUS_MASK (7 << 5)
189#define SWSCI_SCIC_EXIT_STATUS_SUCCESS 1
190
191#define SWSCI_FUNCTION_CODE(main, sub) \
192 ((main) << SWSCI_SCIC_MAIN_FUNCTION_SHIFT | \
193 (sub) << SWSCI_SCIC_SUB_FUNCTION_SHIFT)
194
195/* SWSCI: Get BIOS Data (GBDA) */
196#define SWSCI_GBDA 4
197#define SWSCI_GBDA_SUPPORTED_CALLS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 0)
198#define SWSCI_GBDA_REQUESTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 1)
199#define SWSCI_GBDA_BOOT_DISPLAY_PREF SWSCI_FUNCTION_CODE(SWSCI_GBDA, 4)
200#define SWSCI_GBDA_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 5)
201#define SWSCI_GBDA_TV_STANDARD SWSCI_FUNCTION_CODE(SWSCI_GBDA, 6)
202#define SWSCI_GBDA_INTERNAL_GRAPHICS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 7)
203#define SWSCI_GBDA_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_GBDA, 10)
204
205/* SWSCI: System BIOS Callbacks (SBCB) */
206#define SWSCI_SBCB 6
207#define SWSCI_SBCB_SUPPORTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 0)
208#define SWSCI_SBCB_INIT_COMPLETION SWSCI_FUNCTION_CODE(SWSCI_SBCB, 1)
209#define SWSCI_SBCB_PRE_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 3)
210#define SWSCI_SBCB_POST_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 4)
211#define SWSCI_SBCB_DISPLAY_SWITCH SWSCI_FUNCTION_CODE(SWSCI_SBCB, 5)
212#define SWSCI_SBCB_SET_TV_FORMAT SWSCI_FUNCTION_CODE(SWSCI_SBCB, 6)
213#define SWSCI_SBCB_ADAPTER_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 7)
214#define SWSCI_SBCB_DISPLAY_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 8)
215#define SWSCI_SBCB_SET_BOOT_DISPLAY SWSCI_FUNCTION_CODE(SWSCI_SBCB, 9)
216#define SWSCI_SBCB_SET_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 10)
217#define SWSCI_SBCB_SET_INTERNAL_GFX SWSCI_FUNCTION_CODE(SWSCI_SBCB, 11)
218#define SWSCI_SBCB_POST_HIRES_TO_DOS_FS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 16)
219#define SWSCI_SBCB_SUSPEND_RESUME SWSCI_FUNCTION_CODE(SWSCI_SBCB, 17)
220#define SWSCI_SBCB_SET_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 18)
221#define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
222#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
223
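For reference, expanding one of the macros above shows how a function code packs into the SCIC register, with the main function in bits 4:1 and the sub-function in bits 15:8:

	/* SWSCI_SBCB_DISPLAY_POWER_STATE
	 *   = SWSCI_FUNCTION_CODE(6, 8)
	 *   = (6 << 1) | (8 << 8)
	 *   = 0x080c
	 */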
154#define ACPI_OTHER_OUTPUT (0<<8) 224#define ACPI_OTHER_OUTPUT (0<<8)
155#define ACPI_VGA_OUTPUT (1<<8) 225#define ACPI_VGA_OUTPUT (1<<8)
156#define ACPI_TV_OUTPUT (2<<8) 226#define ACPI_TV_OUTPUT (2<<8)
@@ -158,6 +228,171 @@ struct opregion_asle {
158#define ACPI_LVDS_OUTPUT (4<<8) 228#define ACPI_LVDS_OUTPUT (4<<8)
159 229
160#ifdef CONFIG_ACPI 230#ifdef CONFIG_ACPI
231static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
232{
233 struct drm_i915_private *dev_priv = dev->dev_private;
234 struct opregion_swsci __iomem *swsci = dev_priv->opregion.swsci;
235 u32 main_function, sub_function, scic;
236 u16 pci_swsci;
237 u32 dslp;
238
239 if (!swsci)
240 return -ENODEV;
241
242 main_function = (function & SWSCI_SCIC_MAIN_FUNCTION_MASK) >>
243 SWSCI_SCIC_MAIN_FUNCTION_SHIFT;
244 sub_function = (function & SWSCI_SCIC_SUB_FUNCTION_MASK) >>
245 SWSCI_SCIC_SUB_FUNCTION_SHIFT;
246
247 /* Check if we can call the function. See swsci_setup for details. */
248 if (main_function == SWSCI_SBCB) {
249 if ((dev_priv->opregion.swsci_sbcb_sub_functions &
250 (1 << sub_function)) == 0)
251 return -EINVAL;
252 } else if (main_function == SWSCI_GBDA) {
253 if ((dev_priv->opregion.swsci_gbda_sub_functions &
254 (1 << sub_function)) == 0)
255 return -EINVAL;
256 }
257
258 /* Driver sleep timeout in ms. */
259 dslp = ioread32(&swsci->dslp);
260 if (!dslp) {
261 /* The spec says 2ms should be the default, but it's too small
262 * for some machines. */
263 dslp = 50;
264 } else if (dslp > 500) {
265 /* Hey bios, trust must be earned. */
 266 WARN_ONCE(1, "excessive driver sleep timeout (DSLP) %u\n", dslp);
267 dslp = 500;
268 }
269
270 /* The spec tells us to do this, but we are the only user... */
271 scic = ioread32(&swsci->scic);
272 if (scic & SWSCI_SCIC_INDICATOR) {
273 DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
274 return -EBUSY;
275 }
276
277 scic = function | SWSCI_SCIC_INDICATOR;
278
279 iowrite32(parm, &swsci->parm);
280 iowrite32(scic, &swsci->scic);
281
282 /* Ensure SCI event is selected and event trigger is cleared. */
283 pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
284 if (!(pci_swsci & PCI_SWSCI_SCISEL) || (pci_swsci & PCI_SWSCI_GSSCIE)) {
285 pci_swsci |= PCI_SWSCI_SCISEL;
286 pci_swsci &= ~PCI_SWSCI_GSSCIE;
287 pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
288 }
289
290 /* Use event trigger to tell bios to check the mail. */
291 pci_swsci |= PCI_SWSCI_GSSCIE;
292 pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
293
294 /* Poll for the result. */
295#define C (((scic = ioread32(&swsci->scic)) & SWSCI_SCIC_INDICATOR) == 0)
296 if (wait_for(C, dslp)) {
297 DRM_DEBUG_DRIVER("SWSCI request timed out\n");
298 return -ETIMEDOUT;
299 }
300
301 scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >>
302 SWSCI_SCIC_EXIT_STATUS_SHIFT;
303
304 /* Note: scic == 0 is an error! */
305 if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
306 DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic);
307 return -EIO;
308 }
309
310 if (parm_out)
311 *parm_out = ioread32(&swsci->parm);
312
313 return 0;
314
315#undef C
316}
317
318#define DISPLAY_TYPE_CRT 0
319#define DISPLAY_TYPE_TV 1
320#define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL 2
321#define DISPLAY_TYPE_INTERNAL_FLAT_PANEL 3
322
323int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
324 bool enable)
325{
326 struct drm_device *dev = intel_encoder->base.dev;
327 u32 parm = 0;
328 u32 type = 0;
329 u32 port;
330
331 /* don't care about old stuff for now */
332 if (!HAS_DDI(dev))
333 return 0;
334
335 port = intel_ddi_get_encoder_port(intel_encoder);
336 if (port == PORT_E) {
337 port = 0;
338 } else {
339 parm |= 1 << port;
340 port++;
341 }
342
343 if (!enable)
344 parm |= 4 << 8;
345
346 switch (intel_encoder->type) {
347 case INTEL_OUTPUT_ANALOG:
348 type = DISPLAY_TYPE_CRT;
349 break;
350 case INTEL_OUTPUT_UNKNOWN:
351 case INTEL_OUTPUT_DISPLAYPORT:
352 case INTEL_OUTPUT_HDMI:
353 type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
354 break;
355 case INTEL_OUTPUT_EDP:
356 type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
357 break;
358 default:
359 WARN_ONCE(1, "unsupported intel_encoder type %d\n",
360 intel_encoder->type);
361 return -EINVAL;
362 }
363
364 parm |= type << (16 + port * 3);
365
366 return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
367}
368
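A worked example of the parm layout built above, assuming the enum port value PORT_B == 1 (the enum is defined elsewhere): enabling an HDMI encoder on port B sets the per-port bit, bumps the index past the PORT_E special case, and stores the display type three bits per port starting at bit 16.

	/* parm for "enable HDMI on PORT_B":
	 *   parm  = 1 << 1;              per-port bit for PORT_B
	 *   port  = 2;                   index after the PORT_E special case
	 *   parm |= 2 << (16 + 2 * 3);   DISPLAY_TYPE_EXTERNAL_FLAT_PANEL
	 *   parm == 0x00800002
	 * (a disable request would additionally OR in 4 << 8) */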
369static const struct {
370 pci_power_t pci_power_state;
371 u32 parm;
372} power_state_map[] = {
373 { PCI_D0, 0x00 },
374 { PCI_D1, 0x01 },
375 { PCI_D2, 0x02 },
376 { PCI_D3hot, 0x04 },
377 { PCI_D3cold, 0x04 },
378};
379
380int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
381{
382 int i;
383
384 if (!HAS_DDI(dev))
385 return 0;
386
387 for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
388 if (state == power_state_map[i].pci_power_state)
389 return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE,
390 power_state_map[i].parm, NULL);
391 }
392
393 return -EINVAL;
394}
395
161static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) 396static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
162{ 397{
163 struct drm_i915_private *dev_priv = dev->dev_private; 398 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -166,12 +401,13 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
166 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); 401 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
167 402
168 if (!(bclp & ASLE_BCLP_VALID)) 403 if (!(bclp & ASLE_BCLP_VALID))
169 return ASLE_BACKLIGHT_FAILED; 404 return ASLC_BACKLIGHT_FAILED;
170 405
171 bclp &= ASLE_BCLP_MSK; 406 bclp &= ASLE_BCLP_MSK;
172 if (bclp > 255) 407 if (bclp > 255)
173 return ASLE_BACKLIGHT_FAILED; 408 return ASLC_BACKLIGHT_FAILED;
174 409
410 DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
175 intel_panel_set_backlight(dev, bclp, 255); 411 intel_panel_set_backlight(dev, bclp, 255);
176 iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); 412 iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
177 413
@@ -183,13 +419,13 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
183 /* alsi is the current ALS reading in lux. 0 indicates below sensor 419 /* alsi is the current ALS reading in lux. 0 indicates below sensor
184 range, 0xffff indicates above sensor range. 1-0xfffe are valid */ 420 range, 0xffff indicates above sensor range. 1-0xfffe are valid */
185 DRM_DEBUG_DRIVER("Illum is not supported\n"); 421 DRM_DEBUG_DRIVER("Illum is not supported\n");
186 return ASLE_ALS_ILLUM_FAILED; 422 return ASLC_ALS_ILLUM_FAILED;
187} 423}
188 424
189static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) 425static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
190{ 426{
191 DRM_DEBUG_DRIVER("PWM freq is not supported\n"); 427 DRM_DEBUG_DRIVER("PWM freq is not supported\n");
192 return ASLE_PWM_FREQ_FAILED; 428 return ASLC_PWM_FREQ_FAILED;
193} 429}
194 430
195static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) 431static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
@@ -197,39 +433,106 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
197 /* Panel fitting is currently controlled by the X code, so this is a 433 /* Panel fitting is currently controlled by the X code, so this is a
198 noop until modesetting support works fully */ 434 noop until modesetting support works fully */
199 DRM_DEBUG_DRIVER("Pfit is not supported\n"); 435 DRM_DEBUG_DRIVER("Pfit is not supported\n");
200 return ASLE_PFIT_FAILED; 436 return ASLC_PFIT_FAILED;
437}
438
439static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot)
440{
441 DRM_DEBUG_DRIVER("SROT is not supported\n");
442 return ASLC_ROTATION_ANGLES_FAILED;
443}
444
445static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
446{
447 if (!iuer)
448 DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
449 if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
450 DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n");
451 if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
452 DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n");
453 if (iuer & ASLE_IUER_VOLUME_UP_BTN)
454 DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n");
455 if (iuer & ASLE_IUER_WINDOWS_BTN)
456 DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n");
457 if (iuer & ASLE_IUER_POWER_BTN)
458 DRM_DEBUG_DRIVER("Button array event is not supported (power)\n");
459
460 return ASLC_BUTTON_ARRAY_FAILED;
461}
462
463static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
464{
465 if (iuer & ASLE_IUER_CONVERTIBLE)
466 DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
467 else
468 DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n");
469
470 return ASLC_CONVERTIBLE_FAILED;
471}
472
473static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
474{
475 if (iuer & ASLE_IUER_DOCKING)
476 DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
477 else
478 DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n");
479
480 return ASLC_DOCKING_FAILED;
481}
482
483static u32 asle_isct_state(struct drm_device *dev)
484{
485 DRM_DEBUG_DRIVER("ISCT is not supported\n");
486 return ASLC_ISCT_STATE_FAILED;
201} 487}
202 488
203void intel_opregion_asle_intr(struct drm_device *dev) 489void intel_opregion_asle_intr(struct drm_device *dev)
204{ 490{
205 struct drm_i915_private *dev_priv = dev->dev_private; 491 struct drm_i915_private *dev_priv = dev->dev_private;
206 struct opregion_asle __iomem *asle = dev_priv->opregion.asle; 492 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
207 u32 asle_stat = 0; 493 u32 aslc_stat = 0;
208 u32 asle_req; 494 u32 aslc_req;
209 495
210 if (!asle) 496 if (!asle)
211 return; 497 return;
212 498
213 asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK; 499 aslc_req = ioread32(&asle->aslc);
214 500
215 if (!asle_req) { 501 if (!(aslc_req & ASLC_REQ_MSK)) {
216 DRM_DEBUG_DRIVER("non asle set request??\n"); 502 DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
503 aslc_req);
217 return; 504 return;
218 } 505 }
219 506
220 if (asle_req & ASLE_SET_ALS_ILLUM) 507 if (aslc_req & ASLC_SET_ALS_ILLUM)
221 asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi)); 508 aslc_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
509
510 if (aslc_req & ASLC_SET_BACKLIGHT)
511 aslc_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
512
513 if (aslc_req & ASLC_SET_PFIT)
514 aslc_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
515
516 if (aslc_req & ASLC_SET_PWM_FREQ)
517 aslc_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
222 518
223 if (asle_req & ASLE_SET_BACKLIGHT) 519 if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
224 asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp)); 520 aslc_stat |= asle_set_supported_rotation_angles(dev,
521 ioread32(&asle->srot));
225 522
226 if (asle_req & ASLE_SET_PFIT) 523 if (aslc_req & ASLC_BUTTON_ARRAY)
227 asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit)); 524 aslc_stat |= asle_set_button_array(dev, ioread32(&asle->iuer));
228 525
229 if (asle_req & ASLE_SET_PWM_FREQ) 526 if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
230 asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb)); 527 aslc_stat |= asle_set_convertible(dev, ioread32(&asle->iuer));
231 528
232 iowrite32(asle_stat, &asle->aslc); 529 if (aslc_req & ASLC_DOCKING_INDICATOR)
530 aslc_stat |= asle_set_docking(dev, ioread32(&asle->iuer));
531
532 if (aslc_req & ASLC_ISCT_STATE_CHANGE)
533 aslc_stat |= asle_isct_state(dev);
534
535 iowrite32(aslc_stat, &asle->aslc);
233} 536}
234 537
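The handshake is one-shot per interrupt: the driver reads the request word, ORs together a status bit for every requested item, and writes the aggregate back to aslc. For instance, per the defines above:

	/* BIOS requests a PWM frequency change: aslc has
	 * ASLC_SET_PWM_FREQ (1 << 3) set.  The handler is a stub, so the
	 * driver answers by writing ASLC_PWM_FREQ_FAILED (1 << 16) back
	 * into aslc. */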
235#define ACPI_EV_DISPLAY_SWITCH (1<<0) 538#define ACPI_EV_DISPLAY_SWITCH (1<<0)
@@ -446,8 +749,68 @@ void intel_opregion_fini(struct drm_device *dev)
446 opregion->swsci = NULL; 749 opregion->swsci = NULL;
447 opregion->asle = NULL; 750 opregion->asle = NULL;
448 opregion->vbt = NULL; 751 opregion->vbt = NULL;
752 opregion->lid_state = NULL;
753}
754
755static void swsci_setup(struct drm_device *dev)
756{
757 struct drm_i915_private *dev_priv = dev->dev_private;
758 struct intel_opregion *opregion = &dev_priv->opregion;
759 bool requested_callbacks = false;
760 u32 tmp;
761
762 /* Sub-function code 0 is okay, let's allow them. */
763 opregion->swsci_gbda_sub_functions = 1;
764 opregion->swsci_sbcb_sub_functions = 1;
765
766 /* We use GBDA to ask for supported GBDA calls. */
767 if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
768 /* make the bits match the sub-function codes */
769 tmp <<= 1;
770 opregion->swsci_gbda_sub_functions |= tmp;
771 }
772
773 /*
774 * We also use GBDA to ask for _requested_ SBCB callbacks. The driver
775 * must not call interfaces that are not specifically requested by the
776 * bios.
777 */
778 if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
779 /* here, the bits already match sub-function codes */
780 opregion->swsci_sbcb_sub_functions |= tmp;
781 requested_callbacks = true;
782 }
783
784 /*
785 * But we use SBCB to ask for _supported_ SBCB calls. This does not mean
786 * the callback is _requested_. But we still can't call interfaces that
787 * are not requested.
788 */
789 if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
790 /* make the bits match the sub-function codes */
791 u32 low = tmp & 0x7ff;
792 u32 high = tmp & ~0xfff; /* bit 11 is reserved */
793 tmp = (high << 4) | (low << 1) | 1;
794
795 /* best guess what to do with supported wrt requested */
796 if (requested_callbacks) {
797 u32 req = opregion->swsci_sbcb_sub_functions;
798 if ((req & tmp) != req)
799 DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
800 /* XXX: for now, trust the requested callbacks */
801 /* opregion->swsci_sbcb_sub_functions &= tmp; */
802 } else {
803 opregion->swsci_sbcb_sub_functions |= tmp;
804 }
805 }
806
807 DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
808 opregion->swsci_gbda_sub_functions,
809 opregion->swsci_sbcb_sub_functions);
449} 810}
450#endif 811#else /* CONFIG_ACPI */
812static inline void swsci_setup(struct drm_device *dev) {}
813#endif /* CONFIG_ACPI */
451 814
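The bit surgery in the SBCB branch of swsci_setup() above is easiest to follow with numbers; the reported mask below is invented purely for illustration:

	/* BIOS reports supported SBCB callbacks as tmp = 0x1002:
	 *   bit 1  -> sub-function 2  (bits 0..10 map to sub-functions 1..11)
	 *   bit 12 -> sub-function 16 (bit 11 is reserved, bits 12+ shift by 4)
	 *
	 *   low  = 0x1002 & 0x7ff               = 0x0002
	 *   high = 0x1002 & ~0xfff              = 0x1000
	 *   tmp  = (high << 4) | (low << 1) | 1 = 0x10005
	 *
	 * leaving sub-functions 0, 2 and 16 marked callable. */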
452int intel_opregion_setup(struct drm_device *dev) 815int intel_opregion_setup(struct drm_device *dev)
453{ 816{
@@ -490,6 +853,7 @@ int intel_opregion_setup(struct drm_device *dev)
490 if (mboxes & MBOX_SWSCI) { 853 if (mboxes & MBOX_SWSCI) {
491 DRM_DEBUG_DRIVER("SWSCI supported\n"); 854 DRM_DEBUG_DRIVER("SWSCI supported\n");
492 opregion->swsci = base + OPREGION_SWSCI_OFFSET; 855 opregion->swsci = base + OPREGION_SWSCI_OFFSET;
856 swsci_setup(dev);
493 } 857 }
494 if (mboxes & MBOX_ASLE) { 858 if (mboxes & MBOX_ASLE) {
495 DRM_DEBUG_DRIVER("ASLE supported\n"); 859 DRM_DEBUG_DRIVER("ASLE supported\n");
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index ddfd0aefe0c0..a98a990fbab3 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -821,14 +821,11 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
821static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, 821static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
822 struct intel_crtc *crtc) 822 struct intel_crtc *crtc)
823{ 823{
824 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
825
826 if (!crtc->active) 824 if (!crtc->active)
827 return -EINVAL; 825 return -EINVAL;
828 826
829 /* can't use the overlay with double wide pipe */ 827 /* can't use the overlay with double wide pipe */
830 if (INTEL_INFO(overlay->dev)->gen < 4 && 828 if (crtc->config.double_wide)
831 (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
832 return -EINVAL; 829 return -EINVAL;
833 830
834 return 0; 831 return 0;
@@ -1056,7 +1053,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1056 return ret; 1053 return ret;
1057 } 1054 }
1058 1055
1059 params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL); 1056 params = kmalloc(sizeof(*params), GFP_KERNEL);
1060 if (!params) 1057 if (!params)
1061 return -ENOMEM; 1058 return -ENOMEM;
1062 1059
@@ -1323,7 +1320,7 @@ void intel_setup_overlay(struct drm_device *dev)
1323 if (!HAS_OVERLAY(dev)) 1320 if (!HAS_OVERLAY(dev))
1324 return; 1321 return;
1325 1322
1326 overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL); 1323 overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
1327 if (!overlay) 1324 if (!overlay)
1328 return; 1325 return;
1329 1326
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 293564a2896a..de1518614827 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -50,23 +50,22 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
50 struct intel_crtc_config *pipe_config, 50 struct intel_crtc_config *pipe_config,
51 int fitting_mode) 51 int fitting_mode)
52{ 52{
53 struct drm_display_mode *mode, *adjusted_mode; 53 struct drm_display_mode *adjusted_mode;
54 int x, y, width, height; 54 int x, y, width, height;
55 55
56 mode = &pipe_config->requested_mode;
57 adjusted_mode = &pipe_config->adjusted_mode; 56 adjusted_mode = &pipe_config->adjusted_mode;
58 57
59 x = y = width = height = 0; 58 x = y = width = height = 0;
60 59
61 /* Native modes don't need fitting */ 60 /* Native modes don't need fitting */
62 if (adjusted_mode->hdisplay == mode->hdisplay && 61 if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
63 adjusted_mode->vdisplay == mode->vdisplay) 62 adjusted_mode->vdisplay == pipe_config->pipe_src_h)
64 goto done; 63 goto done;
65 64
66 switch (fitting_mode) { 65 switch (fitting_mode) {
67 case DRM_MODE_SCALE_CENTER: 66 case DRM_MODE_SCALE_CENTER:
68 width = mode->hdisplay; 67 width = pipe_config->pipe_src_w;
69 height = mode->vdisplay; 68 height = pipe_config->pipe_src_h;
70 x = (adjusted_mode->hdisplay - width + 1)/2; 69 x = (adjusted_mode->hdisplay - width + 1)/2;
71 y = (adjusted_mode->vdisplay - height + 1)/2; 70 y = (adjusted_mode->vdisplay - height + 1)/2;
72 break; 71 break;
@@ -74,17 +73,19 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
74 case DRM_MODE_SCALE_ASPECT: 73 case DRM_MODE_SCALE_ASPECT:
75 /* Scale but preserve the aspect ratio */ 74 /* Scale but preserve the aspect ratio */
76 { 75 {
77 u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; 76 u32 scaled_width = adjusted_mode->hdisplay
78 u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; 77 * pipe_config->pipe_src_h;
78 u32 scaled_height = pipe_config->pipe_src_w
79 * adjusted_mode->vdisplay;
79 if (scaled_width > scaled_height) { /* pillar */ 80 if (scaled_width > scaled_height) { /* pillar */
80 width = scaled_height / mode->vdisplay; 81 width = scaled_height / pipe_config->pipe_src_h;
81 if (width & 1) 82 if (width & 1)
82 width++; 83 width++;
83 x = (adjusted_mode->hdisplay - width + 1) / 2; 84 x = (adjusted_mode->hdisplay - width + 1) / 2;
84 y = 0; 85 y = 0;
85 height = adjusted_mode->vdisplay; 86 height = adjusted_mode->vdisplay;
86 } else if (scaled_width < scaled_height) { /* letter */ 87 } else if (scaled_width < scaled_height) { /* letter */
87 height = scaled_width / mode->hdisplay; 88 height = scaled_width / pipe_config->pipe_src_w;
88 if (height & 1) 89 if (height & 1)
89 height++; 90 height++;
90 y = (adjusted_mode->vdisplay - height + 1) / 2; 91 y = (adjusted_mode->vdisplay - height + 1) / 2;
@@ -171,20 +172,96 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
171 return (FACTOR * ratio + FACTOR/2) / FACTOR; 172 return (FACTOR * ratio + FACTOR/2) / FACTOR;
172} 173}
173 174
175static void i965_scale_aspect(struct intel_crtc_config *pipe_config,
176 u32 *pfit_control)
177{
178 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
179 u32 scaled_width = adjusted_mode->hdisplay *
180 pipe_config->pipe_src_h;
181 u32 scaled_height = pipe_config->pipe_src_w *
182 adjusted_mode->vdisplay;
183
184 /* 965+ is easy, it does everything in hw */
185 if (scaled_width > scaled_height)
186 *pfit_control |= PFIT_ENABLE |
187 PFIT_SCALING_PILLAR;
188 else if (scaled_width < scaled_height)
189 *pfit_control |= PFIT_ENABLE |
190 PFIT_SCALING_LETTER;
191 else if (adjusted_mode->hdisplay != pipe_config->pipe_src_w)
192 *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
193}
194
195static void i9xx_scale_aspect(struct intel_crtc_config *pipe_config,
196 u32 *pfit_control, u32 *pfit_pgm_ratios,
197 u32 *border)
198{
199 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
200 u32 scaled_width = adjusted_mode->hdisplay *
201 pipe_config->pipe_src_h;
202 u32 scaled_height = pipe_config->pipe_src_w *
203 adjusted_mode->vdisplay;
204 u32 bits;
205
206 /*
207 * For earlier chips we have to calculate the scaling
208 * ratio by hand and program it into the
209 * PFIT_PGM_RATIO register
210 */
211 if (scaled_width > scaled_height) { /* pillar */
212 centre_horizontally(adjusted_mode,
213 scaled_height /
214 pipe_config->pipe_src_h);
215
216 *border = LVDS_BORDER_ENABLE;
217 if (pipe_config->pipe_src_h != adjusted_mode->vdisplay) {
218 bits = panel_fitter_scaling(pipe_config->pipe_src_h,
219 adjusted_mode->vdisplay);
220
221 *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
222 bits << PFIT_VERT_SCALE_SHIFT);
223 *pfit_control |= (PFIT_ENABLE |
224 VERT_INTERP_BILINEAR |
225 HORIZ_INTERP_BILINEAR);
226 }
227 } else if (scaled_width < scaled_height) { /* letter */
228 centre_vertically(adjusted_mode,
229 scaled_width /
230 pipe_config->pipe_src_w);
231
232 *border = LVDS_BORDER_ENABLE;
233 if (pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
234 bits = panel_fitter_scaling(pipe_config->pipe_src_w,
235 adjusted_mode->hdisplay);
236
237 *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
238 bits << PFIT_VERT_SCALE_SHIFT);
239 *pfit_control |= (PFIT_ENABLE |
240 VERT_INTERP_BILINEAR |
241 HORIZ_INTERP_BILINEAR);
242 }
243 } else {
 244 /* Aspects match, let hw scale both directions */
245 *pfit_control |= (PFIT_ENABLE |
246 VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
247 VERT_INTERP_BILINEAR |
248 HORIZ_INTERP_BILINEAR);
249 }
250}
251
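The cross-multiplication in both helpers compares aspect ratios without division. A worked example with an assumed 1024x768 source on a 1920x1080 panel:

	/*   scaled_width  = 1920 * 768  = 1474560
	 *   scaled_height = 1024 * 1080 = 1105920
	 * scaled_width > scaled_height, so the image is pillarboxed:
	 *   width = 1105920 / 768 = 1440
	 * leaving (1920 - 1440) / 2 = 240 border pixels on each side. */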
174void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, 252void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
175 struct intel_crtc_config *pipe_config, 253 struct intel_crtc_config *pipe_config,
176 int fitting_mode) 254 int fitting_mode)
177{ 255{
178 struct drm_device *dev = intel_crtc->base.dev; 256 struct drm_device *dev = intel_crtc->base.dev;
179 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; 257 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
180 struct drm_display_mode *mode, *adjusted_mode; 258 struct drm_display_mode *adjusted_mode;
181 259
182 mode = &pipe_config->requested_mode;
183 adjusted_mode = &pipe_config->adjusted_mode; 260 adjusted_mode = &pipe_config->adjusted_mode;
184 261
185 /* Native modes don't need fitting */ 262 /* Native modes don't need fitting */
186 if (adjusted_mode->hdisplay == mode->hdisplay && 263 if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
187 adjusted_mode->vdisplay == mode->vdisplay) 264 adjusted_mode->vdisplay == pipe_config->pipe_src_h)
188 goto out; 265 goto out;
189 266
190 switch (fitting_mode) { 267 switch (fitting_mode) {
@@ -193,81 +270,25 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
193 * For centered modes, we have to calculate border widths & 270 * For centered modes, we have to calculate border widths &
194 * heights and modify the values programmed into the CRTC. 271 * heights and modify the values programmed into the CRTC.
195 */ 272 */
196 centre_horizontally(adjusted_mode, mode->hdisplay); 273 centre_horizontally(adjusted_mode, pipe_config->pipe_src_w);
197 centre_vertically(adjusted_mode, mode->vdisplay); 274 centre_vertically(adjusted_mode, pipe_config->pipe_src_h);
198 border = LVDS_BORDER_ENABLE; 275 border = LVDS_BORDER_ENABLE;
199 break; 276 break;
200 case DRM_MODE_SCALE_ASPECT: 277 case DRM_MODE_SCALE_ASPECT:
201 /* Scale but preserve the aspect ratio */ 278 /* Scale but preserve the aspect ratio */
202 if (INTEL_INFO(dev)->gen >= 4) { 279 if (INTEL_INFO(dev)->gen >= 4)
203 u32 scaled_width = adjusted_mode->hdisplay * 280 i965_scale_aspect(pipe_config, &pfit_control);
204 mode->vdisplay; 281 else
205 u32 scaled_height = mode->hdisplay * 282 i9xx_scale_aspect(pipe_config, &pfit_control,
206 adjusted_mode->vdisplay; 283 &pfit_pgm_ratios, &border);
207
208 /* 965+ is easy, it does everything in hw */
209 if (scaled_width > scaled_height)
210 pfit_control |= PFIT_ENABLE |
211 PFIT_SCALING_PILLAR;
212 else if (scaled_width < scaled_height)
213 pfit_control |= PFIT_ENABLE |
214 PFIT_SCALING_LETTER;
215 else if (adjusted_mode->hdisplay != mode->hdisplay)
216 pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
217 } else {
218 u32 scaled_width = adjusted_mode->hdisplay *
219 mode->vdisplay;
220 u32 scaled_height = mode->hdisplay *
221 adjusted_mode->vdisplay;
222 /*
223 * For earlier chips we have to calculate the scaling
224 * ratio by hand and program it into the
225 * PFIT_PGM_RATIO register
226 */
227 if (scaled_width > scaled_height) { /* pillar */
228 centre_horizontally(adjusted_mode,
229 scaled_height /
230 mode->vdisplay);
231
232 border = LVDS_BORDER_ENABLE;
233 if (mode->vdisplay != adjusted_mode->vdisplay) {
234 u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
235 pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
236 bits << PFIT_VERT_SCALE_SHIFT);
237 pfit_control |= (PFIT_ENABLE |
238 VERT_INTERP_BILINEAR |
239 HORIZ_INTERP_BILINEAR);
240 }
241 } else if (scaled_width < scaled_height) { /* letter */
242 centre_vertically(adjusted_mode,
243 scaled_width /
244 mode->hdisplay);
245
246 border = LVDS_BORDER_ENABLE;
247 if (mode->hdisplay != adjusted_mode->hdisplay) {
248 u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
249 pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
250 bits << PFIT_VERT_SCALE_SHIFT);
251 pfit_control |= (PFIT_ENABLE |
252 VERT_INTERP_BILINEAR |
253 HORIZ_INTERP_BILINEAR);
254 }
255 } else {
256 /* Aspects match, Let hw scale both directions */
257 pfit_control |= (PFIT_ENABLE |
258 VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
259 VERT_INTERP_BILINEAR |
260 HORIZ_INTERP_BILINEAR);
261 }
262 }
263 break; 284 break;
264 case DRM_MODE_SCALE_FULLSCREEN: 285 case DRM_MODE_SCALE_FULLSCREEN:
265 /* 286 /*
266 * Full scaling, even if it changes the aspect ratio. 287 * Full scaling, even if it changes the aspect ratio.
267 * Fortunately this is all done for us in hw. 288 * Fortunately this is all done for us in hw.
268 */ 289 */
269 if (mode->vdisplay != adjusted_mode->vdisplay || 290 if (pipe_config->pipe_src_h != adjusted_mode->vdisplay ||
270 mode->hdisplay != adjusted_mode->hdisplay) { 291 pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
271 pfit_control |= PFIT_ENABLE; 292 pfit_control |= PFIT_ENABLE;
272 if (INTEL_INFO(dev)->gen >= 4) 293 if (INTEL_INFO(dev)->gen >= 4)
273 pfit_control |= PFIT_SCALING_AUTO; 294 pfit_control |= PFIT_SCALING_AUTO;
@@ -308,7 +329,7 @@ static int is_backlight_combination_mode(struct drm_device *dev)
308{ 329{
309 struct drm_i915_private *dev_priv = dev->dev_private; 330 struct drm_i915_private *dev_priv = dev->dev_private;
310 331
311 if (INTEL_INFO(dev)->gen >= 4) 332 if (IS_GEN4(dev))
312 return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE; 333 return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
313 334
314 if (IS_GEN2(dev)) 335 if (IS_GEN2(dev))
@@ -351,6 +372,9 @@ static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
351 I915_WRITE(BLC_PWM_CTL2, 372 I915_WRITE(BLC_PWM_CTL2,
352 dev_priv->regfile.saveBLC_PWM_CTL2); 373 dev_priv->regfile.saveBLC_PWM_CTL2);
353 } 374 }
375
376 if (IS_VALLEYVIEW(dev) && !val)
377 val = 0x0f42ffff;
354 } 378 }
355 379
356 return val; 380 return val;
@@ -441,7 +465,8 @@ static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
441 I915_WRITE(BLC_PWM_CPU_CTL, val | level); 465 I915_WRITE(BLC_PWM_CPU_CTL, val | level);
442} 466}
443 467
444static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level) 468static void intel_panel_actually_set_backlight(struct drm_device *dev,
469 u32 level)
445{ 470{
446 struct drm_i915_private *dev_priv = dev->dev_private; 471 struct drm_i915_private *dev_priv = dev->dev_private;
447 u32 tmp; 472 u32 tmp;
@@ -549,6 +574,8 @@ void intel_panel_enable_backlight(struct drm_device *dev,
549 intel_pipe_to_cpu_transcoder(dev_priv, pipe); 574 intel_pipe_to_cpu_transcoder(dev_priv, pipe);
550 unsigned long flags; 575 unsigned long flags;
551 576
577 DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
578
552 spin_lock_irqsave(&dev_priv->backlight.lock, flags); 579 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
553 580
554 if (dev_priv->backlight.level == 0) { 581 if (dev_priv->backlight.level == 0) {
@@ -607,10 +634,24 @@ set_level:
607 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); 634 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
608} 635}
609 636
637/* FIXME: use VBT vals to init PWM_CTL and PWM_CTL2 correctly */
638static void intel_panel_init_backlight_regs(struct drm_device *dev)
639{
640 struct drm_i915_private *dev_priv = dev->dev_private;
641
642 if (IS_VALLEYVIEW(dev)) {
643 u32 cur_val = I915_READ(BLC_PWM_CTL) &
644 BACKLIGHT_DUTY_CYCLE_MASK;
645 I915_WRITE(BLC_PWM_CTL, (0xf42 << 16) | cur_val);
646 }
647}
648
610static void intel_panel_init_backlight(struct drm_device *dev) 649static void intel_panel_init_backlight(struct drm_device *dev)
611{ 650{
612 struct drm_i915_private *dev_priv = dev->dev_private; 651 struct drm_i915_private *dev_priv = dev->dev_private;
613 652
653 intel_panel_init_backlight_regs(dev);
654
614 dev_priv->backlight.level = intel_panel_get_backlight(dev); 655 dev_priv->backlight.level = intel_panel_get_backlight(dev);
615 dev_priv->backlight.enabled = dev_priv->backlight.level != 0; 656 dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
616} 657}
@@ -637,10 +678,12 @@ intel_panel_detect(struct drm_device *dev)
637 } 678 }
638} 679}
639 680
640#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 681#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
641static int intel_panel_update_status(struct backlight_device *bd) 682static int intel_panel_update_status(struct backlight_device *bd)
642{ 683{
643 struct drm_device *dev = bl_get_data(bd); 684 struct drm_device *dev = bl_get_data(bd);
685 DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
686 bd->props.brightness, bd->props.max_brightness);
644 intel_panel_set_backlight(dev, bd->props.brightness, 687 intel_panel_set_backlight(dev, bd->props.brightness,
645 bd->props.max_brightness); 688 bd->props.max_brightness);
646 return 0; 689 return 0;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 26c2ea3e985c..09ac9e79830f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -32,6 +32,27 @@
32#include <linux/module.h> 32#include <linux/module.h>
33#include <drm/i915_powerwell.h> 33#include <drm/i915_powerwell.h>
34 34
35/**
 36 * RC6 is a special power stage which allows the GPU to enter a very
 37 * low-voltage mode when idle, dropping as low as 0V while in this stage. This
 38 * stage is entered automatically when the GPU is idle and RC6 support is
 39 * enabled; as soon as a new workload arises, the GPU wakes up automatically as well.
40 *
 41 * There are different RC6 modes available on Intel GPUs, which differ in
 42 * the latency required to enter and leave RC6 and in the voltage consumed
 43 * by the GPU in each state.
44 *
 45 * The combination of the following flags defines which states the GPU is
 46 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
 47 * RC6pp is the deepest RC6. Their support by hardware varies according to the
48 * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
49 * which brings the most power savings; deeper states save more power, but
50 * require higher latency to switch to and wake up.
51 */
52#define INTEL_RC6_ENABLE (1<<0)
53#define INTEL_RC6p_ENABLE (1<<1)
54#define INTEL_RC6pp_ENABLE (1<<2)
55
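
Note: a small sketch of how the three enable flags above might be combined into a mask from a requested depth. The helper name and the integer depth parameter are illustrative; only the INTEL_RC6*_ENABLE values come from the patch:

#include <stdio.h>

#define INTEL_RC6_ENABLE   (1<<0)
#define INTEL_RC6p_ENABLE  (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)

/* depth 1 = RC6 only, 2 = +deep RC6, 3 = +deepest RC6 */
static int rc6_mask_for_depth(int depth)
{
	int mask = 0;

	if (depth >= 1)
		mask |= INTEL_RC6_ENABLE;
	if (depth >= 2)
		mask |= INTEL_RC6p_ENABLE;
	if (depth >= 3)
		mask |= INTEL_RC6pp_ENABLE;
	return mask;
}

int main(void)
{
	printf("0x%x\n", rc6_mask_for_depth(2)); /* 0x3 */
	return 0;
}
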
35/* FBC, or Frame Buffer Compression, is a technique employed to compress the 56/* FBC, or Frame Buffer Compression, is a technique employed to compress the
36 * framebuffer contents in-memory, aiming at reducing the required bandwidth 57 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 37 * during in-memory transfers and, therefore, the power consumption. 58 * during in-memory transfers and, therefore, the power consumption.
@@ -43,14 +64,6 @@
43 * i915.i915_enable_fbc parameter 64 * i915.i915_enable_fbc parameter
44 */ 65 */
45 66
46static bool intel_crtc_active(struct drm_crtc *crtc)
47{
48 /* Be paranoid as we can arrive here with only partial
49 * state retrieved from the hardware during setup.
50 */
51 return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
52}
53
54static void i8xx_disable_fbc(struct drm_device *dev) 67static void i8xx_disable_fbc(struct drm_device *dev)
55{ 68{
56 struct drm_i915_private *dev_priv = dev->dev_private; 69 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -241,18 +254,6 @@ static void ironlake_disable_fbc(struct drm_device *dev)
241 dpfc_ctl &= ~DPFC_CTL_EN; 254 dpfc_ctl &= ~DPFC_CTL_EN;
242 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); 255 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
243 256
244 if (IS_IVYBRIDGE(dev))
245 /* WaFbcDisableDpfcClockGating:ivb */
246 I915_WRITE(ILK_DSPCLK_GATE_D,
247 I915_READ(ILK_DSPCLK_GATE_D) &
248 ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
249
250 if (IS_HASWELL(dev))
251 /* WaFbcDisableDpfcClockGating:hsw */
252 I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
253 I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
254 ~HSW_DPFC_GATING_DISABLE);
255
256 DRM_DEBUG_KMS("disabled FBC\n"); 257 DRM_DEBUG_KMS("disabled FBC\n");
257 } 258 }
258} 259}
@@ -282,18 +283,10 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
282 if (IS_IVYBRIDGE(dev)) { 283 if (IS_IVYBRIDGE(dev)) {
283 /* WaFbcAsynchFlipDisableFbcQueue:ivb */ 284 /* WaFbcAsynchFlipDisableFbcQueue:ivb */
284 I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS); 285 I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
285 /* WaFbcDisableDpfcClockGating:ivb */
286 I915_WRITE(ILK_DSPCLK_GATE_D,
287 I915_READ(ILK_DSPCLK_GATE_D) |
288 ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
289 } else { 286 } else {
290 /* WaFbcAsynchFlipDisableFbcQueue:hsw */ 287 /* WaFbcAsynchFlipDisableFbcQueue:hsw */
291 I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe), 288 I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
292 HSW_BYPASS_FBC_QUEUE); 289 HSW_BYPASS_FBC_QUEUE);
293 /* WaFbcDisableDpfcClockGating:hsw */
294 I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
295 I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
296 HSW_DPFC_GATING_DISABLE);
297 } 290 }
298 291
299 I915_WRITE(SNB_DPFC_CTL_SA, 292 I915_WRITE(SNB_DPFC_CTL_SA,
@@ -378,7 +371,7 @@ static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
378 371
379 intel_cancel_fbc_work(dev_priv); 372 intel_cancel_fbc_work(dev_priv);
380 373
381 work = kzalloc(sizeof *work, GFP_KERNEL); 374 work = kzalloc(sizeof(*work), GFP_KERNEL);
382 if (work == NULL) { 375 if (work == NULL) {
383 DRM_ERROR("Failed to allocate FBC work structure\n"); 376 DRM_ERROR("Failed to allocate FBC work structure\n");
384 dev_priv->display.enable_fbc(crtc, interval); 377 dev_priv->display.enable_fbc(crtc, interval);
@@ -458,7 +451,8 @@ void intel_update_fbc(struct drm_device *dev)
458 struct drm_framebuffer *fb; 451 struct drm_framebuffer *fb;
459 struct intel_framebuffer *intel_fb; 452 struct intel_framebuffer *intel_fb;
460 struct drm_i915_gem_object *obj; 453 struct drm_i915_gem_object *obj;
461 unsigned int max_hdisplay, max_vdisplay; 454 const struct drm_display_mode *adjusted_mode;
455 unsigned int max_width, max_height;
462 456
463 if (!I915_HAS_FBC(dev)) { 457 if (!I915_HAS_FBC(dev)) {
464 set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED); 458 set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
@@ -482,7 +476,7 @@ void intel_update_fbc(struct drm_device *dev)
482 */ 476 */
483 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { 477 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
484 if (intel_crtc_active(tmp_crtc) && 478 if (intel_crtc_active(tmp_crtc) &&
485 !to_intel_crtc(tmp_crtc)->primary_disabled) { 479 to_intel_crtc(tmp_crtc)->primary_enabled) {
486 if (crtc) { 480 if (crtc) {
487 if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES)) 481 if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
488 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); 482 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
@@ -502,6 +496,7 @@ void intel_update_fbc(struct drm_device *dev)
502 fb = crtc->fb; 496 fb = crtc->fb;
503 intel_fb = to_intel_framebuffer(fb); 497 intel_fb = to_intel_framebuffer(fb);
504 obj = intel_fb->obj; 498 obj = intel_fb->obj;
499 adjusted_mode = &intel_crtc->config.adjusted_mode;
505 500
506 if (i915_enable_fbc < 0 && 501 if (i915_enable_fbc < 0 &&
507 INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) { 502 INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
@@ -514,8 +509,8 @@ void intel_update_fbc(struct drm_device *dev)
514 DRM_DEBUG_KMS("fbc disabled per module param\n"); 509 DRM_DEBUG_KMS("fbc disabled per module param\n");
515 goto out_disable; 510 goto out_disable;
516 } 511 }
517 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || 512 if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
518 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { 513 (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
519 if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE)) 514 if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
520 DRM_DEBUG_KMS("mode incompatible with compression, " 515 DRM_DEBUG_KMS("mode incompatible with compression, "
521 "disabling\n"); 516 "disabling\n");
@@ -523,14 +518,14 @@ void intel_update_fbc(struct drm_device *dev)
523 } 518 }
524 519
525 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 520 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
526 max_hdisplay = 4096; 521 max_width = 4096;
527 max_vdisplay = 2048; 522 max_height = 2048;
528 } else { 523 } else {
529 max_hdisplay = 2048; 524 max_width = 2048;
530 max_vdisplay = 1536; 525 max_height = 1536;
531 } 526 }
532 if ((crtc->mode.hdisplay > max_hdisplay) || 527 if (intel_crtc->config.pipe_src_w > max_width ||
533 (crtc->mode.vdisplay > max_vdisplay)) { 528 intel_crtc->config.pipe_src_h > max_height) {
534 if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE)) 529 if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
535 DRM_DEBUG_KMS("mode too large for compression, disabling\n"); 530 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
536 goto out_disable; 531 goto out_disable;
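
Note: the limits above reduce to a simple size gate. A sketch, with the limits copied from the hunk and an illustrative helper name:

#include <stdbool.h>
#include <stdio.h>

static bool fbc_size_ok(bool g4x_or_gen5_plus, int src_w, int src_h)
{
	int max_w = g4x_or_gen5_plus ? 4096 : 2048;
	int max_h = g4x_or_gen5_plus ? 2048 : 1536;

	return src_w <= max_w && src_h <= max_h;
}

int main(void)
{
	printf("%d\n", fbc_size_ok(true, 2560, 1600));  /* 1 */
	printf("%d\n", fbc_size_ok(false, 2560, 1600)); /* 0 */
	return 0;
}
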
@@ -1087,8 +1082,9 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
1087 return enabled; 1082 return enabled;
1088} 1083}
1089 1084
1090static void pineview_update_wm(struct drm_device *dev) 1085static void pineview_update_wm(struct drm_crtc *unused_crtc)
1091{ 1086{
1087 struct drm_device *dev = unused_crtc->dev;
1092 struct drm_i915_private *dev_priv = dev->dev_private; 1088 struct drm_i915_private *dev_priv = dev->dev_private;
1093 struct drm_crtc *crtc; 1089 struct drm_crtc *crtc;
1094 const struct cxsr_latency *latency; 1090 const struct cxsr_latency *latency;
@@ -1105,8 +1101,12 @@ static void pineview_update_wm(struct drm_device *dev)
1105 1101
1106 crtc = single_enabled_crtc(dev); 1102 crtc = single_enabled_crtc(dev);
1107 if (crtc) { 1103 if (crtc) {
1108 int clock = crtc->mode.clock; 1104 const struct drm_display_mode *adjusted_mode;
1109 int pixel_size = crtc->fb->bits_per_pixel / 8; 1105 int pixel_size = crtc->fb->bits_per_pixel / 8;
1106 int clock;
1107
1108 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1109 clock = adjusted_mode->crtc_clock;
1110 1110
1111 /* Display SR */ 1111 /* Display SR */
1112 wm = intel_calculate_wm(clock, &pineview_display_wm, 1112 wm = intel_calculate_wm(clock, &pineview_display_wm,
@@ -1166,6 +1166,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
1166 int *cursor_wm) 1166 int *cursor_wm)
1167{ 1167{
1168 struct drm_crtc *crtc; 1168 struct drm_crtc *crtc;
1169 const struct drm_display_mode *adjusted_mode;
1169 int htotal, hdisplay, clock, pixel_size; 1170 int htotal, hdisplay, clock, pixel_size;
1170 int line_time_us, line_count; 1171 int line_time_us, line_count;
1171 int entries, tlb_miss; 1172 int entries, tlb_miss;
@@ -1177,9 +1178,10 @@ static bool g4x_compute_wm0(struct drm_device *dev,
1177 return false; 1178 return false;
1178 } 1179 }
1179 1180
1180 htotal = crtc->mode.htotal; 1181 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1181 hdisplay = crtc->mode.hdisplay; 1182 clock = adjusted_mode->crtc_clock;
1182 clock = crtc->mode.clock; 1183 htotal = adjusted_mode->htotal;
1184 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1183 pixel_size = crtc->fb->bits_per_pixel / 8; 1185 pixel_size = crtc->fb->bits_per_pixel / 8;
1184 1186
1185 /* Use the small buffer method to calculate plane watermark */ 1187 /* Use the small buffer method to calculate plane watermark */
@@ -1250,6 +1252,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
1250 int *display_wm, int *cursor_wm) 1252 int *display_wm, int *cursor_wm)
1251{ 1253{
1252 struct drm_crtc *crtc; 1254 struct drm_crtc *crtc;
1255 const struct drm_display_mode *adjusted_mode;
1253 int hdisplay, htotal, pixel_size, clock; 1256 int hdisplay, htotal, pixel_size, clock;
1254 unsigned long line_time_us; 1257 unsigned long line_time_us;
1255 int line_count, line_size; 1258 int line_count, line_size;
@@ -1262,9 +1265,10 @@ static bool g4x_compute_srwm(struct drm_device *dev,
1262 } 1265 }
1263 1266
1264 crtc = intel_get_crtc_for_plane(dev, plane); 1267 crtc = intel_get_crtc_for_plane(dev, plane);
1265 hdisplay = crtc->mode.hdisplay; 1268 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1266 htotal = crtc->mode.htotal; 1269 clock = adjusted_mode->crtc_clock;
1267 clock = crtc->mode.clock; 1270 htotal = adjusted_mode->htotal;
1271 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1268 pixel_size = crtc->fb->bits_per_pixel / 8; 1272 pixel_size = crtc->fb->bits_per_pixel / 8;
1269 1273
1270 line_time_us = (htotal * 1000) / clock; 1274 line_time_us = (htotal * 1000) / clock;
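
Note: for reference, the line-time math above with illustrative 1080p timings (htotal 2200 at a 148500 kHz crtc_clock):

#include <stdio.h>

int main(void)
{
	int htotal = 2200;   /* illustrative 1080p horizontal total */
	int clock = 148500;  /* pixel clock in kHz */

	/* same integer math as the driver: microseconds per scanline */
	int line_time_us = (htotal * 1000) / clock;

	printf("%d us\n", line_time_us); /* prints 14 */
	return 0;
}
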
@@ -1303,7 +1307,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
1303 if (!intel_crtc_active(crtc)) 1307 if (!intel_crtc_active(crtc))
1304 return false; 1308 return false;
1305 1309
1306 clock = crtc->mode.clock; /* VESA DOT Clock */ 1310 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
1307 pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */ 1311 pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
1308 1312
1309 entries = (clock / 1000) * pixel_size; 1313 entries = (clock / 1000) * pixel_size;
@@ -1365,8 +1369,9 @@ static void vlv_update_drain_latency(struct drm_device *dev)
1365 1369
1366#define single_plane_enabled(mask) is_power_of_2(mask) 1370#define single_plane_enabled(mask) is_power_of_2(mask)
1367 1371
1368static void valleyview_update_wm(struct drm_device *dev) 1372static void valleyview_update_wm(struct drm_crtc *crtc)
1369{ 1373{
1374 struct drm_device *dev = crtc->dev;
1370 static const int sr_latency_ns = 12000; 1375 static const int sr_latency_ns = 12000;
1371 struct drm_i915_private *dev_priv = dev->dev_private; 1376 struct drm_i915_private *dev_priv = dev->dev_private;
1372 int planea_wm, planeb_wm, cursora_wm, cursorb_wm; 1377 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1424,8 +1429,9 @@ static void valleyview_update_wm(struct drm_device *dev)
1424 (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1429 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1425} 1430}
1426 1431
1427static void g4x_update_wm(struct drm_device *dev) 1432static void g4x_update_wm(struct drm_crtc *crtc)
1428{ 1433{
1434 struct drm_device *dev = crtc->dev;
1429 static const int sr_latency_ns = 12000; 1435 static const int sr_latency_ns = 12000;
1430 struct drm_i915_private *dev_priv = dev->dev_private; 1436 struct drm_i915_private *dev_priv = dev->dev_private;
1431 int planea_wm, planeb_wm, cursora_wm, cursorb_wm; 1437 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1476,8 +1482,9 @@ static void g4x_update_wm(struct drm_device *dev)
1476 (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1482 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1477} 1483}
1478 1484
1479static void i965_update_wm(struct drm_device *dev) 1485static void i965_update_wm(struct drm_crtc *unused_crtc)
1480{ 1486{
1487 struct drm_device *dev = unused_crtc->dev;
1481 struct drm_i915_private *dev_priv = dev->dev_private; 1488 struct drm_i915_private *dev_priv = dev->dev_private;
1482 struct drm_crtc *crtc; 1489 struct drm_crtc *crtc;
1483 int srwm = 1; 1490 int srwm = 1;
@@ -1488,9 +1495,11 @@ static void i965_update_wm(struct drm_device *dev)
1488 if (crtc) { 1495 if (crtc) {
1489 /* self-refresh has much higher latency */ 1496 /* self-refresh has much higher latency */
1490 static const int sr_latency_ns = 12000; 1497 static const int sr_latency_ns = 12000;
1491 int clock = crtc->mode.clock; 1498 const struct drm_display_mode *adjusted_mode =
1492 int htotal = crtc->mode.htotal; 1499 &to_intel_crtc(crtc)->config.adjusted_mode;
1493 int hdisplay = crtc->mode.hdisplay; 1500 int clock = adjusted_mode->crtc_clock;
1501 int htotal = adjusted_mode->htotal;
1502 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1494 int pixel_size = crtc->fb->bits_per_pixel / 8; 1503 int pixel_size = crtc->fb->bits_per_pixel / 8;
1495 unsigned long line_time_us; 1504 unsigned long line_time_us;
1496 int entries; 1505 int entries;
@@ -1541,8 +1550,9 @@ static void i965_update_wm(struct drm_device *dev)
1541 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1550 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1542} 1551}
1543 1552
1544static void i9xx_update_wm(struct drm_device *dev) 1553static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1545{ 1554{
1555 struct drm_device *dev = unused_crtc->dev;
1546 struct drm_i915_private *dev_priv = dev->dev_private; 1556 struct drm_i915_private *dev_priv = dev->dev_private;
1547 const struct intel_watermark_params *wm_info; 1557 const struct intel_watermark_params *wm_info;
1548 uint32_t fwater_lo; 1558 uint32_t fwater_lo;
@@ -1562,11 +1572,13 @@ static void i9xx_update_wm(struct drm_device *dev)
1562 fifo_size = dev_priv->display.get_fifo_size(dev, 0); 1572 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1563 crtc = intel_get_crtc_for_plane(dev, 0); 1573 crtc = intel_get_crtc_for_plane(dev, 0);
1564 if (intel_crtc_active(crtc)) { 1574 if (intel_crtc_active(crtc)) {
1575 const struct drm_display_mode *adjusted_mode;
1565 int cpp = crtc->fb->bits_per_pixel / 8; 1576 int cpp = crtc->fb->bits_per_pixel / 8;
1566 if (IS_GEN2(dev)) 1577 if (IS_GEN2(dev))
1567 cpp = 4; 1578 cpp = 4;
1568 1579
1569 planea_wm = intel_calculate_wm(crtc->mode.clock, 1580 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1581 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1570 wm_info, fifo_size, cpp, 1582 wm_info, fifo_size, cpp,
1571 latency_ns); 1583 latency_ns);
1572 enabled = crtc; 1584 enabled = crtc;
@@ -1576,11 +1588,13 @@ static void i9xx_update_wm(struct drm_device *dev)
1576 fifo_size = dev_priv->display.get_fifo_size(dev, 1); 1588 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1577 crtc = intel_get_crtc_for_plane(dev, 1); 1589 crtc = intel_get_crtc_for_plane(dev, 1);
1578 if (intel_crtc_active(crtc)) { 1590 if (intel_crtc_active(crtc)) {
1591 const struct drm_display_mode *adjusted_mode;
1579 int cpp = crtc->fb->bits_per_pixel / 8; 1592 int cpp = crtc->fb->bits_per_pixel / 8;
1580 if (IS_GEN2(dev)) 1593 if (IS_GEN2(dev))
1581 cpp = 4; 1594 cpp = 4;
1582 1595
1583 planeb_wm = intel_calculate_wm(crtc->mode.clock, 1596 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1597 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1584 wm_info, fifo_size, cpp, 1598 wm_info, fifo_size, cpp,
1585 latency_ns); 1599 latency_ns);
1586 if (enabled == NULL) 1600 if (enabled == NULL)
@@ -1607,9 +1621,11 @@ static void i9xx_update_wm(struct drm_device *dev)
1607 if (HAS_FW_BLC(dev) && enabled) { 1621 if (HAS_FW_BLC(dev) && enabled) {
1608 /* self-refresh has much higher latency */ 1622 /* self-refresh has much higher latency */
1609 static const int sr_latency_ns = 6000; 1623 static const int sr_latency_ns = 6000;
1610 int clock = enabled->mode.clock; 1624 const struct drm_display_mode *adjusted_mode =
1611 int htotal = enabled->mode.htotal; 1625 &to_intel_crtc(enabled)->config.adjusted_mode;
1612 int hdisplay = enabled->mode.hdisplay; 1626 int clock = adjusted_mode->crtc_clock;
1627 int htotal = adjusted_mode->htotal;
1628 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1613 int pixel_size = enabled->fb->bits_per_pixel / 8; 1629 int pixel_size = enabled->fb->bits_per_pixel / 8;
1614 unsigned long line_time_us; 1630 unsigned long line_time_us;
1615 int entries; 1631 int entries;
@@ -1658,10 +1674,12 @@ static void i9xx_update_wm(struct drm_device *dev)
1658 } 1674 }
1659} 1675}
1660 1676
1661static void i830_update_wm(struct drm_device *dev) 1677static void i830_update_wm(struct drm_crtc *unused_crtc)
1662{ 1678{
1679 struct drm_device *dev = unused_crtc->dev;
1663 struct drm_i915_private *dev_priv = dev->dev_private; 1680 struct drm_i915_private *dev_priv = dev->dev_private;
1664 struct drm_crtc *crtc; 1681 struct drm_crtc *crtc;
1682 const struct drm_display_mode *adjusted_mode;
1665 uint32_t fwater_lo; 1683 uint32_t fwater_lo;
1666 int planea_wm; 1684 int planea_wm;
1667 1685
@@ -1669,7 +1687,9 @@ static void i830_update_wm(struct drm_device *dev)
1669 if (crtc == NULL) 1687 if (crtc == NULL)
1670 return; 1688 return;
1671 1689
1672 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, 1690 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1691 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1692 &i830_wm_info,
1673 dev_priv->display.get_fifo_size(dev, 0), 1693 dev_priv->display.get_fifo_size(dev, 0),
1674 4, latency_ns); 1694 4, latency_ns);
1675 fwater_lo = I915_READ(FW_BLC) & ~0xfff; 1695 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -1741,6 +1761,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1741 int *fbc_wm, int *display_wm, int *cursor_wm) 1761 int *fbc_wm, int *display_wm, int *cursor_wm)
1742{ 1762{
1743 struct drm_crtc *crtc; 1763 struct drm_crtc *crtc;
1764 const struct drm_display_mode *adjusted_mode;
1744 unsigned long line_time_us; 1765 unsigned long line_time_us;
1745 int hdisplay, htotal, pixel_size, clock; 1766 int hdisplay, htotal, pixel_size, clock;
1746 int line_count, line_size; 1767 int line_count, line_size;
@@ -1753,9 +1774,10 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1753 } 1774 }
1754 1775
1755 crtc = intel_get_crtc_for_plane(dev, plane); 1776 crtc = intel_get_crtc_for_plane(dev, plane);
1756 hdisplay = crtc->mode.hdisplay; 1777 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1757 htotal = crtc->mode.htotal; 1778 clock = adjusted_mode->crtc_clock;
1758 clock = crtc->mode.clock; 1779 htotal = adjusted_mode->htotal;
1780 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1759 pixel_size = crtc->fb->bits_per_pixel / 8; 1781 pixel_size = crtc->fb->bits_per_pixel / 8;
1760 1782
1761 line_time_us = (htotal * 1000) / clock; 1783 line_time_us = (htotal * 1000) / clock;
@@ -1785,8 +1807,9 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1785 display, cursor); 1807 display, cursor);
1786} 1808}
1787 1809
1788static void ironlake_update_wm(struct drm_device *dev) 1810static void ironlake_update_wm(struct drm_crtc *crtc)
1789{ 1811{
1812 struct drm_device *dev = crtc->dev;
1790 struct drm_i915_private *dev_priv = dev->dev_private; 1813 struct drm_i915_private *dev_priv = dev->dev_private;
1791 int fbc_wm, plane_wm, cursor_wm; 1814 int fbc_wm, plane_wm, cursor_wm;
1792 unsigned int enabled; 1815 unsigned int enabled;
@@ -1868,8 +1891,9 @@ static void ironlake_update_wm(struct drm_device *dev)
1868 */ 1891 */
1869} 1892}
1870 1893
1871static void sandybridge_update_wm(struct drm_device *dev) 1894static void sandybridge_update_wm(struct drm_crtc *crtc)
1872{ 1895{
1896 struct drm_device *dev = crtc->dev;
1873 struct drm_i915_private *dev_priv = dev->dev_private; 1897 struct drm_i915_private *dev_priv = dev->dev_private;
1874 int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */ 1898 int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
1875 u32 val; 1899 u32 val;
@@ -1970,8 +1994,9 @@ static void sandybridge_update_wm(struct drm_device *dev)
1970 cursor_wm); 1994 cursor_wm);
1971} 1995}
1972 1996
1973static void ivybridge_update_wm(struct drm_device *dev) 1997static void ivybridge_update_wm(struct drm_crtc *crtc)
1974{ 1998{
1999 struct drm_device *dev = crtc->dev;
1975 struct drm_i915_private *dev_priv = dev->dev_private; 2000 struct drm_i915_private *dev_priv = dev->dev_private;
1976 int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */ 2001 int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
1977 u32 val; 2002 u32 val;
@@ -2098,7 +2123,7 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
2098 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2123 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2099 uint32_t pixel_rate; 2124 uint32_t pixel_rate;
2100 2125
2101 pixel_rate = intel_crtc->config.adjusted_mode.clock; 2126 pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
2102 2127
2103 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to 2128 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
2104 * adjust the pixel_rate here. */ 2129 * adjust the pixel_rate here. */
@@ -2107,8 +2132,8 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
2107 uint64_t pipe_w, pipe_h, pfit_w, pfit_h; 2132 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
2108 uint32_t pfit_size = intel_crtc->config.pch_pfit.size; 2133 uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
2109 2134
2110 pipe_w = intel_crtc->config.requested_mode.hdisplay; 2135 pipe_w = intel_crtc->config.pipe_src_w;
2111 pipe_h = intel_crtc->config.requested_mode.vdisplay; 2136 pipe_h = intel_crtc->config.pipe_src_h;
2112 pfit_w = (pfit_size >> 16) & 0xFFFF; 2137 pfit_w = (pfit_size >> 16) & 0xFFFF;
2113 pfit_h = pfit_size & 0xFFFF; 2138 pfit_h = pfit_size & 0xFFFF;
2114 if (pipe_w < pfit_w) 2139 if (pipe_w < pfit_w)
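
Note: the unpacking above implies pch_pfit.size packs the post-fitter width in bits 31:16 and the height in bits 15:0. A sketch with illustrative names:

#include <stdint.h>
#include <stdio.h>

static void pfit_size_unpack(uint32_t pfit_size, int *w, int *h)
{
	*w = (pfit_size >> 16) & 0xffff;
	*h = pfit_size & 0xffff;
}

int main(void)
{
	int w, h;

	pfit_size_unpack((1920u << 16) | 1200u, &w, &h);
	printf("%dx%d\n", w, h); /* 1920x1200 */
	return 0;
}
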
@@ -2176,27 +2201,18 @@ struct hsw_wm_maximums {
2176 uint16_t fbc; 2201 uint16_t fbc;
2177}; 2202};
2178 2203
2179struct hsw_wm_values {
2180 uint32_t wm_pipe[3];
2181 uint32_t wm_lp[3];
2182 uint32_t wm_lp_spr[3];
2183 uint32_t wm_linetime[3];
2184 bool enable_fbc_wm;
2185};
2186
2187/* used in computing the new watermarks state */ 2204/* used in computing the new watermarks state */
2188struct intel_wm_config { 2205struct intel_wm_config {
2189 unsigned int num_pipes_active; 2206 unsigned int num_pipes_active;
2190 bool sprites_enabled; 2207 bool sprites_enabled;
2191 bool sprites_scaled; 2208 bool sprites_scaled;
2192 bool fbc_wm_enabled;
2193}; 2209};
2194 2210
2195/* 2211/*
2196 * For both WM_PIPE and WM_LP. 2212 * For both WM_PIPE and WM_LP.
2197 * mem_value must be in 0.1us units. 2213 * mem_value must be in 0.1us units.
2198 */ 2214 */
2199static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params, 2215static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
2200 uint32_t mem_value, 2216 uint32_t mem_value,
2201 bool is_lp) 2217 bool is_lp)
2202{ 2218{
@@ -2225,7 +2241,7 @@ static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
2225 * For both WM_PIPE and WM_LP. 2241 * For both WM_PIPE and WM_LP.
2226 * mem_value must be in 0.1us units. 2242 * mem_value must be in 0.1us units.
2227 */ 2243 */
2228static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params, 2244static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
2229 uint32_t mem_value) 2245 uint32_t mem_value)
2230{ 2246{
2231 uint32_t method1, method2; 2247 uint32_t method1, method2;
@@ -2248,7 +2264,7 @@ static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
2248 * For both WM_PIPE and WM_LP. 2264 * For both WM_PIPE and WM_LP.
2249 * mem_value must be in 0.1us units. 2265 * mem_value must be in 0.1us units.
2250 */ 2266 */
2251static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params, 2267static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
2252 uint32_t mem_value) 2268 uint32_t mem_value)
2253{ 2269{
2254 if (!params->active || !params->cur.enabled) 2270 if (!params->active || !params->cur.enabled)
@@ -2262,7 +2278,7 @@ static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
2262} 2278}
2263 2279
2264/* Only for WM_LP. */ 2280/* Only for WM_LP. */
2265static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params, 2281static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
2266 uint32_t pri_val) 2282 uint32_t pri_val)
2267{ 2283{
2268 if (!params->active || !params->pri.enabled) 2284 if (!params->active || !params->pri.enabled)
@@ -2356,11 +2372,11 @@ static unsigned int ilk_fbc_wm_max(void)
2356 return 15; 2372 return 15;
2357} 2373}
2358 2374
2359static void ilk_wm_max(struct drm_device *dev, 2375static void ilk_compute_wm_maximums(struct drm_device *dev,
2360 int level, 2376 int level,
2361 const struct intel_wm_config *config, 2377 const struct intel_wm_config *config,
2362 enum intel_ddb_partitioning ddb_partitioning, 2378 enum intel_ddb_partitioning ddb_partitioning,
2363 struct hsw_wm_maximums *max) 2379 struct hsw_wm_maximums *max)
2364{ 2380{
2365 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); 2381 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
2366 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); 2382 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
@@ -2368,9 +2384,9 @@ static void ilk_wm_max(struct drm_device *dev,
2368 max->fbc = ilk_fbc_wm_max(); 2384 max->fbc = ilk_fbc_wm_max();
2369} 2385}
2370 2386
2371static bool ilk_check_wm(int level, 2387static bool ilk_validate_wm_level(int level,
2372 const struct hsw_wm_maximums *max, 2388 const struct hsw_wm_maximums *max,
2373 struct intel_wm_level *result) 2389 struct intel_wm_level *result)
2374{ 2390{
2375 bool ret; 2391 bool ret;
2376 2392
@@ -2406,14 +2422,12 @@ static bool ilk_check_wm(int level,
2406 result->enable = true; 2422 result->enable = true;
2407 } 2423 }
2408 2424
2409 DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
2410
2411 return ret; 2425 return ret;
2412} 2426}
2413 2427
2414static void ilk_compute_wm_level(struct drm_i915_private *dev_priv, 2428static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
2415 int level, 2429 int level,
2416 struct hsw_pipe_wm_parameters *p, 2430 const struct hsw_pipe_wm_parameters *p,
2417 struct intel_wm_level *result) 2431 struct intel_wm_level *result)
2418{ 2432{
2419 uint16_t pri_latency = dev_priv->wm.pri_latency[level]; 2433 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2434,55 +2448,6 @@ static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
2434 result->enable = true; 2448 result->enable = true;
2435} 2449}
2436 2450
2437static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
2438 int level, struct hsw_wm_maximums *max,
2439 struct hsw_pipe_wm_parameters *params,
2440 struct intel_wm_level *result)
2441{
2442 enum pipe pipe;
2443 struct intel_wm_level res[3];
2444
2445 for (pipe = PIPE_A; pipe <= PIPE_C; pipe++)
2446 ilk_compute_wm_level(dev_priv, level, &params[pipe], &res[pipe]);
2447
2448 result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val);
2449 result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val);
2450 result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val);
2451 result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val);
2452 result->enable = true;
2453
2454 return ilk_check_wm(level, max, result);
2455}
2456
2457static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
2458 enum pipe pipe,
2459 struct hsw_pipe_wm_parameters *params)
2460{
2461 uint32_t pri_val, cur_val, spr_val;
2462 /* WM0 latency values stored in 0.1us units */
2463 uint16_t pri_latency = dev_priv->wm.pri_latency[0];
2464 uint16_t spr_latency = dev_priv->wm.spr_latency[0];
2465 uint16_t cur_latency = dev_priv->wm.cur_latency[0];
2466
2467 pri_val = ilk_compute_pri_wm(params, pri_latency, false);
2468 spr_val = ilk_compute_spr_wm(params, spr_latency);
2469 cur_val = ilk_compute_cur_wm(params, cur_latency);
2470
2471 WARN(pri_val > 127,
2472 "Primary WM error, mode not supported for pipe %c\n",
2473 pipe_name(pipe));
2474 WARN(spr_val > 127,
2475 "Sprite WM error, mode not supported for pipe %c\n",
2476 pipe_name(pipe));
2477 WARN(cur_val > 63,
2478 "Cursor WM error, mode not supported for pipe %c\n",
2479 pipe_name(pipe));
2480
2481 return (pri_val << WM0_PIPE_PLANE_SHIFT) |
2482 (spr_val << WM0_PIPE_SPRITE_SHIFT) |
2483 cur_val;
2484}
2485
2486static uint32_t 2451static uint32_t
2487hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc) 2452hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2488{ 2453{
@@ -2554,19 +2519,22 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2554 wm[3] *= 2; 2519 wm[3] *= 2;
2555} 2520}
2556 2521
2557static void intel_print_wm_latency(struct drm_device *dev, 2522static int ilk_wm_max_level(const struct drm_device *dev)
2558 const char *name,
2559 const uint16_t wm[5])
2560{ 2523{
2561 int level, max_level;
2562
2563 /* how many WM levels are we expecting */ 2524 /* how many WM levels are we expecting */
2564 if (IS_HASWELL(dev)) 2525 if (IS_HASWELL(dev))
2565 max_level = 4; 2526 return 4;
2566 else if (INTEL_INFO(dev)->gen >= 6) 2527 else if (INTEL_INFO(dev)->gen >= 6)
2567 max_level = 3; 2528 return 3;
2568 else 2529 else
2569 max_level = 2; 2530 return 2;
2531}
2532
2533static void intel_print_wm_latency(struct drm_device *dev,
2534 const char *name,
2535 const uint16_t wm[5])
2536{
2537 int level, max_level = ilk_wm_max_level(dev);
2570 2538
2571 for (level = 0; level <= max_level; level++) { 2539 for (level = 0; level <= max_level; level++) {
2572 unsigned int latency = wm[level]; 2540 unsigned int latency = wm[level];
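
Note: the refactored helper above makes the per-platform level count reusable. As a standalone sketch, with the IS_HASWELL/gen checks reduced to plain parameters:

#include <stdio.h>

static int wm_max_level(int gen, int is_haswell)
{
	if (is_haswell)
		return 4;  /* WM0..WM4 */
	else if (gen >= 6)
		return 3;  /* WM0..WM3 */
	else
		return 2;  /* WM0..WM2 */
}

int main(void)
{
	printf("%d\n", wm_max_level(7, 1)); /* Haswell: 4 */
	printf("%d\n", wm_max_level(6, 0)); /* SNB: 3 */
	return 0;
}
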
@@ -2606,101 +2574,154 @@ static void intel_setup_wm_latency(struct drm_device *dev)
2606 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); 2574 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2607} 2575}
2608 2576
2609static void hsw_compute_wm_parameters(struct drm_device *dev, 2577static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
2610 struct hsw_pipe_wm_parameters *params, 2578 struct hsw_pipe_wm_parameters *p,
2611 struct hsw_wm_maximums *lp_max_1_2, 2579 struct intel_wm_config *config)
2612 struct hsw_wm_maximums *lp_max_5_6)
2613{ 2580{
2614 struct drm_crtc *crtc; 2581 struct drm_device *dev = crtc->dev;
2582 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2583 enum pipe pipe = intel_crtc->pipe;
2615 struct drm_plane *plane; 2584 struct drm_plane *plane;
2616 enum pipe pipe;
2617 struct intel_wm_config config = {};
2618
2619 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2620 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2621 struct hsw_pipe_wm_parameters *p;
2622
2623 pipe = intel_crtc->pipe;
2624 p = &params[pipe];
2625
2626 p->active = intel_crtc_active(crtc);
2627 if (!p->active)
2628 continue;
2629
2630 config.num_pipes_active++;
2631 2585
2586 p->active = intel_crtc_active(crtc);
2587 if (p->active) {
2632 p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal; 2588 p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
2633 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); 2589 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2634 p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8; 2590 p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
2635 p->cur.bytes_per_pixel = 4; 2591 p->cur.bytes_per_pixel = 4;
2636 p->pri.horiz_pixels = 2592 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
2637 intel_crtc->config.requested_mode.hdisplay;
2638 p->cur.horiz_pixels = 64; 2593 p->cur.horiz_pixels = 64;
2639 /* TODO: for now, assume primary and cursor planes are always enabled. */ 2594 /* TODO: for now, assume primary and cursor planes are always enabled. */
2640 p->pri.enabled = true; 2595 p->pri.enabled = true;
2641 p->cur.enabled = true; 2596 p->cur.enabled = true;
2642 } 2597 }
2643 2598
2599 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
2600 config->num_pipes_active += intel_crtc_active(crtc);
2601
2644 list_for_each_entry(plane, &dev->mode_config.plane_list, head) { 2602 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2645 struct intel_plane *intel_plane = to_intel_plane(plane); 2603 struct intel_plane *intel_plane = to_intel_plane(plane);
2646 struct hsw_pipe_wm_parameters *p;
2647
2648 pipe = intel_plane->pipe;
2649 p = &params[pipe];
2650 2604
2651 p->spr = intel_plane->wm; 2605 if (intel_plane->pipe == pipe)
2606 p->spr = intel_plane->wm;
2652 2607
2653 config.sprites_enabled |= p->spr.enabled; 2608 config->sprites_enabled |= intel_plane->wm.enabled;
2654 config.sprites_scaled |= p->spr.scaled; 2609 config->sprites_scaled |= intel_plane->wm.scaled;
2655 } 2610 }
2611}
2656 2612
2657 ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2); 2613/* Compute new watermarks for the pipe */
2614static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2615 const struct hsw_pipe_wm_parameters *params,
2616 struct intel_pipe_wm *pipe_wm)
2617{
2618 struct drm_device *dev = crtc->dev;
2619 struct drm_i915_private *dev_priv = dev->dev_private;
2620 int level, max_level = ilk_wm_max_level(dev);
2621 /* LP0 watermark maximums depend on this pipe alone */
2622 struct intel_wm_config config = {
2623 .num_pipes_active = 1,
2624 .sprites_enabled = params->spr.enabled,
2625 .sprites_scaled = params->spr.scaled,
2626 };
2627 struct hsw_wm_maximums max;
2658 2628
2659 /* 5/6 split only in single pipe config on IVB+ */ 2629 /* LP0 watermarks always use 1/2 DDB partitioning */
2660 if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1) 2630 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2661 ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6); 2631
2662 else 2632 for (level = 0; level <= max_level; level++)
2663 *lp_max_5_6 = *lp_max_1_2; 2633 ilk_compute_wm_level(dev_priv, level, params,
2634 &pipe_wm->wm[level]);
2635
2636 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2637
2638 /* At least LP0 must be valid */
2639 return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
2664} 2640}
2665 2641
2666static void hsw_compute_wm_results(struct drm_device *dev, 2642/*
2667 struct hsw_pipe_wm_parameters *params, 2643 * Merge the watermarks from all active pipes for a specific level.
2668 struct hsw_wm_maximums *lp_maximums, 2644 */
2669 struct hsw_wm_values *results) 2645static void ilk_merge_wm_level(struct drm_device *dev,
2646 int level,
2647 struct intel_wm_level *ret_wm)
2670{ 2648{
2671 struct drm_i915_private *dev_priv = dev->dev_private; 2649 const struct intel_crtc *intel_crtc;
2672 struct drm_crtc *crtc;
2673 struct intel_wm_level lp_results[4] = {};
2674 enum pipe pipe;
2675 int level, max_level, wm_lp;
2676 2650
2677 for (level = 1; level <= 4; level++) 2651 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
2678 if (!hsw_compute_lp_wm(dev_priv, level, 2652 const struct intel_wm_level *wm =
2679 lp_maximums, params, 2653 &intel_crtc->wm.active.wm[level];
2680 &lp_results[level - 1])) 2654
2681 break; 2655 if (!wm->enable)
2682 max_level = level - 1; 2656 return;
2683 2657
2684 memset(results, 0, sizeof(*results)); 2658 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2659 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2660 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2661 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2662 }
2685 2663
2686 /* The spec says it is preferred to disable FBC WMs instead of disabling 2664 ret_wm->enable = true;
2687 * a WM level. */ 2665}
2688 results->enable_fbc_wm = true; 2666
2667/*
2668 * Merge all low power watermarks for all active pipes.
2669 */
2670static void ilk_wm_merge(struct drm_device *dev,
2671 const struct hsw_wm_maximums *max,
2672 struct intel_pipe_wm *merged)
2673{
2674 int level, max_level = ilk_wm_max_level(dev);
2675
2676 merged->fbc_wm_enabled = true;
2677
2678 /* merge each WM1+ level */
2689 for (level = 1; level <= max_level; level++) { 2679 for (level = 1; level <= max_level; level++) {
2690 if (lp_results[level - 1].fbc_val > lp_maximums->fbc) { 2680 struct intel_wm_level *wm = &merged->wm[level];
2691 results->enable_fbc_wm = false; 2681
2692 lp_results[level - 1].fbc_val = 0; 2682 ilk_merge_wm_level(dev, level, wm);
2683
2684 if (!ilk_validate_wm_level(level, max, wm))
2685 break;
2686
2687 /*
2688 * The spec says it is preferred to disable
2689 * FBC WMs instead of disabling a WM level.
2690 */
2691 if (wm->fbc_val > max->fbc) {
2692 merged->fbc_wm_enabled = false;
2693 wm->fbc_val = 0;
2693 } 2694 }
2694 } 2695 }
2696}
2697
2698static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2699{
2700 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2701 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
2702}
2695 2703
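
Note: a quick demonstration of the mapping above, using a stub struct standing in for intel_pipe_wm. With the WM4 level available, LP1/LP2/LP3 select levels 1/3/4; otherwise 1/2/3:

#include <stdio.h>

struct pipe_wm_stub { int wm4_enable; }; /* stand-in for intel_pipe_wm */

static int wm_lp_to_level(int wm_lp, const struct pipe_wm_stub *wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && wm->wm4_enable);
}

int main(void)
{
	struct pipe_wm_stub hsw = { .wm4_enable = 1 };
	struct pipe_wm_stub ilk = { .wm4_enable = 0 };

	for (int lp = 1; lp <= 3; lp++)
		printf("LP%d -> level %d (hsw) / %d (ilk)\n",
		       lp, wm_lp_to_level(lp, &hsw), wm_lp_to_level(lp, &ilk));
	return 0;
}
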
2704static void hsw_compute_wm_results(struct drm_device *dev,
2705 const struct intel_pipe_wm *merged,
2706 enum intel_ddb_partitioning partitioning,
2707 struct hsw_wm_values *results)
2708{
2709 struct intel_crtc *intel_crtc;
2710 int level, wm_lp;
2711
2712 results->enable_fbc_wm = merged->fbc_wm_enabled;
2713 results->partitioning = partitioning;
2714
2715 /* LP1+ register values */
2696 for (wm_lp = 1; wm_lp <= 3; wm_lp++) { 2716 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2697 const struct intel_wm_level *r; 2717 const struct intel_wm_level *r;
2698 2718
2699 level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp; 2719 level = ilk_wm_lp_to_level(wm_lp, merged);
2700 if (level > max_level) 2720
2721 r = &merged->wm[level];
2722 if (!r->enable)
2701 break; 2723 break;
2702 2724
2703 r = &lp_results[level - 1];
2704 results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2, 2725 results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
2705 r->fbc_val, 2726 r->fbc_val,
2706 r->pri_val, 2727 r->pri_val,
@@ -2708,116 +2729,158 @@ static void hsw_compute_wm_results(struct drm_device *dev,
2708 results->wm_lp_spr[wm_lp - 1] = r->spr_val; 2729 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2709 } 2730 }
2710 2731
2711 for_each_pipe(pipe) 2732 /* LP0 register values */
2712 results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, pipe, 2733 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
2713 &params[pipe]); 2734 enum pipe pipe = intel_crtc->pipe;
2735 const struct intel_wm_level *r =
2736 &intel_crtc->wm.active.wm[0];
2714 2737
2715 for_each_pipe(pipe) { 2738 if (WARN_ON(!r->enable))
2716 crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 2739 continue;
2717 results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc); 2740
2741 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
2742
2743 results->wm_pipe[pipe] =
2744 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2745 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2746 r->cur_val;
2718 } 2747 }
2719} 2748}
2720 2749
2721/* Find the result with the highest level enabled. Check for enable_fbc_wm in 2750/* Find the result with the highest level enabled. Check for enable_fbc_wm in
2722 * case both are at the same level. Prefer r1 in case they're the same. */ 2751 * case both are at the same level. Prefer r1 in case they're the same. */
2723static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1, 2752static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
2724 struct hsw_wm_values *r2) 2753 struct intel_pipe_wm *r1,
2754 struct intel_pipe_wm *r2)
2725{ 2755{
2726 int i, val_r1 = 0, val_r2 = 0; 2756 int level, max_level = ilk_wm_max_level(dev);
2757 int level1 = 0, level2 = 0;
2727 2758
2728 for (i = 0; i < 3; i++) { 2759 for (level = 1; level <= max_level; level++) {
2729 if (r1->wm_lp[i] & WM3_LP_EN) 2760 if (r1->wm[level].enable)
2730 val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK; 2761 level1 = level;
2731 if (r2->wm_lp[i] & WM3_LP_EN) 2762 if (r2->wm[level].enable)
2732 val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK; 2763 level2 = level;
2733 } 2764 }
2734 2765
2735 if (val_r1 == val_r2) { 2766 if (level1 == level2) {
2736 if (r2->enable_fbc_wm && !r1->enable_fbc_wm) 2767 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2737 return r2; 2768 return r2;
2738 else 2769 else
2739 return r1; 2770 return r1;
2740 } else if (val_r1 > val_r2) { 2771 } else if (level1 > level2) {
2741 return r1; 2772 return r1;
2742 } else { 2773 } else {
2743 return r2; 2774 return r2;
2744 } 2775 }
2745} 2776}
2746 2777
2778/* dirty bits used to track which watermarks need changes */
2779#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2780#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2781#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2782#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2783#define WM_DIRTY_FBC (1 << 24)
2784#define WM_DIRTY_DDB (1 << 25)
2785
2786static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
2787 const struct hsw_wm_values *old,
2788 const struct hsw_wm_values *new)
2789{
2790 unsigned int dirty = 0;
2791 enum pipe pipe;
2792 int wm_lp;
2793
2794 for_each_pipe(pipe) {
2795 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2796 dirty |= WM_DIRTY_LINETIME(pipe);
2797 /* Must disable LP1+ watermarks too */
2798 dirty |= WM_DIRTY_LP_ALL;
2799 }
2800
2801 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2802 dirty |= WM_DIRTY_PIPE(pipe);
2803 /* Must disable LP1+ watermarks too */
2804 dirty |= WM_DIRTY_LP_ALL;
2805 }
2806 }
2807
2808 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2809 dirty |= WM_DIRTY_FBC;
2810 /* Must disable LP1+ watermarks too */
2811 dirty |= WM_DIRTY_LP_ALL;
2812 }
2813
2814 if (old->partitioning != new->partitioning) {
2815 dirty |= WM_DIRTY_DDB;
2816 /* Must disable LP1+ watermarks too */
2817 dirty |= WM_DIRTY_LP_ALL;
2818 }
2819
2820 /* LP1+ watermarks already deemed dirty, no need to continue */
2821 if (dirty & WM_DIRTY_LP_ALL)
2822 return dirty;
2823
2824 /* Find the lowest numbered LP1+ watermark in need of an update... */
2825 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2826 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2827 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2828 break;
2829 }
2830
2831 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2832 for (; wm_lp <= 3; wm_lp++)
2833 dirty |= WM_DIRTY_LP(wm_lp);
2834
2835 return dirty;
2836}
2837
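
Note: the dirty mask introduced above packs everything into one unsigned int: pipe WM0 changes in bits 0-2, linetime in bits 8-10, LP1-3 in bits 16-18, FBC in bit 24 and DDB partitioning in bit 25. A sketch of the layout, with the macros copied from the hunk:

#include <stdio.h>

#define WM_DIRTY_PIPE(pipe)     (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp)      (1 << (15 + (wm_lp)))
#define WM_DIRTY_FBC            (1 << 24)
#define WM_DIRTY_DDB            (1 << 25)

int main(void)
{
	/* e.g. a pipe B WM0 change forces all LP1+ levels dirty too */
	unsigned int dirty = WM_DIRTY_PIPE(1) |
		WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3);

	printf("0x%08x\n", dirty); /* 0x00070002 */
	return 0;
}
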
2747/* 2838/*
2748 * The spec says we shouldn't write when we don't need, because every write 2839 * The spec says we shouldn't write when we don't need, because every write
2749 * causes WMs to be re-evaluated, expending some power. 2840 * causes WMs to be re-evaluated, expending some power.
2750 */ 2841 */
2751static void hsw_write_wm_values(struct drm_i915_private *dev_priv, 2842static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
2752 struct hsw_wm_values *results, 2843 struct hsw_wm_values *results)
2753 enum intel_ddb_partitioning partitioning)
2754{ 2844{
2755 struct hsw_wm_values previous; 2845 struct hsw_wm_values *previous = &dev_priv->wm.hw;
2846 unsigned int dirty;
2756 uint32_t val; 2847 uint32_t val;
2757 enum intel_ddb_partitioning prev_partitioning; 2848
2758 bool prev_enable_fbc_wm; 2849 dirty = ilk_compute_wm_dirty(dev_priv->dev, previous, results);
2759 2850 if (!dirty)
2760 previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
2761 previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
2762 previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
2763 previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
2764 previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
2765 previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
2766 previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2767 previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2768 previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2769 previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
2770 previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
2771 previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
2772
2773 prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
2774 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2775
2776 prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
2777
2778 if (memcmp(results->wm_pipe, previous.wm_pipe,
2779 sizeof(results->wm_pipe)) == 0 &&
2780 memcmp(results->wm_lp, previous.wm_lp,
2781 sizeof(results->wm_lp)) == 0 &&
2782 memcmp(results->wm_lp_spr, previous.wm_lp_spr,
2783 sizeof(results->wm_lp_spr)) == 0 &&
2784 memcmp(results->wm_linetime, previous.wm_linetime,
2785 sizeof(results->wm_linetime)) == 0 &&
2786 partitioning == prev_partitioning &&
2787 results->enable_fbc_wm == prev_enable_fbc_wm)
2788 return; 2851 return;
2789 2852
2790 if (previous.wm_lp[2] != 0) 2853 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 0)
2791 I915_WRITE(WM3_LP_ILK, 0); 2854 I915_WRITE(WM3_LP_ILK, 0);
2792 if (previous.wm_lp[1] != 0) 2855 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != 0)
2793 I915_WRITE(WM2_LP_ILK, 0); 2856 I915_WRITE(WM2_LP_ILK, 0);
2794 if (previous.wm_lp[0] != 0) 2857 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != 0)
2795 I915_WRITE(WM1_LP_ILK, 0); 2858 I915_WRITE(WM1_LP_ILK, 0);
2796 2859
2797 if (previous.wm_pipe[0] != results->wm_pipe[0]) 2860 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2798 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]); 2861 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2799 if (previous.wm_pipe[1] != results->wm_pipe[1]) 2862 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2800 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]); 2863 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2801 if (previous.wm_pipe[2] != results->wm_pipe[2]) 2864 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2802 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]); 2865 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2803 2866
2804 if (previous.wm_linetime[0] != results->wm_linetime[0]) 2867 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2805 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]); 2868 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2806 if (previous.wm_linetime[1] != results->wm_linetime[1]) 2869 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2807 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]); 2870 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2808 if (previous.wm_linetime[2] != results->wm_linetime[2]) 2871 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2809 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]); 2872 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2810 2873
2811 if (prev_partitioning != partitioning) { 2874 if (dirty & WM_DIRTY_DDB) {
2812 val = I915_READ(WM_MISC); 2875 val = I915_READ(WM_MISC);
2813 if (partitioning == INTEL_DDB_PART_1_2) 2876 if (results->partitioning == INTEL_DDB_PART_1_2)
2814 val &= ~WM_MISC_DATA_PARTITION_5_6; 2877 val &= ~WM_MISC_DATA_PARTITION_5_6;
2815 else 2878 else
2816 val |= WM_MISC_DATA_PARTITION_5_6; 2879 val |= WM_MISC_DATA_PARTITION_5_6;
2817 I915_WRITE(WM_MISC, val); 2880 I915_WRITE(WM_MISC, val);
2818 } 2881 }
2819 2882
2820 if (prev_enable_fbc_wm != results->enable_fbc_wm) { 2883 if (dirty & WM_DIRTY_FBC) {
2821 val = I915_READ(DISP_ARB_CTL); 2884 val = I915_READ(DISP_ARB_CTL);
2822 if (results->enable_fbc_wm) 2885 if (results->enable_fbc_wm)
2823 val &= ~DISP_FBC_WM_DIS; 2886 val &= ~DISP_FBC_WM_DIS;
@@ -2826,45 +2889,65 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
2826 I915_WRITE(DISP_ARB_CTL, val); 2889 I915_WRITE(DISP_ARB_CTL, val);
2827 } 2890 }
2828 2891
2829 if (previous.wm_lp_spr[0] != results->wm_lp_spr[0]) 2892 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2830 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]); 2893 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2831 if (previous.wm_lp_spr[1] != results->wm_lp_spr[1]) 2894 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2832 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]); 2895 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2833 if (previous.wm_lp_spr[2] != results->wm_lp_spr[2]) 2896 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2834 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]); 2897 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2835 2898
2836 if (results->wm_lp[0] != 0) 2899 if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0)
2837 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); 2900 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2838 if (results->wm_lp[1] != 0) 2901 if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0)
2839 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); 2902 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2840 if (results->wm_lp[2] != 0) 2903 if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0)
2841 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); 2904 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2905
2906 dev_priv->wm.hw = *results;
2842} 2907}
2843 2908
2844static void haswell_update_wm(struct drm_device *dev) 2909static void haswell_update_wm(struct drm_crtc *crtc)
2845{ 2910{
2911 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2912 struct drm_device *dev = crtc->dev;
2846 struct drm_i915_private *dev_priv = dev->dev_private; 2913 struct drm_i915_private *dev_priv = dev->dev_private;
2847 struct hsw_wm_maximums lp_max_1_2, lp_max_5_6; 2914 struct hsw_wm_maximums max;
2848 struct hsw_pipe_wm_parameters params[3]; 2915 struct hsw_pipe_wm_parameters params = {};
2849 struct hsw_wm_values results_1_2, results_5_6, *best_results; 2916 struct hsw_wm_values results = {};
2850 enum intel_ddb_partitioning partitioning; 2917 enum intel_ddb_partitioning partitioning;
2918 struct intel_pipe_wm pipe_wm = {};
2919 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
2920 struct intel_wm_config config = {};
2921
2922 hsw_compute_wm_parameters(crtc, &params, &config);
2851 2923
2852 hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6); 2924 intel_compute_pipe_wm(crtc, &params, &pipe_wm);
2925
2926 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
2927 return;
2853 2928
2854 hsw_compute_wm_results(dev, params, 2929 intel_crtc->wm.active = pipe_wm;
2855 &lp_max_1_2, &results_1_2); 2930
2856 if (lp_max_1_2.pri != lp_max_5_6.pri) { 2931 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
2857 hsw_compute_wm_results(dev, params, 2932 ilk_wm_merge(dev, &max, &lp_wm_1_2);
2858 &lp_max_5_6, &results_5_6); 2933
2859 best_results = hsw_find_best_result(&results_1_2, &results_5_6); 2934 /* 5/6 split only in single pipe config on IVB+ */
2935 if (INTEL_INFO(dev)->gen >= 7 &&
2936 config.num_pipes_active == 1 && config.sprites_enabled) {
2937 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
2938 ilk_wm_merge(dev, &max, &lp_wm_5_6);
2939
2940 best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
2860 } else { 2941 } else {
2861 best_results = &results_1_2; 2942 best_lp_wm = &lp_wm_1_2;
2862 } 2943 }
2863 2944
2864 partitioning = (best_results == &results_1_2) ? 2945 partitioning = (best_lp_wm == &lp_wm_1_2) ?
2865 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; 2946 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
2866 2947
2867 hsw_write_wm_values(dev_priv, best_results, partitioning); 2948 hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results);
2949
2950 hsw_write_wm_values(dev_priv, &results);
2868} 2951}
2869 2952
2870static void haswell_update_sprite_wm(struct drm_plane *plane, 2953static void haswell_update_sprite_wm(struct drm_plane *plane,
@@ -2879,7 +2962,7 @@ static void haswell_update_sprite_wm(struct drm_plane *plane,
2879 intel_plane->wm.horiz_pixels = sprite_width; 2962 intel_plane->wm.horiz_pixels = sprite_width;
2880 intel_plane->wm.bytes_per_pixel = pixel_size; 2963 intel_plane->wm.bytes_per_pixel = pixel_size;
2881 2964
2882 haswell_update_wm(plane->dev); 2965 haswell_update_wm(crtc);
2883} 2966}
2884 2967
2885static bool 2968static bool
@@ -2898,7 +2981,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
2898 return false; 2981 return false;
2899 } 2982 }
2900 2983
2901 clock = crtc->mode.clock; 2984 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
2902 2985
2903 /* Use the small buffer method to calculate the sprite watermark */ 2986 /* Use the small buffer method to calculate the sprite watermark */
2904 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; 2987 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
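
Note: a worked instance of the small-buffer method above, with illustrative numbers (148500 kHz clock, 4 bytes per pixel, 700 ns latency):

#include <stdio.h>

int main(void)
{
	int clock = 148500;           /* kHz */
	int pixel_size = 4;           /* bytes per pixel */
	int display_latency_ns = 700;

	/* same integer math as the driver */
	int entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;

	printf("%d entries\n", entries); /* prints 415 */
	return 0;
}
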
@@ -2933,7 +3016,7 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
2933 } 3016 }
2934 3017
2935 crtc = intel_get_crtc_for_plane(dev, plane); 3018 crtc = intel_get_crtc_for_plane(dev, plane);
2936 clock = crtc->mode.clock; 3019 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
2937 if (!clock) { 3020 if (!clock) {
2938 *sprite_wm = 0; 3021 *sprite_wm = 0;
2939 return false; 3022 return false;
@@ -3044,6 +3127,74 @@ static void sandybridge_update_sprite_wm(struct drm_plane *plane,
3044 I915_WRITE(WM3S_LP_IVB, sprite_wm); 3127 I915_WRITE(WM3S_LP_IVB, sprite_wm);
3045} 3128}
3046 3129
3130static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3131{
3132 struct drm_device *dev = crtc->dev;
3133 struct drm_i915_private *dev_priv = dev->dev_private;
3134 struct hsw_wm_values *hw = &dev_priv->wm.hw;
3135 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3136 struct intel_pipe_wm *active = &intel_crtc->wm.active;
3137 enum pipe pipe = intel_crtc->pipe;
3138 static const unsigned int wm0_pipe_reg[] = {
3139 [PIPE_A] = WM0_PIPEA_ILK,
3140 [PIPE_B] = WM0_PIPEB_ILK,
3141 [PIPE_C] = WM0_PIPEC_IVB,
3142 };
3143
3144 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
3145 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3146
3147 if (intel_crtc_active(crtc)) {
3148 u32 tmp = hw->wm_pipe[pipe];
3149
3150 /*
3151 * For active pipes LP0 watermark is marked as
 3152		 * enabled, and LP1+ watermarks as disabled since
3153 * we can't really reverse compute them in case
3154 * multiple pipes are active.
3155 */
3156 active->wm[0].enable = true;
3157 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
3158 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
3159 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
3160 active->linetime = hw->wm_linetime[pipe];
3161 } else {
3162 int level, max_level = ilk_wm_max_level(dev);
3163
3164 /*
3165 * For inactive pipes, all watermark levels
3166 * should be marked as enabled but zeroed,
3167 * which is what we'd compute them to.
3168 */
3169 for (level = 0; level <= max_level; level++)
3170 active->wm[level].enable = true;
3171 }
3172}
3173
3174void ilk_wm_get_hw_state(struct drm_device *dev)
3175{
3176 struct drm_i915_private *dev_priv = dev->dev_private;
3177 struct hsw_wm_values *hw = &dev_priv->wm.hw;
3178 struct drm_crtc *crtc;
3179
3180 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3181 ilk_pipe_wm_get_hw_state(crtc);
3182
3183 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
3184 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
3185 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
3186
3187 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
3188 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
3189 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
3190
3191 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
3192 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
3193
3194 hw->enable_fbc_wm =
3195 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
3196}
3197
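The hw-state readback above unpacks a single packed WM0 register into primary, sprite, and cursor watermark values. A compilable sketch of the same unpacking, using illustrative masks and shifts (the driver's real WM0_PIPE_* layout is defined in i915_reg.h and may use different field widths):

#include <stdint.h>

#define PRI_SHIFT 16
#define PRI_MASK  (0xffu << PRI_SHIFT)    /* illustrative, not i915_reg.h */
#define SPR_SHIFT 8
#define SPR_MASK  (0xffu << SPR_SHIFT)
#define CUR_MASK  0xffu

struct wm0_levels { uint32_t pri, spr, cur; };

static struct wm0_levels decode_wm0(uint32_t reg)
{
    struct wm0_levels wm = {
        .pri = (reg & PRI_MASK) >> PRI_SHIFT,
        .spr = (reg & SPR_MASK) >> SPR_SHIFT,
        .cur = reg & CUR_MASK,
    };
    return wm;
}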
3047/** 3198/**
3048 * intel_update_watermarks - update FIFO watermark values based on current modes 3199 * intel_update_watermarks - update FIFO watermark values based on current modes
3049 * 3200 *
@@ -3076,12 +3227,12 @@ static void sandybridge_update_sprite_wm(struct drm_plane *plane,
3076 * We don't use the sprite, so we can ignore that. And on Crestline we have 3227 * We don't use the sprite, so we can ignore that. And on Crestline we have
3077 * to set the non-SR watermarks to 8. 3228 * to set the non-SR watermarks to 8.
3078 */ 3229 */
3079void intel_update_watermarks(struct drm_device *dev) 3230void intel_update_watermarks(struct drm_crtc *crtc)
3080{ 3231{
3081 struct drm_i915_private *dev_priv = dev->dev_private; 3232 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
3082 3233
3083 if (dev_priv->display.update_wm) 3234 if (dev_priv->display.update_wm)
3084 dev_priv->display.update_wm(dev); 3235 dev_priv->display.update_wm(crtc);
3085} 3236}
3086 3237
3087void intel_update_sprite_watermarks(struct drm_plane *plane, 3238void intel_update_sprite_watermarks(struct drm_plane *plane,
@@ -3287,6 +3438,98 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
3287 return limits; 3438 return limits;
3288} 3439}
3289 3440
3441static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3442{
3443 int new_power;
3444
3445 new_power = dev_priv->rps.power;
3446 switch (dev_priv->rps.power) {
3447 case LOW_POWER:
3448 if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
3449 new_power = BETWEEN;
3450 break;
3451
3452 case BETWEEN:
3453 if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
3454 new_power = LOW_POWER;
3455 else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
3456 new_power = HIGH_POWER;
3457 break;
3458
3459 case HIGH_POWER:
3460 if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
3461 new_power = BETWEEN;
3462 break;
3463 }
3464 /* Max/min bins are special */
3465 if (val == dev_priv->rps.min_delay)
3466 new_power = LOW_POWER;
3467 if (val == dev_priv->rps.max_delay)
3468 new_power = HIGH_POWER;
3469 if (new_power == dev_priv->rps.power)
3470 return;
3471
3472 /* Note the units here are not exactly 1us, but 1280ns. */
3473 switch (new_power) {
3474 case LOW_POWER:
3475 /* Upclock if more than 95% busy over 16ms */
3476 I915_WRITE(GEN6_RP_UP_EI, 12500);
3477 I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
3478
3479 /* Downclock if less than 85% busy over 32ms */
3480 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3481 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
3482
3483 I915_WRITE(GEN6_RP_CONTROL,
3484 GEN6_RP_MEDIA_TURBO |
3485 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3486 GEN6_RP_MEDIA_IS_GFX |
3487 GEN6_RP_ENABLE |
3488 GEN6_RP_UP_BUSY_AVG |
3489 GEN6_RP_DOWN_IDLE_AVG);
3490 break;
3491
3492 case BETWEEN:
3493 /* Upclock if more than 90% busy over 13ms */
3494 I915_WRITE(GEN6_RP_UP_EI, 10250);
3495 I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
3496
3497 /* Downclock if less than 75% busy over 32ms */
3498 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3499 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
3500
3501 I915_WRITE(GEN6_RP_CONTROL,
3502 GEN6_RP_MEDIA_TURBO |
3503 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3504 GEN6_RP_MEDIA_IS_GFX |
3505 GEN6_RP_ENABLE |
3506 GEN6_RP_UP_BUSY_AVG |
3507 GEN6_RP_DOWN_IDLE_AVG);
3508 break;
3509
3510 case HIGH_POWER:
3511 /* Upclock if more than 85% busy over 10ms */
3512 I915_WRITE(GEN6_RP_UP_EI, 8000);
3513 I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
3514
3515 /* Downclock if less than 60% busy over 32ms */
3516 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3517 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
3518
3519 I915_WRITE(GEN6_RP_CONTROL,
3520 GEN6_RP_MEDIA_TURBO |
3521 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3522 GEN6_RP_MEDIA_IS_GFX |
3523 GEN6_RP_ENABLE |
3524 GEN6_RP_UP_BUSY_AVG |
3525 GEN6_RP_DOWN_IDLE_AVG);
3526 break;
3527 }
3528
3529 dev_priv->rps.power = new_power;
3530 dev_priv->rps.last_adj = 0;
3531}
3532
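The seemingly magic EI/threshold values in gen6_set_rps_thresholds fall out of two inputs, a window length and a busy percentage, once you know one evaluation-interval tick is 1280 ns (per the comment above). A standalone check of the LOW_POWER upclock pair; note the driver's 11800 sits slightly below an exact 95% of 12500:

#include <stdio.h>

static unsigned int ei_ticks(unsigned int window_ms)
{
    return window_ms * 1000000u / 1280u;    /* 1 tick = 1280 ns */
}

static unsigned int busy_ticks(unsigned int window_ms, unsigned int pct)
{
    return ei_ticks(window_ms) * pct / 100u;
}

int main(void)
{
    /* LOW_POWER upclock pair: EI 12500 ticks (16 ms), ~95% busy */
    printf("EI=%u THRESH~=%u\n", ei_ticks(16), busy_ticks(16, 95));
    /* prints EI=12500 THRESH~=11875; the patch programs 11800 */
    return 0;
}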
3290void gen6_set_rps(struct drm_device *dev, u8 val) 3533void gen6_set_rps(struct drm_device *dev, u8 val)
3291{ 3534{
3292 struct drm_i915_private *dev_priv = dev->dev_private; 3535 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3299,6 +3542,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
3299 if (val == dev_priv->rps.cur_delay) 3542 if (val == dev_priv->rps.cur_delay)
3300 return; 3543 return;
3301 3544
3545 gen6_set_rps_thresholds(dev_priv, val);
3546
3302 if (IS_HASWELL(dev)) 3547 if (IS_HASWELL(dev))
3303 I915_WRITE(GEN6_RPNSWREQ, 3548 I915_WRITE(GEN6_RPNSWREQ,
3304 HSW_FREQUENCY(val)); 3549 HSW_FREQUENCY(val));
@@ -3320,6 +3565,32 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
3320 trace_intel_gpu_freq_change(val * 50); 3565 trace_intel_gpu_freq_change(val * 50);
3321} 3566}
3322 3567
3568void gen6_rps_idle(struct drm_i915_private *dev_priv)
3569{
3570 mutex_lock(&dev_priv->rps.hw_lock);
3571 if (dev_priv->rps.enabled) {
3572 if (dev_priv->info->is_valleyview)
3573 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3574 else
3575 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3576 dev_priv->rps.last_adj = 0;
3577 }
3578 mutex_unlock(&dev_priv->rps.hw_lock);
3579}
3580
3581void gen6_rps_boost(struct drm_i915_private *dev_priv)
3582{
3583 mutex_lock(&dev_priv->rps.hw_lock);
3584 if (dev_priv->rps.enabled) {
3585 if (dev_priv->info->is_valleyview)
3586 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
3587 else
3588 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
3589 dev_priv->rps.last_adj = 0;
3590 }
3591 mutex_unlock(&dev_priv->rps.hw_lock);
3592}
3593
3323/* 3594/*
3324 * Wait until the previous freq change has completed, 3595 * Wait until the previous freq change has completed,
3325 * or the timeout elapsed, and then update our notion 3596 * or the timeout elapsed, and then update our notion
@@ -3415,6 +3686,20 @@ static void valleyview_disable_rps(struct drm_device *dev)
3415 } 3686 }
3416} 3687}
3417 3688
3689static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
3690{
3691 if (IS_GEN6(dev))
3692 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
3693
3694 if (IS_HASWELL(dev))
3695 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
3696
3697 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
3698 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3699 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3700 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3701}
3702
3418int intel_enable_rc6(const struct drm_device *dev) 3703int intel_enable_rc6(const struct drm_device *dev)
3419{ 3704{
3420 /* No RC6 before Ironlake */ 3705 /* No RC6 before Ironlake */
@@ -3429,18 +3714,13 @@ int intel_enable_rc6(const struct drm_device *dev)
3429 if (INTEL_INFO(dev)->gen == 5) 3714 if (INTEL_INFO(dev)->gen == 5)
3430 return 0; 3715 return 0;
3431 3716
3432 if (IS_HASWELL(dev)) { 3717 if (IS_HASWELL(dev))
3433 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
3434 return INTEL_RC6_ENABLE; 3718 return INTEL_RC6_ENABLE;
3435 }
3436 3719
3437 /* snb/ivb have more than one rc6 state. */ 3720 /* snb/ivb have more than one rc6 state. */
3438 if (INTEL_INFO(dev)->gen == 6) { 3721 if (INTEL_INFO(dev)->gen == 6)
3439 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
3440 return INTEL_RC6_ENABLE; 3722 return INTEL_RC6_ENABLE;
3441 }
3442 3723
3443 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
3444 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 3724 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3445} 3725}
3446 3726
@@ -3501,7 +3781,10 @@ static void gen6_enable_rps(struct drm_device *dev)
3501 3781
3502 /* In units of 50MHz */ 3782 /* In units of 50MHz */
3503 dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff; 3783 dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
3504 dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16; 3784 dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
3785 dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff;
3786 dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff;
3787 dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
3505 dev_priv->rps.cur_delay = 0; 3788 dev_priv->rps.cur_delay = 0;
3506 3789
3507 /* disable the counters and set deterministic thresholds */ 3790 /* disable the counters and set deterministic thresholds */
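As the hunk above shows, RP_STATE_CAP packs three frequency limits into one register, one byte each, in units of 50 MHz on these parts: RP0 in bits 7:0, RP1 in bits 15:8, and the minimum in bits 23:16, with RP1 reused as the "efficient" RPe point. A compilable decode of that layout (the example register value is made up):

#include <stdint.h>
#include <stdio.h>

struct rps_caps { uint8_t rp0, rp1, rpn; };

static struct rps_caps decode_rp_state_cap(uint32_t cap)
{
    struct rps_caps c = {
        .rp0 = (cap >> 0) & 0xff,     /* highest non-overclock state */
        .rp1 = (cap >> 8) & 0xff,     /* "efficient" state, reused as RPe */
        .rpn = (cap >> 16) & 0xff,    /* lowest state (min_delay) */
    };
    return c;
}

int main(void)
{
    struct rps_caps c = decode_rp_state_cap(0x0b1016);    /* hypothetical */
    printf("rp0=%u MHz rpn=%u MHz\n", c.rp0 * 50u, c.rpn * 50u);
    return 0;
}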
@@ -3539,48 +3822,16 @@ static void gen6_enable_rps(struct drm_device *dev)
3539 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; 3822 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3540 } 3823 }
3541 3824
3542 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", 3825 intel_print_rc6_info(dev, rc6_mask);
3543 (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3544 (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3545 (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3546 3826
3547 I915_WRITE(GEN6_RC_CONTROL, 3827 I915_WRITE(GEN6_RC_CONTROL,
3548 rc6_mask | 3828 rc6_mask |
3549 GEN6_RC_CTL_EI_MODE(1) | 3829 GEN6_RC_CTL_EI_MODE(1) |
3550 GEN6_RC_CTL_HW_ENABLE); 3830 GEN6_RC_CTL_HW_ENABLE);
3551 3831
3552 if (IS_HASWELL(dev)) { 3832 /* Power down if completely idle for over 50ms */
3553 I915_WRITE(GEN6_RPNSWREQ, 3833 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
3554 HSW_FREQUENCY(10));
3555 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3556 HSW_FREQUENCY(12));
3557 } else {
3558 I915_WRITE(GEN6_RPNSWREQ,
3559 GEN6_FREQUENCY(10) |
3560 GEN6_OFFSET(0) |
3561 GEN6_AGGRESSIVE_TURBO);
3562 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3563 GEN6_FREQUENCY(12));
3564 }
3565
3566 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
3567 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3568 dev_priv->rps.max_delay << 24 |
3569 dev_priv->rps.min_delay << 16);
3570
3571 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
3572 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3573 I915_WRITE(GEN6_RP_UP_EI, 66000);
3574 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3575
3576 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 3834 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3577 I915_WRITE(GEN6_RP_CONTROL,
3578 GEN6_RP_MEDIA_TURBO |
3579 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3580 GEN6_RP_MEDIA_IS_GFX |
3581 GEN6_RP_ENABLE |
3582 GEN6_RP_UP_BUSY_AVG |
3583 (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
3584 3835
3585 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); 3836 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3586 if (!ret) { 3837 if (!ret) {
@@ -3596,7 +3847,8 @@ static void gen6_enable_rps(struct drm_device *dev)
3596 DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); 3847 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3597 } 3848 }
3598 3849
3599 gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); 3850 dev_priv->rps.power = HIGH_POWER; /* force a reset */
3851 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3600 3852
3601 gen6_enable_rps_interrupts(dev); 3853 gen6_enable_rps_interrupts(dev);
3602 3854
@@ -3624,23 +3876,28 @@ void gen6_update_ring_freq(struct drm_device *dev)
3624 unsigned int gpu_freq; 3876 unsigned int gpu_freq;
3625 unsigned int max_ia_freq, min_ring_freq; 3877 unsigned int max_ia_freq, min_ring_freq;
3626 int scaling_factor = 180; 3878 int scaling_factor = 180;
3879 struct cpufreq_policy *policy;
3627 3880
3628 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3881 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3629 3882
3630 max_ia_freq = cpufreq_quick_get_max(0); 3883 policy = cpufreq_cpu_get(0);
3631 /* 3884 if (policy) {
3632 * Default to measured freq if none found, PCU will ensure we don't go 3885 max_ia_freq = policy->cpuinfo.max_freq;
3633 * over 3886 cpufreq_cpu_put(policy);
3634 */ 3887 } else {
3635 if (!max_ia_freq) 3888 /*
3889 * Default to measured freq if none found, PCU will ensure we
3890 * don't go over
3891 */
3636 max_ia_freq = tsc_khz; 3892 max_ia_freq = tsc_khz;
3893 }
3637 3894
3638 /* Convert from kHz to MHz */ 3895 /* Convert from kHz to MHz */
3639 max_ia_freq /= 1000; 3896 max_ia_freq /= 1000;
3640 3897
3641 min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK); 3898 min_ring_freq = I915_READ(DCLK) & 0xf;
3642 /* convert DDR frequency from units of 133.3MHz to bandwidth */ 3899 /* convert DDR frequency from units of 266.6MHz to bandwidth */
3643 min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3; 3900 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
3644 3901
3645 /* 3902 /*
3646 * For each potential GPU frequency, load a ring frequency we'd like 3903 * For each potential GPU frequency, load a ring frequency we'd like
@@ -3653,7 +3910,7 @@ void gen6_update_ring_freq(struct drm_device *dev)
3653 unsigned int ia_freq = 0, ring_freq = 0; 3910 unsigned int ia_freq = 0, ring_freq = 0;
3654 3911
3655 if (IS_HASWELL(dev)) { 3912 if (IS_HASWELL(dev)) {
3656 ring_freq = (gpu_freq * 5 + 3) / 4; 3913 ring_freq = mult_frac(gpu_freq, 5, 4);
3657 ring_freq = max(min_ring_freq, ring_freq); 3914 ring_freq = max(min_ring_freq, ring_freq);
3658 /* leave ia_freq as the default, chosen by cpufreq */ 3915 /* leave ia_freq as the default, chosen by cpufreq */
3659 } else { 3916 } else {
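mult_frac(x, n, d) scales x by n/d without letting the intermediate x*n overflow, by splitting x into quotient and remainder first. Note it truncates, whereas the expression it replaces here, (gpu_freq * 5 + 3) / 4, rounded up. A standalone equivalent:

#include <stdint.h>

static uint32_t mult_frac32(uint32_t x, uint32_t n, uint32_t d)
{
    uint32_t q = x / d;    /* whole multiples of d */
    uint32_t r = x % d;    /* leftover, always < d */
    return q * n + r * n / d;
}
/* e.g. gpu_freq 11: mult_frac32(11, 5, 4) == 13, vs (11*5 + 3)/4 == 14 */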
@@ -3709,24 +3966,6 @@ int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3709 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; 3966 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3710} 3967}
3711 3968
3712static void vlv_rps_timer_work(struct work_struct *work)
3713{
3714 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
3715 rps.vlv_work.work);
3716
3717 /*
3718 * Timer fired, we must be idle. Drop to min voltage state.
3719 * Note: we use RPe here since it should match the
3720 * Vmin we were shooting for. That should give us better
3721 * perf when we come back out of RC6 than if we used the
3722 * min freq available.
3723 */
3724 mutex_lock(&dev_priv->rps.hw_lock);
3725 if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
3726 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3727 mutex_unlock(&dev_priv->rps.hw_lock);
3728}
3729
3730static void valleyview_setup_pctx(struct drm_device *dev) 3969static void valleyview_setup_pctx(struct drm_device *dev)
3731{ 3970{
3732 struct drm_i915_private *dev_priv = dev->dev_private; 3971 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3773,13 +4012,14 @@ static void valleyview_enable_rps(struct drm_device *dev)
3773{ 4012{
3774 struct drm_i915_private *dev_priv = dev->dev_private; 4013 struct drm_i915_private *dev_priv = dev->dev_private;
3775 struct intel_ring_buffer *ring; 4014 struct intel_ring_buffer *ring;
3776 u32 gtfifodbg, val; 4015 u32 gtfifodbg, val, rc6_mode = 0;
3777 int i; 4016 int i;
3778 4017
3779 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4018 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3780 4019
3781 if ((gtfifodbg = I915_READ(GTFIFODBG))) { 4020 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3782 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); 4021 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
4022 gtfifodbg);
3783 I915_WRITE(GTFIFODBG, gtfifodbg); 4023 I915_WRITE(GTFIFODBG, gtfifodbg);
3784 } 4024 }
3785 4025
@@ -3812,9 +4052,16 @@ static void valleyview_enable_rps(struct drm_device *dev)
3812 I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350); 4052 I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
3813 4053
3814 /* allows RC6 residency counter to work */ 4054 /* allows RC6 residency counter to work */
3815 I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3)); 4055 I915_WRITE(VLV_COUNTER_CONTROL,
3816 I915_WRITE(GEN6_RC_CONTROL, 4056 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
3817 GEN7_RC_CTL_TO_MODE); 4057 VLV_MEDIA_RC6_COUNT_EN |
4058 VLV_RENDER_RC6_COUNT_EN));
4059 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4060 rc6_mode = GEN7_RC_CTL_TO_MODE;
4061
4062 intel_print_rc6_info(dev, rc6_mode);
4063
4064 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
3818 4065
3819 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 4066 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3820 switch ((val >> 6) & 3) { 4067 switch ((val >> 6) & 3) {
@@ -3985,6 +4232,8 @@ static void ironlake_enable_rc6(struct drm_device *dev)
3985 4232
3986 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN); 4233 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
3987 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 4234 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
4235
4236 intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
3988} 4237}
3989 4238
3990static unsigned long intel_pxfreq(u32 vidfreq) 4239static unsigned long intel_pxfreq(u32 vidfreq)
@@ -4603,13 +4852,12 @@ void intel_disable_gt_powersave(struct drm_device *dev)
4603 } else if (INTEL_INFO(dev)->gen >= 6) { 4852 } else if (INTEL_INFO(dev)->gen >= 6) {
4604 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work); 4853 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
4605 cancel_work_sync(&dev_priv->rps.work); 4854 cancel_work_sync(&dev_priv->rps.work);
4606 if (IS_VALLEYVIEW(dev))
4607 cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
4608 mutex_lock(&dev_priv->rps.hw_lock); 4855 mutex_lock(&dev_priv->rps.hw_lock);
4609 if (IS_VALLEYVIEW(dev)) 4856 if (IS_VALLEYVIEW(dev))
4610 valleyview_disable_rps(dev); 4857 valleyview_disable_rps(dev);
4611 else 4858 else
4612 gen6_disable_rps(dev); 4859 gen6_disable_rps(dev);
4860 dev_priv->rps.enabled = false;
4613 mutex_unlock(&dev_priv->rps.hw_lock); 4861 mutex_unlock(&dev_priv->rps.hw_lock);
4614 } 4862 }
4615} 4863}
@@ -4629,6 +4877,7 @@ static void intel_gen6_powersave_work(struct work_struct *work)
4629 gen6_enable_rps(dev); 4877 gen6_enable_rps(dev);
4630 gen6_update_ring_freq(dev); 4878 gen6_update_ring_freq(dev);
4631 } 4879 }
4880 dev_priv->rps.enabled = true;
4632 mutex_unlock(&dev_priv->rps.hw_lock); 4881 mutex_unlock(&dev_priv->rps.hw_lock);
4633} 4882}
4634 4883
@@ -4672,7 +4921,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
4672 I915_WRITE(DSPCNTR(pipe), 4921 I915_WRITE(DSPCNTR(pipe),
4673 I915_READ(DSPCNTR(pipe)) | 4922 I915_READ(DSPCNTR(pipe)) |
4674 DISPPLANE_TRICKLE_FEED_DISABLE); 4923 DISPPLANE_TRICKLE_FEED_DISABLE);
4675 intel_flush_display_plane(dev_priv, pipe); 4924 intel_flush_primary_plane(dev_priv, pipe);
4676 } 4925 }
4677} 4926}
4678 4927
@@ -5255,6 +5504,23 @@ void intel_suspend_hw(struct drm_device *dev)
5255 lpt_suspend_hw(dev); 5504 lpt_suspend_hw(dev);
5256} 5505}
5257 5506
5507static bool is_always_on_power_domain(struct drm_device *dev,
5508 enum intel_display_power_domain domain)
5509{
5510 unsigned long always_on_domains;
5511
5512 BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
5513
5514 if (IS_HASWELL(dev)) {
5515 always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
5516 } else {
5517 WARN_ON(1);
5518 return true;
5519 }
5520
5521 return BIT(domain) & always_on_domains;
5522}
5523
5258/** 5524/**
5259 * We should only use the power well if we explicitly asked the hardware to 5525 * We should only use the power well if we explicitly asked the hardware to
5260 * enable it, so check if it's enabled and also check if we've requested it to 5526 * enable it, so check if it's enabled and also check if we've requested it to
@@ -5268,23 +5534,11 @@ bool intel_display_power_enabled(struct drm_device *dev,
5268 if (!HAS_POWER_WELL(dev)) 5534 if (!HAS_POWER_WELL(dev))
5269 return true; 5535 return true;
5270 5536
5271 switch (domain) { 5537 if (is_always_on_power_domain(dev, domain))
5272 case POWER_DOMAIN_PIPE_A:
5273 case POWER_DOMAIN_TRANSCODER_EDP:
5274 return true; 5538 return true;
5275 case POWER_DOMAIN_PIPE_B: 5539
5276 case POWER_DOMAIN_PIPE_C: 5540 return I915_READ(HSW_PWR_WELL_DRIVER) ==
5277 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
5278 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
5279 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
5280 case POWER_DOMAIN_TRANSCODER_A:
5281 case POWER_DOMAIN_TRANSCODER_B:
5282 case POWER_DOMAIN_TRANSCODER_C:
5283 return I915_READ(HSW_PWR_WELL_DRIVER) ==
5284 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED); 5541 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
5285 default:
5286 BUG();
5287 }
5288} 5542}
5289 5543
5290static void __intel_set_power_well(struct drm_device *dev, bool enable) 5544static void __intel_set_power_well(struct drm_device *dev, bool enable)
@@ -5328,83 +5582,136 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
5328 spin_lock_irqsave(&dev->vbl_lock, irqflags); 5582 spin_lock_irqsave(&dev->vbl_lock, irqflags);
5329 for_each_pipe(p) 5583 for_each_pipe(p)
5330 if (p != PIPE_A) 5584 if (p != PIPE_A)
5331 dev->last_vblank[p] = 0; 5585 dev->vblank[p].last = 0;
5332 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 5586 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
5333 } 5587 }
5334 } 5588 }
5335} 5589}
5336 5590
5337static struct i915_power_well *hsw_pwr; 5591static void __intel_power_well_get(struct drm_device *dev,
5592 struct i915_power_well *power_well)
5593{
5594 if (!power_well->count++)
5595 __intel_set_power_well(dev, true);
5596}
5597
5598static void __intel_power_well_put(struct drm_device *dev,
5599 struct i915_power_well *power_well)
5600{
5601 WARN_ON(!power_well->count);
5602 if (!--power_well->count && i915_disable_power_well)
5603 __intel_set_power_well(dev, false);
5604}
5605
5606void intel_display_power_get(struct drm_device *dev,
5607 enum intel_display_power_domain domain)
5608{
5609 struct drm_i915_private *dev_priv = dev->dev_private;
5610 struct i915_power_domains *power_domains;
5611
5612 if (!HAS_POWER_WELL(dev))
5613 return;
5614
5615 if (is_always_on_power_domain(dev, domain))
5616 return;
5617
5618 power_domains = &dev_priv->power_domains;
5619
5620 mutex_lock(&power_domains->lock);
5621 __intel_power_well_get(dev, &power_domains->power_wells[0]);
5622 mutex_unlock(&power_domains->lock);
5623}
5624
5625void intel_display_power_put(struct drm_device *dev,
5626 enum intel_display_power_domain domain)
5627{
5628 struct drm_i915_private *dev_priv = dev->dev_private;
5629 struct i915_power_domains *power_domains;
5630
5631 if (!HAS_POWER_WELL(dev))
5632 return;
5633
5634 if (is_always_on_power_domain(dev, domain))
5635 return;
5636
5637 power_domains = &dev_priv->power_domains;
5638
5639 mutex_lock(&power_domains->lock);
5640 __intel_power_well_put(dev, &power_domains->power_wells[0]);
5641 mutex_unlock(&power_domains->lock);
5642}
5643
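The new get/put pair turns the power well into a refcount: the first reference powers it up, and dropping the last one powers it down (subject to the i915_disable_power_well modparam). A standalone model of that discipline, minus the hardware write and the power_domains->lock mutex:

#include <assert.h>
#include <stdbool.h>

struct power_well { int count; bool hw_on; };

static void well_get(struct power_well *w)
{
    if (!w->count++)
        w->hw_on = true;    /* first user: power the well up */
}

static void well_put(struct power_well *w)
{
    assert(w->count > 0);   /* unbalanced put is a caller bug */
    if (!--w->count)
        w->hw_on = false;   /* last user: allow power-down */
}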
5644static struct i915_power_domains *hsw_pwr;
5338 5645
5339/* Display audio driver power well request */ 5646/* Display audio driver power well request */
5340void i915_request_power_well(void) 5647void i915_request_power_well(void)
5341{ 5648{
5649 struct drm_i915_private *dev_priv;
5650
5342 if (WARN_ON(!hsw_pwr)) 5651 if (WARN_ON(!hsw_pwr))
5343 return; 5652 return;
5344 5653
5345 spin_lock_irq(&hsw_pwr->lock); 5654 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
5346 if (!hsw_pwr->count++ && 5655 power_domains);
5347 !hsw_pwr->i915_request) 5656
5348 __intel_set_power_well(hsw_pwr->device, true); 5657 mutex_lock(&hsw_pwr->lock);
5349 spin_unlock_irq(&hsw_pwr->lock); 5658 __intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]);
5659 mutex_unlock(&hsw_pwr->lock);
5350} 5660}
5351EXPORT_SYMBOL_GPL(i915_request_power_well); 5661EXPORT_SYMBOL_GPL(i915_request_power_well);
5352 5662
5353/* Display audio driver power well release */ 5663/* Display audio driver power well release */
5354void i915_release_power_well(void) 5664void i915_release_power_well(void)
5355{ 5665{
5666 struct drm_i915_private *dev_priv;
5667
5356 if (WARN_ON(!hsw_pwr)) 5668 if (WARN_ON(!hsw_pwr))
5357 return; 5669 return;
5358 5670
5359 spin_lock_irq(&hsw_pwr->lock); 5671 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
5360 WARN_ON(!hsw_pwr->count); 5672 power_domains);
5361 if (!--hsw_pwr->count && 5673
5362 !hsw_pwr->i915_request) 5674 mutex_lock(&hsw_pwr->lock);
5363 __intel_set_power_well(hsw_pwr->device, false); 5675 __intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]);
5364 spin_unlock_irq(&hsw_pwr->lock); 5676 mutex_unlock(&hsw_pwr->lock);
5365} 5677}
5366EXPORT_SYMBOL_GPL(i915_release_power_well); 5678EXPORT_SYMBOL_GPL(i915_release_power_well);
5367 5679
5368int i915_init_power_well(struct drm_device *dev) 5680int intel_power_domains_init(struct drm_device *dev)
5369{ 5681{
5370 struct drm_i915_private *dev_priv = dev->dev_private; 5682 struct drm_i915_private *dev_priv = dev->dev_private;
5683 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5684 struct i915_power_well *power_well;
5371 5685
5372 hsw_pwr = &dev_priv->power_well; 5686 mutex_init(&power_domains->lock);
5687 hsw_pwr = power_domains;
5373 5688
5374 hsw_pwr->device = dev; 5689 power_well = &power_domains->power_wells[0];
5375 spin_lock_init(&hsw_pwr->lock); 5690 power_well->count = 0;
5376 hsw_pwr->count = 0;
5377 5691
5378 return 0; 5692 return 0;
5379} 5693}
5380 5694
5381void i915_remove_power_well(struct drm_device *dev) 5695void intel_power_domains_remove(struct drm_device *dev)
5382{ 5696{
5383 hsw_pwr = NULL; 5697 hsw_pwr = NULL;
5384} 5698}
5385 5699
5386void intel_set_power_well(struct drm_device *dev, bool enable) 5700static void intel_power_domains_resume(struct drm_device *dev)
5387{ 5701{
5388 struct drm_i915_private *dev_priv = dev->dev_private; 5702 struct drm_i915_private *dev_priv = dev->dev_private;
5389 struct i915_power_well *power_well = &dev_priv->power_well; 5703 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5704 struct i915_power_well *power_well;
5390 5705
5391 if (!HAS_POWER_WELL(dev)) 5706 if (!HAS_POWER_WELL(dev))
5392 return; 5707 return;
5393 5708
5394 if (!i915_disable_power_well && !enable) 5709 mutex_lock(&power_domains->lock);
5395 return;
5396 5710
5397 spin_lock_irq(&power_well->lock); 5711 power_well = &power_domains->power_wells[0];
5398 power_well->i915_request = enable; 5712 __intel_set_power_well(dev, power_well->count > 0);
5399 5713
5400 /* only reject "disable" power well request */ 5714 mutex_unlock(&power_domains->lock);
5401 if (power_well->count && !enable) {
5402 spin_unlock_irq(&power_well->lock);
5403 return;
5404 }
5405
5406 __intel_set_power_well(dev, enable);
5407 spin_unlock_irq(&power_well->lock);
5408} 5715}
5409 5716
5410/* 5717/*
@@ -5413,7 +5720,7 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
5413 * to be enabled, and it will only be disabled if none of the registers is 5720 * to be enabled, and it will only be disabled if none of the registers is
5414 * requesting it to be enabled. 5721 * requesting it to be enabled.
5415 */ 5722 */
5416void intel_init_power_well(struct drm_device *dev) 5723void intel_power_domains_init_hw(struct drm_device *dev)
5417{ 5724{
5418 struct drm_i915_private *dev_priv = dev->dev_private; 5725 struct drm_i915_private *dev_priv = dev->dev_private;
5419 5726
@@ -5421,7 +5728,8 @@ void intel_init_power_well(struct drm_device *dev)
5421 return; 5728 return;
5422 5729
5423 /* For now, we need the power well to be always enabled. */ 5730 /* For now, we need the power well to be always enabled. */
5424 intel_set_power_well(dev, true); 5731 intel_display_set_init_power(dev, true);
5732 intel_power_domains_resume(dev);
5425 5733
5426 /* We're taking over the BIOS, so clear any requests made by it since 5734 /* We're taking over the BIOS, so clear any requests made by it since
5427 * the driver is in charge now. */ 5735 * the driver is in charge now. */
@@ -5686,7 +5994,5 @@ void intel_pm_init(struct drm_device *dev)
5686 5994
5687 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, 5995 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5688 intel_gen6_powersave_work); 5996 intel_gen6_powersave_work);
5689
5690 INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
5691} 5997}
5692 5998
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 460ee1026fca..2dec134f75eb 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -41,6 +41,16 @@ static inline int ring_space(struct intel_ring_buffer *ring)
41 return space; 41 return space;
42} 42}
43 43
44void __intel_ring_advance(struct intel_ring_buffer *ring)
45{
46 struct drm_i915_private *dev_priv = ring->dev->dev_private;
47
48 ring->tail &= ring->size - 1;
49 if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
50 return;
51 ring->write_tail(ring, ring->tail);
52}
53
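__intel_ring_advance wraps the software tail with a power-of-two mask and skips the MMIO tail write while this ring's bit is set in stop_rings (used to simulate hangs). The wrap itself is plain masking arithmetic:

#include <assert.h>
#include <stdint.h>

static uint32_t wrap_tail(uint32_t tail, uint32_t ring_size)
{
    assert((ring_size & (ring_size - 1)) == 0);    /* power of two */
    return tail & (ring_size - 1);
}
/* wrap_tail(4096 + 8, 4096) == 8: writes past the end wrap to the start */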
44static int 54static int
45gen2_render_ring_flush(struct intel_ring_buffer *ring, 55gen2_render_ring_flush(struct intel_ring_buffer *ring,
46 u32 invalidate_domains, 56 u32 invalidate_domains,
@@ -385,8 +395,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
385 int ret = 0; 395 int ret = 0;
386 u32 head; 396 u32 head;
387 397
388 if (HAS_FORCE_WAKE(dev)) 398 gen6_gt_force_wake_get(dev_priv);
389 gen6_gt_force_wake_get(dev_priv);
390 399
391 if (I915_NEED_GFX_HWS(dev)) 400 if (I915_NEED_GFX_HWS(dev))
392 intel_ring_setup_status_page(ring); 401 intel_ring_setup_status_page(ring);
@@ -459,8 +468,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
459 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); 468 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
460 469
461out: 470out:
462 if (HAS_FORCE_WAKE(dev)) 471 gen6_gt_force_wake_put(dev_priv);
463 gen6_gt_force_wake_put(dev_priv);
464 472
465 return ret; 473 return ret;
466} 474}
@@ -559,8 +567,8 @@ static int init_render_ring(struct intel_ring_buffer *ring)
559 if (INTEL_INFO(dev)->gen >= 6) 567 if (INTEL_INFO(dev)->gen >= 6)
560 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 568 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
561 569
562 if (HAS_L3_GPU_CACHE(dev)) 570 if (HAS_L3_DPF(dev))
563 I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); 571 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
564 572
565 return ret; 573 return ret;
566} 574}
@@ -593,7 +601,7 @@ update_mboxes(struct intel_ring_buffer *ring,
593#define MBOX_UPDATE_DWORDS 4 601#define MBOX_UPDATE_DWORDS 4
594 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 602 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
595 intel_ring_emit(ring, mmio_offset); 603 intel_ring_emit(ring, mmio_offset);
596 intel_ring_emit(ring, ring->outstanding_lazy_request); 604 intel_ring_emit(ring, ring->outstanding_lazy_seqno);
597 intel_ring_emit(ring, MI_NOOP); 605 intel_ring_emit(ring, MI_NOOP);
598} 606}
599 607
@@ -629,9 +637,9 @@ gen6_add_request(struct intel_ring_buffer *ring)
629 637
630 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 638 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
631 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 639 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
632 intel_ring_emit(ring, ring->outstanding_lazy_request); 640 intel_ring_emit(ring, ring->outstanding_lazy_seqno);
633 intel_ring_emit(ring, MI_USER_INTERRUPT); 641 intel_ring_emit(ring, MI_USER_INTERRUPT);
634 intel_ring_advance(ring); 642 __intel_ring_advance(ring);
635 643
636 return 0; 644 return 0;
637} 645}
@@ -723,7 +731,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
723 PIPE_CONTROL_WRITE_FLUSH | 731 PIPE_CONTROL_WRITE_FLUSH |
724 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); 732 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
725 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 733 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
726 intel_ring_emit(ring, ring->outstanding_lazy_request); 734 intel_ring_emit(ring, ring->outstanding_lazy_seqno);
727 intel_ring_emit(ring, 0); 735 intel_ring_emit(ring, 0);
728 PIPE_CONTROL_FLUSH(ring, scratch_addr); 736 PIPE_CONTROL_FLUSH(ring, scratch_addr);
729 scratch_addr += 128; /* write to separate cachelines */ 737 scratch_addr += 128; /* write to separate cachelines */
@@ -742,9 +750,9 @@ pc_render_add_request(struct intel_ring_buffer *ring)
742 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | 750 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
743 PIPE_CONTROL_NOTIFY); 751 PIPE_CONTROL_NOTIFY);
744 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 752 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
745 intel_ring_emit(ring, ring->outstanding_lazy_request); 753 intel_ring_emit(ring, ring->outstanding_lazy_seqno);
746 intel_ring_emit(ring, 0); 754 intel_ring_emit(ring, 0);
747 intel_ring_advance(ring); 755 __intel_ring_advance(ring);
748 756
749 return 0; 757 return 0;
750} 758}
@@ -963,9 +971,9 @@ i9xx_add_request(struct intel_ring_buffer *ring)
963 971
964 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 972 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
965 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 973 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
966 intel_ring_emit(ring, ring->outstanding_lazy_request); 974 intel_ring_emit(ring, ring->outstanding_lazy_seqno);
967 intel_ring_emit(ring, MI_USER_INTERRUPT); 975 intel_ring_emit(ring, MI_USER_INTERRUPT);
968 intel_ring_advance(ring); 976 __intel_ring_advance(ring);
969 977
970 return 0; 978 return 0;
971} 979}
@@ -987,10 +995,10 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
987 995
988 spin_lock_irqsave(&dev_priv->irq_lock, flags); 996 spin_lock_irqsave(&dev_priv->irq_lock, flags);
989 if (ring->irq_refcount++ == 0) { 997 if (ring->irq_refcount++ == 0) {
990 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) 998 if (HAS_L3_DPF(dev) && ring->id == RCS)
991 I915_WRITE_IMR(ring, 999 I915_WRITE_IMR(ring,
992 ~(ring->irq_enable_mask | 1000 ~(ring->irq_enable_mask |
993 GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); 1001 GT_PARITY_ERROR(dev)));
994 else 1002 else
995 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1003 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
996 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); 1004 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1009,9 +1017,8 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
1009 1017
1010 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1018 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1011 if (--ring->irq_refcount == 0) { 1019 if (--ring->irq_refcount == 0) {
1012 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) 1020 if (HAS_L3_DPF(dev) && ring->id == RCS)
1013 I915_WRITE_IMR(ring, 1021 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
1014 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1015 else 1022 else
1016 I915_WRITE_IMR(ring, ~0); 1023 I915_WRITE_IMR(ring, ~0);
1017 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); 1024 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1317,7 +1324,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1317 /* Disable the ring buffer. The ring must be idle at this point */ 1324 /* Disable the ring buffer. The ring must be idle at this point */
1318 dev_priv = ring->dev->dev_private; 1325 dev_priv = ring->dev->dev_private;
1319 ret = intel_ring_idle(ring); 1326 ret = intel_ring_idle(ring);
1320 if (ret) 1327 if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
1321 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", 1328 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1322 ring->name, ret); 1329 ring->name, ret);
1323 1330
@@ -1328,6 +1335,8 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1328 i915_gem_object_unpin(ring->obj); 1335 i915_gem_object_unpin(ring->obj);
1329 drm_gem_object_unreference(&ring->obj->base); 1336 drm_gem_object_unreference(&ring->obj->base);
1330 ring->obj = NULL; 1337 ring->obj = NULL;
1338 ring->preallocated_lazy_request = NULL;
1339 ring->outstanding_lazy_seqno = 0;
1331 1340
1332 if (ring->cleanup) 1341 if (ring->cleanup)
1333 ring->cleanup(ring); 1342 ring->cleanup(ring);
@@ -1414,6 +1423,9 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
1414 if (ret != -ENOSPC) 1423 if (ret != -ENOSPC)
1415 return ret; 1424 return ret;
1416 1425
1426 /* force the tail write in case we have been skipping them */
1427 __intel_ring_advance(ring);
1428
1417 trace_i915_ring_wait_begin(ring); 1429 trace_i915_ring_wait_begin(ring);
1418 /* With GEM the hangcheck timer should kick us out of the loop, 1430 /* With GEM the hangcheck timer should kick us out of the loop,
1419 * leaving it early runs the risk of corrupting GEM state (due 1431 * leaving it early runs the risk of corrupting GEM state (due
@@ -1475,7 +1487,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
1475 int ret; 1487 int ret;
1476 1488
1477 /* We need to add any requests required to flush the objects and ring */ 1489 /* We need to add any requests required to flush the objects and ring */
1478 if (ring->outstanding_lazy_request) { 1490 if (ring->outstanding_lazy_seqno) {
1479 ret = i915_add_request(ring, NULL); 1491 ret = i915_add_request(ring, NULL);
1480 if (ret) 1492 if (ret)
1481 return ret; 1493 return ret;
@@ -1495,10 +1507,20 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
1495static int 1507static int
1496intel_ring_alloc_seqno(struct intel_ring_buffer *ring) 1508intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
1497{ 1509{
1498 if (ring->outstanding_lazy_request) 1510 if (ring->outstanding_lazy_seqno)
1499 return 0; 1511 return 0;
1500 1512
1501 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request); 1513 if (ring->preallocated_lazy_request == NULL) {
1514 struct drm_i915_gem_request *request;
1515
1516 request = kmalloc(sizeof(*request), GFP_KERNEL);
1517 if (request == NULL)
1518 return -ENOMEM;
1519
1520 ring->preallocated_lazy_request = request;
1521 }
1522
1523 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
1502} 1524}
1503 1525
1504static int __intel_ring_begin(struct intel_ring_buffer *ring, 1526static int __intel_ring_begin(struct intel_ring_buffer *ring,
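The reworked intel_ring_alloc_seqno reserves the request struct before any ring contents are emitted, so the later add-request step has no allocation left that could fail mid-sequence. A standalone sketch of that reserve-early pattern (the struct and field names here are illustrative stand-ins):

#include <stdlib.h>

struct request { unsigned int seqno; };

struct ring_state {
    struct request *prealloc;    /* like preallocated_lazy_request */
    unsigned int lazy_seqno;     /* like outstanding_lazy_seqno */
};

static int reserve_request(struct ring_state *r, unsigned int next_seqno)
{
    if (r->lazy_seqno)
        return 0;    /* already reserved for this batch */

    if (!r->prealloc) {
        r->prealloc = malloc(sizeof(*r->prealloc));
        if (!r->prealloc)
            return -1;    /* fail here, before anything is emitted */
    }

    r->lazy_seqno = next_seqno;
    return 0;
}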
@@ -1545,7 +1567,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1545{ 1567{
1546 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1568 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1547 1569
1548 BUG_ON(ring->outstanding_lazy_request); 1570 BUG_ON(ring->outstanding_lazy_seqno);
1549 1571
1550 if (INTEL_INFO(ring->dev)->gen >= 6) { 1572 if (INTEL_INFO(ring->dev)->gen >= 6) {
1551 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); 1573 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
@@ -1558,17 +1580,6 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1558 ring->hangcheck.seqno = seqno; 1580 ring->hangcheck.seqno = seqno;
1559} 1581}
1560 1582
1561void intel_ring_advance(struct intel_ring_buffer *ring)
1562{
1563 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1564
1565 ring->tail &= ring->size - 1;
1566 if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
1567 return;
1568 ring->write_tail(ring, ring->tail);
1569}
1570
1571
1572static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, 1583static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1573 u32 value) 1584 u32 value)
1574{ 1585{
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 68b1ca974d59..71a73f4fe252 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -34,6 +34,7 @@ struct intel_hw_status_page {
34#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) 34#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
35 35
36enum intel_ring_hangcheck_action { 36enum intel_ring_hangcheck_action {
37 HANGCHECK_IDLE = 0,
37 HANGCHECK_WAIT, 38 HANGCHECK_WAIT,
38 HANGCHECK_ACTIVE, 39 HANGCHECK_ACTIVE,
39 HANGCHECK_KICK, 40 HANGCHECK_KICK,
@@ -140,7 +141,8 @@ struct intel_ring_buffer {
140 /** 141 /**
141 * Do we have some not yet emitted requests outstanding? 142 * Do we have some not yet emitted requests outstanding?
142 */ 143 */
143 u32 outstanding_lazy_request; 144 struct drm_i915_gem_request *preallocated_lazy_request;
145 u32 outstanding_lazy_seqno;
144 bool gpu_caches_dirty; 146 bool gpu_caches_dirty;
145 bool fbc_dirty; 147 bool fbc_dirty;
146 148
@@ -237,7 +239,12 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
237 iowrite32(data, ring->virtual_start + ring->tail); 239 iowrite32(data, ring->virtual_start + ring->tail);
238 ring->tail += 4; 240 ring->tail += 4;
239} 241}
240void intel_ring_advance(struct intel_ring_buffer *ring); 242static inline void intel_ring_advance(struct intel_ring_buffer *ring)
243{
244 ring->tail &= ring->size - 1;
245}
246void __intel_ring_advance(struct intel_ring_buffer *ring);
247
241int __must_check intel_ring_idle(struct intel_ring_buffer *ring); 248int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
242void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno); 249void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
243int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); 250int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
@@ -258,8 +265,8 @@ static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
258 265
259static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring) 266static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
260{ 267{
261 BUG_ON(ring->outstanding_lazy_request == 0); 268 BUG_ON(ring->outstanding_lazy_seqno == 0);
262 return ring->outstanding_lazy_request; 269 return ring->outstanding_lazy_seqno;
263} 270}
264 271
265static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) 272static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 49482fd5b76c..a583e8f718a7 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -539,7 +539,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
539 goto log_fail; 539 goto log_fail;
540 540
541 while ((status == SDVO_CMD_STATUS_PENDING || 541 while ((status == SDVO_CMD_STATUS_PENDING ||
542 status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) { 542 status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
543 if (retry < 10) 543 if (retry < 10)
544 msleep(15); 544 msleep(15);
545 else 545 else
@@ -1068,7 +1068,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
1068 1068
1069static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config) 1069static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
1070{ 1070{
1071 unsigned dotclock = pipe_config->adjusted_mode.clock; 1071 unsigned dotclock = pipe_config->port_clock;
1072 struct dpll *clock = &pipe_config->dpll; 1072 struct dpll *clock = &pipe_config->dpll;
1073 1073
1074	 /* SDVO TV has fixed PLL values that depend on its clock range, 1074	 /* SDVO TV has fixed PLL values that depend on its clock range,
@@ -1133,7 +1133,6 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1133 */ 1133 */
1134 pipe_config->pixel_multiplier = 1134 pipe_config->pixel_multiplier =
1135 intel_sdvo_get_pixel_multiplier(adjusted_mode); 1135 intel_sdvo_get_pixel_multiplier(adjusted_mode);
1136 adjusted_mode->clock *= pipe_config->pixel_multiplier;
1137 1136
1138 if (intel_sdvo->color_range_auto) { 1137 if (intel_sdvo->color_range_auto) {
1139 /* See CEA-861-E - 5.1 Default Encoding Parameters */ 1138 /* See CEA-861-E - 5.1 Default Encoding Parameters */
@@ -1217,11 +1216,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
1217 !intel_sdvo_set_tv_format(intel_sdvo)) 1216 !intel_sdvo_set_tv_format(intel_sdvo))
1218 return; 1217 return;
1219 1218
1220 /* We have tried to get input timing in mode_fixup, and filled into
1221 * adjusted_mode.
1222 */
1223 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); 1219 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1224 input_dtd.part1.clock /= crtc->config.pixel_multiplier;
1225 1220
1226 if (intel_sdvo->is_tv || intel_sdvo->is_lvds) 1221 if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
1227 input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags; 1222 input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
@@ -1330,6 +1325,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
1330 struct intel_sdvo *intel_sdvo = to_sdvo(encoder); 1325 struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
1331 struct intel_sdvo_dtd dtd; 1326 struct intel_sdvo_dtd dtd;
1332 int encoder_pixel_multiplier = 0; 1327 int encoder_pixel_multiplier = 0;
1328 int dotclock;
1333 u32 flags = 0, sdvox; 1329 u32 flags = 0, sdvox;
1334 u8 val; 1330 u8 val;
1335 bool ret; 1331 bool ret;
@@ -1368,6 +1364,13 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
1368 >> SDVO_PORT_MULTIPLY_SHIFT) + 1; 1364 >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
1369 } 1365 }
1370 1366
1367 dotclock = pipe_config->port_clock / pipe_config->pixel_multiplier;
1368
1369 if (HAS_PCH_SPLIT(dev))
1370 ironlake_check_encoder_dotclock(pipe_config, dotclock);
1371
1372 pipe_config->adjusted_mode.crtc_clock = dotclock;
1373
1371 /* Cross check the port pixel multiplier with the sdvo encoder state. */ 1374 /* Cross check the port pixel multiplier with the sdvo encoder state. */
1372 if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, 1375 if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
1373 &val, 1)) { 1376 &val, 1)) {
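The readback above recovers the pipe's dotclock by undoing the SDVO pixel multiplier: SDVO drives slow modes at a multiplied link rate (roughly 2x for modes in the ~50-100 MHz range, 4x below that), so dividing the port clock by the multiplier restores the original mode clock. A worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
    int port_clock = 54000;     /* kHz on the SDVO link, hypothetical */
    int pixel_multiplier = 2;   /* a 27 MHz TV mode sent doubled */
    printf("dotclock = %d kHz\n", port_clock / pixel_multiplier);  /* 27000 */
    return 0;
}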
@@ -1770,6 +1773,9 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1770{ 1773{
1771 struct edid *edid; 1774 struct edid *edid;
1772 1775
1776 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1777 connector->base.id, drm_get_connector_name(connector));
1778
1773 /* set the bus switch and get the modes */ 1779 /* set the bus switch and get the modes */
1774 edid = intel_sdvo_get_edid(connector); 1780 edid = intel_sdvo_get_edid(connector);
1775 1781
@@ -1865,6 +1871,9 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1865 uint32_t reply = 0, format_map = 0; 1871 uint32_t reply = 0, format_map = 0;
1866 int i; 1872 int i;
1867 1873
1874 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1875 connector->base.id, drm_get_connector_name(connector));
1876
1868 /* Read the list of supported input resolutions for the selected TV 1877 /* Read the list of supported input resolutions for the selected TV
1869 * format. 1878 * format.
1870 */ 1879 */
@@ -1899,6 +1908,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1899 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1908 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1900 struct drm_display_mode *newmode; 1909 struct drm_display_mode *newmode;
1901 1910
1911 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1912 connector->base.id, drm_get_connector_name(connector));
1913
1902 /* 1914 /*
1903 * Fetch modes from VBT. For SDVO prefer the VBT mode since some 1915 * Fetch modes from VBT. For SDVO prefer the VBT mode since some
1904 * SDVO->LVDS transcoders can't cope with the EDID mode. 1916 * SDVO->LVDS transcoders can't cope with the EDID mode.
@@ -1930,7 +1942,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1930 break; 1942 break;
1931 } 1943 }
1932 } 1944 }
1933
1934} 1945}
1935 1946
1936static int intel_sdvo_get_modes(struct drm_connector *connector) 1947static int intel_sdvo_get_modes(struct drm_connector *connector)
@@ -1998,7 +2009,6 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1998 intel_sdvo_connector->tv_format); 2009 intel_sdvo_connector->tv_format);
1999 2010
2000 intel_sdvo_destroy_enhance_property(connector); 2011 intel_sdvo_destroy_enhance_property(connector);
2001 drm_sysfs_connector_remove(connector);
2002 drm_connector_cleanup(connector); 2012 drm_connector_cleanup(connector);
2003 kfree(intel_sdvo_connector); 2013 kfree(intel_sdvo_connector);
2004} 2014}
@@ -2394,7 +2404,9 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2394 struct intel_connector *intel_connector; 2404 struct intel_connector *intel_connector;
2395 struct intel_sdvo_connector *intel_sdvo_connector; 2405 struct intel_sdvo_connector *intel_sdvo_connector;
2396 2406
2397 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); 2407 DRM_DEBUG_KMS("initialising DVI device %d\n", device);
2408
2409 intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
2398 if (!intel_sdvo_connector) 2410 if (!intel_sdvo_connector)
2399 return false; 2411 return false;
2400 2412
@@ -2442,7 +2454,9 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2442 struct intel_connector *intel_connector; 2454 struct intel_connector *intel_connector;
2443 struct intel_sdvo_connector *intel_sdvo_connector; 2455 struct intel_sdvo_connector *intel_sdvo_connector;
2444 2456
2445 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); 2457 DRM_DEBUG_KMS("initialising TV type %d\n", type);
2458
2459 intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
2446 if (!intel_sdvo_connector) 2460 if (!intel_sdvo_connector)
2447 return false; 2461 return false;
2448 2462
@@ -2467,6 +2481,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2467 return true; 2481 return true;
2468 2482
2469err: 2483err:
2484 drm_sysfs_connector_remove(connector);
2470 intel_sdvo_destroy(connector); 2485 intel_sdvo_destroy(connector);
2471 return false; 2486 return false;
2472} 2487}
@@ -2479,7 +2494,9 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
2479 struct intel_connector *intel_connector; 2494 struct intel_connector *intel_connector;
2480 struct intel_sdvo_connector *intel_sdvo_connector; 2495 struct intel_sdvo_connector *intel_sdvo_connector;
2481 2496
2482 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); 2497 DRM_DEBUG_KMS("initialising analog device %d\n", device);
2498
2499 intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
2483 if (!intel_sdvo_connector) 2500 if (!intel_sdvo_connector)
2484 return false; 2501 return false;
2485 2502
@@ -2510,7 +2527,9 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2510 struct intel_connector *intel_connector; 2527 struct intel_connector *intel_connector;
2511 struct intel_sdvo_connector *intel_sdvo_connector; 2528 struct intel_sdvo_connector *intel_sdvo_connector;
2512 2529
2513 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); 2530 DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
2531
2532 intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
2514 if (!intel_sdvo_connector) 2533 if (!intel_sdvo_connector)
2515 return false; 2534 return false;
2516 2535
@@ -2534,6 +2553,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2534 return true; 2553 return true;
2535 2554
2536err: 2555err:
2556 drm_sysfs_connector_remove(connector);
2537 intel_sdvo_destroy(connector); 2557 intel_sdvo_destroy(connector);
2538 return false; 2558 return false;
2539} 2559}
@@ -2605,8 +2625,10 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
2605 2625
2606 list_for_each_entry_safe(connector, tmp, 2626 list_for_each_entry_safe(connector, tmp,
2607 &dev->mode_config.connector_list, head) { 2627 &dev->mode_config.connector_list, head) {
2608 if (intel_attached_encoder(connector) == &intel_sdvo->base) 2628 if (intel_attached_encoder(connector) == &intel_sdvo->base) {
2629 drm_sysfs_connector_remove(connector);
2609 intel_sdvo_destroy(connector); 2630 intel_sdvo_destroy(connector);
2631 }
2610 } 2632 }
2611} 2633}
2612 2634
@@ -2876,7 +2898,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2876 struct intel_encoder *intel_encoder; 2898 struct intel_encoder *intel_encoder;
2877 struct intel_sdvo *intel_sdvo; 2899 struct intel_sdvo *intel_sdvo;
2878 int i; 2900 int i;
2879 intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); 2901 intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
2880 if (!intel_sdvo) 2902 if (!intel_sdvo)
2881 return false; 2903 return false;
2882 2904
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 9a0e6c5ea540..9944d8135e87 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -25,7 +25,10 @@
25#include "i915_drv.h" 25#include "i915_drv.h"
26#include "intel_drv.h" 26#include "intel_drv.h"
27 27
28/* IOSF sideband */ 28/*
29 * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
30 * VLV_VLV2_PUNIT_HAS_0.8.docx
31 */
29static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn, 32static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
30 u32 port, u32 opcode, u32 addr, u32 *val) 33 u32 port, u32 opcode, u32 addr, u32 *val)
31{ 34{
@@ -101,19 +104,83 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
101 return val; 104 return val;
102} 105}
103 106
104u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg) 107u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
105{ 108{
106 u32 val = 0; 109 u32 val = 0;
110 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
111 PUNIT_OPCODE_REG_READ, reg, &val);
112 return val;
113}
107 114
108 vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO, 115void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
109 DPIO_OPCODE_REG_READ, reg, &val); 116{
117 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
118 PUNIT_OPCODE_REG_WRITE, reg, &val);
119}
120
121u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
122{
123 u32 val = 0;
124 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
125 PUNIT_OPCODE_REG_READ, reg, &val);
126 return val;
127}
128
129void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
130{
131 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
132 PUNIT_OPCODE_REG_WRITE, reg, &val);
133}
134
135u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
136{
137 u32 val = 0;
138 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
139 PUNIT_OPCODE_REG_READ, reg, &val);
140 return val;
141}
110 142
143void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
144{
145 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
146 PUNIT_OPCODE_REG_WRITE, reg, &val);
147}
148
149u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
150{
151 u32 val = 0;
152 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
153 PUNIT_OPCODE_REG_READ, reg, &val);
154 return val;
155}
156
157void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
158{
159 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
160 PUNIT_OPCODE_REG_WRITE, reg, &val);
161}
162
163static u32 vlv_get_phy_port(enum pipe pipe)
164{
165 u32 port = IOSF_PORT_DPIO;
166
 167	WARN_ON((pipe != PIPE_A) && (pipe != PIPE_B));
168
169 return port;
170}
171
172u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
173{
174 u32 val = 0;
175
176 vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
177 DPIO_OPCODE_REG_READ, reg, &val);
111 return val; 178 return val;
112} 179}
113 180
114void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val) 181void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
115{ 182{
116 vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO, 183 vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
117 DPIO_OPCODE_REG_WRITE, reg, &val); 184 DPIO_OPCODE_REG_WRITE, reg, &val);
118} 185}
119 186
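The new vlv_gpio_nc/cck/ccu/gps_core helpers above all funnel through the one vlv_sideband_rw() routine, differing only in destination port and opcode; read and write can share the same call because the value travels through a pointer in both directions. A minimal userspace model of that shape (the port/opcode numbers and the backing store are invented for illustration, not the real IOSF constants):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the IOSF_PORT_* and PUNIT_OPCODE_* constants. */
enum { PORT_GPIO_NC = 0x13, PORT_CCK = 0x14 };
enum { OP_READ = 6, OP_WRITE = 7 };

static uint32_t fake_regs[2][256];	/* toy backing store, one bank per port */

/* One shared transfer routine, like vlv_sideband_rw(): the opcode decides
 * direction, and *val is either filled in (read) or consumed (write). */
static int sideband_rw(unsigned port, unsigned op, uint32_t addr, uint32_t *val)
{
	unsigned bank = (port == PORT_CCK);

	if (addr >= 256)
		return -1;
	if (op == OP_READ)
		*val = fake_regs[bank][addr];
	else
		fake_regs[bank][addr] = *val;
	return 0;
}

/* Each per-unit wrapper reduces to a port + opcode pairing, exactly the
 * shape of the vlv_*_read()/vlv_*_write() helpers added in the hunk. */
static uint32_t gpio_nc_read(uint32_t reg)
{
	uint32_t val = 0;

	sideband_rw(PORT_GPIO_NC, OP_READ, reg, &val);
	return val;
}

static void gpio_nc_write(uint32_t reg, uint32_t val)
{
	sideband_rw(PORT_GPIO_NC, OP_WRITE, reg, &val);
}

int main(void)
{
	gpio_nc_write(0x10, 0xdeadbeef);
	printf("0x%08x\n", gpio_nc_read(0x10));
	return 0;
}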
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index ad6ec4b39005..8afaad6bcc48 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -288,7 +288,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
288 dev_priv->sprite_scaling_enabled |= 1 << pipe; 288 dev_priv->sprite_scaling_enabled |= 1 << pipe;
289 289
290 if (!scaling_was_enabled) { 290 if (!scaling_was_enabled) {
291 intel_update_watermarks(dev); 291 intel_update_watermarks(crtc);
292 intel_wait_for_vblank(dev, pipe); 292 intel_wait_for_vblank(dev, pipe);
293 } 293 }
294 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; 294 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
@@ -323,7 +323,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
323 323
324 /* potentially re-enable LP watermarks */ 324 /* potentially re-enable LP watermarks */
325 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) 325 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
326 intel_update_watermarks(dev); 326 intel_update_watermarks(crtc);
327} 327}
328 328
329static void 329static void
@@ -349,7 +349,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
349 349
350 /* potentially re-enable LP watermarks */ 350 /* potentially re-enable LP watermarks */
351 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) 351 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
352 intel_update_watermarks(dev); 352 intel_update_watermarks(crtc);
353} 353}
354 354
355static int 355static int
@@ -521,13 +521,28 @@ intel_enable_primary(struct drm_crtc *crtc)
521 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 521 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
522 int reg = DSPCNTR(intel_crtc->plane); 522 int reg = DSPCNTR(intel_crtc->plane);
523 523
524 if (!intel_crtc->primary_disabled) 524 if (intel_crtc->primary_enabled)
525 return; 525 return;
526 526
527 intel_crtc->primary_disabled = false; 527 intel_crtc->primary_enabled = true;
528 intel_update_fbc(dev);
529 528
530 I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE); 529 I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
530 intel_flush_primary_plane(dev_priv, intel_crtc->plane);
531
532 /*
533 * FIXME IPS should be fine as long as one plane is
534 * enabled, but in practice it seems to have problems
535 * when going from primary only to sprite only and vice
536 * versa.
537 */
538 if (intel_crtc->config.ips_enabled) {
539 intel_wait_for_vblank(dev, intel_crtc->pipe);
540 hsw_enable_ips(intel_crtc);
541 }
542
543 mutex_lock(&dev->struct_mutex);
544 intel_update_fbc(dev);
545 mutex_unlock(&dev->struct_mutex);
531} 546}
532 547
533static void 548static void
@@ -538,13 +553,26 @@ intel_disable_primary(struct drm_crtc *crtc)
538 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 553 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
539 int reg = DSPCNTR(intel_crtc->plane); 554 int reg = DSPCNTR(intel_crtc->plane);
540 555
541 if (intel_crtc->primary_disabled) 556 if (!intel_crtc->primary_enabled)
542 return; 557 return;
543 558
544 I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE); 559 intel_crtc->primary_enabled = false;
545 560
546 intel_crtc->primary_disabled = true; 561 mutex_lock(&dev->struct_mutex);
547 intel_update_fbc(dev); 562 if (dev_priv->fbc.plane == intel_crtc->plane)
563 intel_disable_fbc(dev);
564 mutex_unlock(&dev->struct_mutex);
565
566 /*
567 * FIXME IPS should be fine as long as one plane is
568 * enabled, but in practice it seems to have problems
569 * when going from primary only to sprite only and vice
570 * versa.
571 */
572 hsw_disable_ips(intel_crtc);
573
574 I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
575 intel_flush_primary_plane(dev_priv, intel_crtc->plane);
548} 576}
549 577
550static int 578static int
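Both hunks above enforce a strict ordering around the Haswell IPS workaround: on enable, the primary plane is switched on and flushed before IPS is re-armed after a vblank; on disable, FBC and IPS are torn down before the plane bit is cleared. A toy model of that invariant (plain booleans stand in for the hardware; all names are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Invariant encoded by the two hunks: IPS may only be active while the
 * primary plane is enabled, so the plane always brackets IPS. */
static bool plane_on, ips_on;

static void enable_primary(void)
{
	if (plane_on)
		return;
	plane_on = true;	/* 1. plane first, then flush */
	ips_on = true;		/* 2. after a vblank, re-arm IPS (mirrors
				 *    intel_wait_for_vblank() + hsw_enable_ips()) */
}

static void disable_primary(void)
{
	if (!plane_on)
		return;
	ips_on = false;		/* 1. FBC and IPS off first */
	plane_on = false;	/* 2. only then clear the plane enable bit */
}

int main(void)
{
	enable_primary();
	printf("plane=%d ips=%d\n", plane_on, ips_on);
	disable_primary();
	printf("plane=%d ips=%d\n", plane_on, ips_on);
	return 0;
}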
@@ -623,15 +651,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
623 uint32_t src_w, uint32_t src_h) 651 uint32_t src_w, uint32_t src_h)
624{ 652{
625 struct drm_device *dev = plane->dev; 653 struct drm_device *dev = plane->dev;
626 struct drm_i915_private *dev_priv = dev->dev_private;
627 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 654 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
628 struct intel_plane *intel_plane = to_intel_plane(plane); 655 struct intel_plane *intel_plane = to_intel_plane(plane);
629 struct intel_framebuffer *intel_fb; 656 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
630 struct drm_i915_gem_object *obj, *old_obj; 657 struct drm_i915_gem_object *obj = intel_fb->obj;
631 int pipe = intel_plane->pipe; 658 struct drm_i915_gem_object *old_obj = intel_plane->obj;
632 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 659 int ret;
633 pipe);
634 int ret = 0;
635 bool disable_primary = false; 660 bool disable_primary = false;
636 bool visible; 661 bool visible;
637 int hscale, vscale; 662 int hscale, vscale;
@@ -652,29 +677,23 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
652 .y2 = crtc_y + crtc_h, 677 .y2 = crtc_y + crtc_h,
653 }; 678 };
654 const struct drm_rect clip = { 679 const struct drm_rect clip = {
655 .x2 = crtc->mode.hdisplay, 680 .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
656 .y2 = crtc->mode.vdisplay, 681 .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
682 };
683 const struct {
684 int crtc_x, crtc_y;
685 unsigned int crtc_w, crtc_h;
686 uint32_t src_x, src_y, src_w, src_h;
687 } orig = {
688 .crtc_x = crtc_x,
689 .crtc_y = crtc_y,
690 .crtc_w = crtc_w,
691 .crtc_h = crtc_h,
692 .src_x = src_x,
693 .src_y = src_y,
694 .src_w = src_w,
695 .src_h = src_h,
657 }; 696 };
658
659 intel_fb = to_intel_framebuffer(fb);
660 obj = intel_fb->obj;
661
662 old_obj = intel_plane->obj;
663
664 intel_plane->crtc_x = crtc_x;
665 intel_plane->crtc_y = crtc_y;
666 intel_plane->crtc_w = crtc_w;
667 intel_plane->crtc_h = crtc_h;
668 intel_plane->src_x = src_x;
669 intel_plane->src_y = src_y;
670 intel_plane->src_w = src_w;
671 intel_plane->src_h = src_h;
672
673 /* Pipe must be running... */
674 if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) {
675 DRM_DEBUG_KMS("Pipe disabled\n");
676 return -EINVAL;
677 }
678 697
679 /* Don't modify another pipe's plane */ 698 /* Don't modify another pipe's plane */
680 if (intel_plane->pipe != intel_crtc->pipe) { 699 if (intel_plane->pipe != intel_crtc->pipe) {
@@ -810,7 +829,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
810 * we can disable the primary and save power. 829 * we can disable the primary and save power.
811 */ 830 */
812 disable_primary = drm_rect_equals(&dst, &clip); 831 disable_primary = drm_rect_equals(&dst, &clip);
813 WARN_ON(disable_primary && !visible); 832 WARN_ON(disable_primary && !visible && intel_crtc->active);
814 833
815 mutex_lock(&dev->struct_mutex); 834 mutex_lock(&dev->struct_mutex);
816 835
@@ -820,27 +839,40 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
820 * the sprite planes only require 128KiB alignment and 32 PTE padding. 839 * the sprite planes only require 128KiB alignment and 32 PTE padding.
821 */ 840 */
822 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); 841 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
823 if (ret)
824 goto out_unlock;
825 842
826 intel_plane->obj = obj; 843 mutex_unlock(&dev->struct_mutex);
827
828 /*
829 * Be sure to re-enable the primary before the sprite is no longer
830 * covering it fully.
831 */
832 if (!disable_primary)
833 intel_enable_primary(crtc);
834 844
835 if (visible) 845 if (ret)
836 intel_plane->update_plane(plane, crtc, fb, obj, 846 return ret;
837 crtc_x, crtc_y, crtc_w, crtc_h, 847
838 src_x, src_y, src_w, src_h); 848 intel_plane->crtc_x = orig.crtc_x;
839 else 849 intel_plane->crtc_y = orig.crtc_y;
840 intel_plane->disable_plane(plane, crtc); 850 intel_plane->crtc_w = orig.crtc_w;
851 intel_plane->crtc_h = orig.crtc_h;
852 intel_plane->src_x = orig.src_x;
853 intel_plane->src_y = orig.src_y;
854 intel_plane->src_w = orig.src_w;
855 intel_plane->src_h = orig.src_h;
856 intel_plane->obj = obj;
841 857
842 if (disable_primary) 858 if (intel_crtc->active) {
843 intel_disable_primary(crtc); 859 /*
860 * Be sure to re-enable the primary before the sprite is no longer
861 * covering it fully.
862 */
863 if (!disable_primary)
864 intel_enable_primary(crtc);
865
866 if (visible)
867 intel_plane->update_plane(plane, crtc, fb, obj,
868 crtc_x, crtc_y, crtc_w, crtc_h,
869 src_x, src_y, src_w, src_h);
870 else
871 intel_plane->disable_plane(plane, crtc);
872
873 if (disable_primary)
874 intel_disable_primary(crtc);
875 }
844 876
845 /* Unpin old obj after new one is active to avoid ugliness */ 877 /* Unpin old obj after new one is active to avoid ugliness */
846 if (old_obj) { 878 if (old_obj) {
@@ -850,17 +882,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
850 * wait for vblank to avoid ugliness, we only need to 882 * wait for vblank to avoid ugliness, we only need to
851 * do the pin & ref bookkeeping. 883 * do the pin & ref bookkeeping.
852 */ 884 */
853 if (old_obj != obj) { 885 if (old_obj != obj && intel_crtc->active)
854 mutex_unlock(&dev->struct_mutex); 886 intel_wait_for_vblank(dev, intel_crtc->pipe);
855 intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); 887
856 mutex_lock(&dev->struct_mutex); 888 mutex_lock(&dev->struct_mutex);
857 }
858 intel_unpin_fb_obj(old_obj); 889 intel_unpin_fb_obj(old_obj);
890 mutex_unlock(&dev->struct_mutex);
859 } 891 }
860 892
861out_unlock: 893 return 0;
862 mutex_unlock(&dev->struct_mutex);
863 return ret;
864} 894}
865 895
866static int 896static int
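The rewritten intel_update_plane() above snapshots the caller's coordinates in a local "orig" struct before clipping mutates the working copies, and commits them to intel_plane only after pinning has succeeded, so an error path never leaves stale state behind. A minimal sketch of that pattern (invented struct and helper names):

#include <stdio.h>

struct coords { int x, y, w, h; };
struct plane_state { struct coords cur; };

static int pin_fb(int fail) { return fail ? -1 : 0; }

/* Snapshot the request, let clipping scribble on the working copy, and only
 * publish the snapshot once the fallible step (pinning) has succeeded. */
static int update_plane(struct plane_state *st, struct coords req, int fail_pin)
{
	const struct coords orig = req;	/* untouched user request */

	if (req.w > 100)		/* clipping may shrink the working copy */
		req.w = 100;

	if (pin_fb(fail_pin))
		return -1;		/* st->cur left untouched on error */

	st->cur = orig;			/* publish the original request */
	return 0;
}

int main(void)
{
	struct plane_state st = { { 0, 0, 0, 0 } };
	struct coords req = { 10, 10, 200, 50 };

	update_plane(&st, req, 0);
	printf("stored w=%d (request preserved, not the clipped value)\n",
	       st.cur.w);
	return 0;
}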
@@ -868,7 +898,7 @@ intel_disable_plane(struct drm_plane *plane)
868{ 898{
869 struct drm_device *dev = plane->dev; 899 struct drm_device *dev = plane->dev;
870 struct intel_plane *intel_plane = to_intel_plane(plane); 900 struct intel_plane *intel_plane = to_intel_plane(plane);
871 int ret = 0; 901 struct intel_crtc *intel_crtc;
872 902
873 if (!plane->fb) 903 if (!plane->fb)
874 return 0; 904 return 0;
@@ -876,21 +906,25 @@ intel_disable_plane(struct drm_plane *plane)
876 if (WARN_ON(!plane->crtc)) 906 if (WARN_ON(!plane->crtc))
877 return -EINVAL; 907 return -EINVAL;
878 908
879 intel_enable_primary(plane->crtc); 909 intel_crtc = to_intel_crtc(plane->crtc);
880 intel_plane->disable_plane(plane, plane->crtc);
881 910
882 if (!intel_plane->obj) 911 if (intel_crtc->active) {
883 goto out; 912 intel_enable_primary(plane->crtc);
913 intel_plane->disable_plane(plane, plane->crtc);
914 }
884 915
885 intel_wait_for_vblank(dev, intel_plane->pipe); 916 if (intel_plane->obj) {
917 if (intel_crtc->active)
918 intel_wait_for_vblank(dev, intel_plane->pipe);
886 919
887 mutex_lock(&dev->struct_mutex); 920 mutex_lock(&dev->struct_mutex);
888 intel_unpin_fb_obj(intel_plane->obj); 921 intel_unpin_fb_obj(intel_plane->obj);
889 intel_plane->obj = NULL; 922 mutex_unlock(&dev->struct_mutex);
890 mutex_unlock(&dev->struct_mutex);
891out:
892 923
893 return ret; 924 intel_plane->obj = NULL;
925 }
926
927 return 0;
894} 928}
895 929
896static void intel_destroy_plane(struct drm_plane *plane) 930static void intel_destroy_plane(struct drm_plane *plane)
@@ -1034,7 +1068,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1034 if (INTEL_INFO(dev)->gen < 5) 1068 if (INTEL_INFO(dev)->gen < 5)
1035 return -ENODEV; 1069 return -ENODEV;
1036 1070
1037 intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL); 1071 intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
1038 if (!intel_plane) 1072 if (!intel_plane)
1039 return -ENOMEM; 1073 return -ENOMEM;
1040 1074
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index dd6f84bf6c22..18c406246a2d 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -912,7 +912,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
912 if (!tv_mode) 912 if (!tv_mode)
913 return false; 913 return false;
914 914
915 pipe_config->adjusted_mode.clock = tv_mode->clock; 915 pipe_config->adjusted_mode.crtc_clock = tv_mode->clock;
916 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); 916 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
917 pipe_config->pipe_bpp = 8*3; 917 pipe_config->pipe_bpp = 8*3;
918 918
@@ -1044,7 +1044,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
1044 tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT; 1044 tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
1045 1045
1046 /* Enable two fixes for the chips that need them. */ 1046 /* Enable two fixes for the chips that need them. */
1047 if (dev->pci_device < 0x2772) 1047 if (dev->pdev->device < 0x2772)
1048 tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX; 1048 tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
1049 1049
1050 I915_WRITE(TV_H_CTL_1, hctl1); 1050 I915_WRITE(TV_H_CTL_1, hctl1);
@@ -1094,7 +1094,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
1094 unsigned int xsize, ysize; 1094 unsigned int xsize, ysize;
1095 /* Pipe must be off here */ 1095 /* Pipe must be off here */
1096 I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); 1096 I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
1097 intel_flush_display_plane(dev_priv, intel_crtc->plane); 1097 intel_flush_primary_plane(dev_priv, intel_crtc->plane);
1098 1098
1099 /* Wait for vblank for the disable to take effect */ 1099 /* Wait for vblank for the disable to take effect */
1100 if (IS_GEN2(dev)) 1100 if (IS_GEN2(dev))
@@ -1123,7 +1123,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
1123 1123
1124 I915_WRITE(pipeconf_reg, pipeconf); 1124 I915_WRITE(pipeconf_reg, pipeconf);
1125 I915_WRITE(dspcntr_reg, dspcntr); 1125 I915_WRITE(dspcntr_reg, dspcntr);
1126 intel_flush_display_plane(dev_priv, intel_crtc->plane); 1126 intel_flush_primary_plane(dev_priv, intel_crtc->plane);
1127 } 1127 }
1128 1128
1129 j = 0; 1129 j = 0;
@@ -1433,7 +1433,6 @@ intel_tv_get_modes(struct drm_connector *connector)
1433static void 1433static void
1434intel_tv_destroy(struct drm_connector *connector) 1434intel_tv_destroy(struct drm_connector *connector)
1435{ 1435{
1436 drm_sysfs_connector_remove(connector);
1437 drm_connector_cleanup(connector); 1436 drm_connector_cleanup(connector);
1438 kfree(connector); 1437 kfree(connector);
1439} 1438}
@@ -1518,7 +1517,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
1518static int tv_is_present_in_vbt(struct drm_device *dev) 1517static int tv_is_present_in_vbt(struct drm_device *dev)
1519{ 1518{
1520 struct drm_i915_private *dev_priv = dev->dev_private; 1519 struct drm_i915_private *dev_priv = dev->dev_private;
1521 struct child_device_config *p_child; 1520 union child_device_config *p_child;
1522 int i, ret; 1521 int i, ret;
1523 1522
1524 if (!dev_priv->vbt.child_dev_num) 1523 if (!dev_priv->vbt.child_dev_num)
@@ -1530,13 +1529,13 @@ static int tv_is_present_in_vbt(struct drm_device *dev)
1530 /* 1529 /*
1531 * If the device type is not TV, continue. 1530 * If the device type is not TV, continue.
1532 */ 1531 */
1533 if (p_child->device_type != DEVICE_TYPE_INT_TV && 1532 if (p_child->old.device_type != DEVICE_TYPE_INT_TV &&
1534 p_child->device_type != DEVICE_TYPE_TV) 1533 p_child->old.device_type != DEVICE_TYPE_TV)
1535 continue; 1534 continue;
1536 /* Only when the addin_offset is non-zero, it is regarded 1535 /* Only when the addin_offset is non-zero, it is regarded
1537 * as present. 1536 * as present.
1538 */ 1537 */
1539 if (p_child->addin_offset) { 1538 if (p_child->old.addin_offset) {
1540 ret = 1; 1539 ret = 1;
1541 break; 1540 break;
1542 } 1541 }
@@ -1590,12 +1589,12 @@ intel_tv_init(struct drm_device *dev)
1590 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) 1589 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
1591 return; 1590 return;
1592 1591
1593 intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL); 1592 intel_tv = kzalloc(sizeof(*intel_tv), GFP_KERNEL);
1594 if (!intel_tv) { 1593 if (!intel_tv) {
1595 return; 1594 return;
1596 } 1595 }
1597 1596
1598 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 1597 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
1599 if (!intel_connector) { 1598 if (!intel_connector) {
1600 kfree(intel_tv); 1599 kfree(intel_tv);
1601 return; 1600 return;
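tv_is_present_in_vbt() above now reads the VBT child device through union child_device_config and its .old view, presumably because the child device layout differs across VBT versions and the union keeps a legacy view alongside the extended one. A hedged sketch of that layout idea (field names and sizes are illustrative, not the real intel_bios.h definitions):

#include <stdint.h>
#include <stdio.h>

/* Legacy and extended views share the same storage, so parsers written
 * against the old layout keep working through the .old member. */
struct old_child_config {
	uint16_t device_type;
	uint16_t addin_offset;
};

struct new_child_config {
	uint16_t device_type;
	uint16_t addin_offset;
	uint8_t  extra[6];	/* fields later VBT versions appended */
};

union child_config {
	struct old_child_config old;
	struct new_child_config newer;
	uint8_t raw[10];
};

int main(void)
{
	union child_config c = { .newer = { .device_type = 0x1000,
					    .addin_offset = 0x40 } };

	/* Code that predates the extension reads through .old unchanged. */
	printf("type=0x%04x addin=0x%04x\n",
	       c.old.device_type, c.old.addin_offset);
	return 0;
}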
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8649f1c36b00..f6fae35c568e 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -204,60 +204,34 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
204 gen6_gt_check_fifodbg(dev_priv); 204 gen6_gt_check_fifodbg(dev_priv);
205} 205}
206 206
207void intel_uncore_early_sanitize(struct drm_device *dev) 207static void gen6_force_wake_work(struct work_struct *work)
208{ 208{
209 struct drm_i915_private *dev_priv = dev->dev_private; 209 struct drm_i915_private *dev_priv =
210 container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
211 unsigned long irqflags;
210 212
211 if (HAS_FPGA_DBG_UNCLAIMED(dev)) 213 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
212 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 214 if (--dev_priv->uncore.forcewake_count == 0)
215 dev_priv->uncore.funcs.force_wake_put(dev_priv);
216 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
213} 217}
214 218
215void intel_uncore_init(struct drm_device *dev) 219void intel_uncore_early_sanitize(struct drm_device *dev)
216{ 220{
217 struct drm_i915_private *dev_priv = dev->dev_private; 221 struct drm_i915_private *dev_priv = dev->dev_private;
218 222
219 if (IS_VALLEYVIEW(dev)) { 223 if (HAS_FPGA_DBG_UNCLAIMED(dev))
220 dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get; 224 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
221 dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
222 } else if (IS_HASWELL(dev)) {
223 dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
224 dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
225 } else if (IS_IVYBRIDGE(dev)) {
226 u32 ecobus;
227
228 /* IVB configs may use multi-threaded forcewake */
229
230 /* A small trick here - if the bios hasn't configured
231 * MT forcewake, and if the device is in RC6, then
232 * force_wake_mt_get will not wake the device and the
233 * ECOBUS read will return zero. Which will be
234 * (correctly) interpreted by the test below as MT
235 * forcewake being disabled.
236 */
237 mutex_lock(&dev->struct_mutex);
238 __gen6_gt_force_wake_mt_get(dev_priv);
239 ecobus = __raw_i915_read32(dev_priv, ECOBUS);
240 __gen6_gt_force_wake_mt_put(dev_priv);
241 mutex_unlock(&dev->struct_mutex);
242 225
243 if (ecobus & FORCEWAKE_MT_ENABLE) { 226 if (IS_HASWELL(dev) &&
244 dev_priv->uncore.funcs.force_wake_get = 227 (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
245 __gen6_gt_force_wake_mt_get; 228 /* The docs do not explain exactly how the calculation can be
246 dev_priv->uncore.funcs.force_wake_put = 229 * made. It is somewhat guessable, but for now, it's always
247 __gen6_gt_force_wake_mt_put; 230 * 128MB.
248 } else { 231 * NB: We can't write IDICR yet because we do not have gt funcs
249 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); 232 * set up */
250 DRM_INFO("when using vblank-synced partial screen updates.\n"); 233 dev_priv->ellc_size = 128;
251 dev_priv->uncore.funcs.force_wake_get = 234 DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
252 __gen6_gt_force_wake_get;
253 dev_priv->uncore.funcs.force_wake_put =
254 __gen6_gt_force_wake_put;
255 }
256 } else if (IS_GEN6(dev)) {
257 dev_priv->uncore.funcs.force_wake_get =
258 __gen6_gt_force_wake_get;
259 dev_priv->uncore.funcs.force_wake_put =
260 __gen6_gt_force_wake_put;
261 } 235 }
262} 236}
263 237
@@ -276,10 +250,26 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev)
276 250
277void intel_uncore_sanitize(struct drm_device *dev) 251void intel_uncore_sanitize(struct drm_device *dev)
278{ 252{
253 struct drm_i915_private *dev_priv = dev->dev_private;
254 u32 reg_val;
255
279 intel_uncore_forcewake_reset(dev); 256 intel_uncore_forcewake_reset(dev);
280 257
281 /* BIOS often leaves RC6 enabled, but disable it for hw init */ 258 /* BIOS often leaves RC6 enabled, but disable it for hw init */
282 intel_disable_gt_powersave(dev); 259 intel_disable_gt_powersave(dev);
260
261 /* Turn off power gate, require especially for the BIOS less system */
262 if (IS_VALLEYVIEW(dev)) {
263
264 mutex_lock(&dev_priv->rps.hw_lock);
265 reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);
266
267 if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
268 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);
269
270 mutex_unlock(&dev_priv->rps.hw_lock);
271
272 }
283} 273}
284 274
285/* 275/*
@@ -292,6 +282,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
292{ 282{
293 unsigned long irqflags; 283 unsigned long irqflags;
294 284
285 if (!dev_priv->uncore.funcs.force_wake_get)
286 return;
287
295 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 288 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
296 if (dev_priv->uncore.forcewake_count++ == 0) 289 if (dev_priv->uncore.forcewake_count++ == 0)
297 dev_priv->uncore.funcs.force_wake_get(dev_priv); 290 dev_priv->uncore.funcs.force_wake_get(dev_priv);
@@ -305,17 +298,22 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
305{ 298{
306 unsigned long irqflags; 299 unsigned long irqflags;
307 300
301 if (!dev_priv->uncore.funcs.force_wake_put)
302 return;
303
308 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 304 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
309 if (--dev_priv->uncore.forcewake_count == 0) 305 if (--dev_priv->uncore.forcewake_count == 0) {
310 dev_priv->uncore.funcs.force_wake_put(dev_priv); 306 dev_priv->uncore.forcewake_count++;
307 mod_delayed_work(dev_priv->wq,
308 &dev_priv->uncore.force_wake_work,
309 1);
310 }
311 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 311 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
312} 312}
313 313
314/* We give fast paths for the really cool registers */ 314/* We give fast paths for the really cool registers */
315#define NEEDS_FORCE_WAKE(dev_priv, reg) \ 315#define NEEDS_FORCE_WAKE(dev_priv, reg) \
316 ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ 316 ((reg) < 0x40000 && (reg) != FORCEWAKE)
317 ((reg) < 0x40000) && \
318 ((reg) != FORCEWAKE))
319 317
320static void 318static void
321ilk_dummy_write(struct drm_i915_private *dev_priv) 319ilk_dummy_write(struct drm_i915_private *dev_priv)
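The gen6_gt_force_wake_put() change above is the interesting one: instead of dropping the last forcewake reference inline, it re-takes the reference and hands it to a delayed work item scheduled one jiffy out (gen6_force_wake_work), so rapid get/put sequences coalesce into a single hardware release instead of bouncing forcewake on every access. A userspace model of that deferral, with a flush function standing in for the workqueue:

#include <stdio.h>

/* Model of the deferred release: in the kernel the deferred drop runs from
 * a delayed work item; here flush_deferred() plays that role. */
static int fw_count;
static int release_pending;

static void hw_get(void) { puts("hw: forcewake asserted"); }
static void hw_put(void) { puts("hw: forcewake released"); }

static void fw_get(void)
{
	if (fw_count++ == 0)
		hw_get();
}

static void fw_put(void)
{
	if (--fw_count == 0) {
		fw_count++;		/* keep the ref alive ... */
		release_pending = 1;	/* ... and hand it to the "work item" */
	}
}

static void flush_deferred(void)
{
	if (release_pending && --fw_count == 0)
		hw_put();
	release_pending = 0;
}

int main(void)
{
	fw_get(); fw_put();	/* would have toggled the hardware twice ... */
	fw_get(); fw_put();	/* ... but the release stays pending */
	flush_deferred();	/* single real release at the end */
	return 0;
}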
@@ -329,8 +327,7 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
329static void 327static void
330hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) 328hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
331{ 329{
332 if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && 330 if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
333 (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
334 DRM_ERROR("Unknown unclaimed register before writing to %x\n", 331 DRM_ERROR("Unknown unclaimed register before writing to %x\n",
335 reg); 332 reg);
336 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 333 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
@@ -340,20 +337,43 @@ hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
340static void 337static void
341hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) 338hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
342{ 339{
343 if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && 340 if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
344 (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
345 DRM_ERROR("Unclaimed write to %x\n", reg); 341 DRM_ERROR("Unclaimed write to %x\n", reg);
346 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 342 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
347 } 343 }
348} 344}
349 345
350#define __i915_read(x) \ 346#define REG_READ_HEADER(x) \
351u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
352 unsigned long irqflags; \ 347 unsigned long irqflags; \
353 u##x val = 0; \ 348 u##x val = 0; \
354 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ 349 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
355 if (dev_priv->info->gen == 5) \ 350
356 ilk_dummy_write(dev_priv); \ 351#define REG_READ_FOOTER \
352 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
353 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
354 return val
355
356#define __gen4_read(x) \
357static u##x \
358gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
359 REG_READ_HEADER(x); \
360 val = __raw_i915_read##x(dev_priv, reg); \
361 REG_READ_FOOTER; \
362}
363
364#define __gen5_read(x) \
365static u##x \
366gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
367 REG_READ_HEADER(x); \
368 ilk_dummy_write(dev_priv); \
369 val = __raw_i915_read##x(dev_priv, reg); \
370 REG_READ_FOOTER; \
371}
372
373#define __gen6_read(x) \
374static u##x \
375gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
376 REG_READ_HEADER(x); \
357 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 377 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
358 if (dev_priv->uncore.forcewake_count == 0) \ 378 if (dev_priv->uncore.forcewake_count == 0) \
359 dev_priv->uncore.funcs.force_wake_get(dev_priv); \ 379 dev_priv->uncore.funcs.force_wake_get(dev_priv); \
@@ -363,28 +383,73 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
363 } else { \ 383 } else { \
364 val = __raw_i915_read##x(dev_priv, reg); \ 384 val = __raw_i915_read##x(dev_priv, reg); \
365 } \ 385 } \
386 REG_READ_FOOTER; \
387}
388
389__gen6_read(8)
390__gen6_read(16)
391__gen6_read(32)
392__gen6_read(64)
393__gen5_read(8)
394__gen5_read(16)
395__gen5_read(32)
396__gen5_read(64)
397__gen4_read(8)
398__gen4_read(16)
399__gen4_read(32)
400__gen4_read(64)
401
402#undef __gen6_read
403#undef __gen5_read
404#undef __gen4_read
405#undef REG_READ_FOOTER
406#undef REG_READ_HEADER
407
408#define REG_WRITE_HEADER \
409 unsigned long irqflags; \
410 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
411 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
412
413#define __gen4_write(x) \
414static void \
415gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
416 REG_WRITE_HEADER; \
417 __raw_i915_write##x(dev_priv, reg, val); \
366 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ 418 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
367 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
368 return val; \
369} 419}
370 420
371__i915_read(8) 421#define __gen5_write(x) \
372__i915_read(16) 422static void \
373__i915_read(32) 423gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
374__i915_read(64) 424 REG_WRITE_HEADER; \
375#undef __i915_read 425 ilk_dummy_write(dev_priv); \
426 __raw_i915_write##x(dev_priv, reg, val); \
427 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
428}
376 429
377#define __i915_write(x) \ 430#define __gen6_write(x) \
378void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \ 431static void \
379 unsigned long irqflags; \ 432gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
380 u32 __fifo_ret = 0; \ 433 u32 __fifo_ret = 0; \
381 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ 434 REG_WRITE_HEADER; \
382 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ 435 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
436 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
437 } \
438 __raw_i915_write##x(dev_priv, reg, val); \
439 if (unlikely(__fifo_ret)) { \
440 gen6_gt_check_fifodbg(dev_priv); \
441 } \
442 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
443}
444
445#define __hsw_write(x) \
446static void \
447hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
448 u32 __fifo_ret = 0; \
449 REG_WRITE_HEADER; \
383 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 450 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
384 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ 451 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
385 } \ 452 } \
386 if (dev_priv->info->gen == 5) \
387 ilk_dummy_write(dev_priv); \
388 hsw_unclaimed_reg_clear(dev_priv, reg); \ 453 hsw_unclaimed_reg_clear(dev_priv, reg); \
389 __raw_i915_write##x(dev_priv, reg, val); \ 454 __raw_i915_write##x(dev_priv, reg, val); \
390 if (unlikely(__fifo_ret)) { \ 455 if (unlikely(__fifo_ret)) { \
@@ -393,11 +458,134 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool tr
393 hsw_unclaimed_reg_check(dev_priv, reg); \ 458 hsw_unclaimed_reg_check(dev_priv, reg); \
394 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ 459 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
395} 460}
396__i915_write(8) 461
397__i915_write(16) 462__hsw_write(8)
398__i915_write(32) 463__hsw_write(16)
399__i915_write(64) 464__hsw_write(32)
400#undef __i915_write 465__hsw_write(64)
466__gen6_write(8)
467__gen6_write(16)
468__gen6_write(32)
469__gen6_write(64)
470__gen5_write(8)
471__gen5_write(16)
472__gen5_write(32)
473__gen5_write(64)
474__gen4_write(8)
475__gen4_write(16)
476__gen4_write(32)
477__gen4_write(64)
478
479#undef __hsw_write
480#undef __gen6_write
481#undef __gen5_write
482#undef __gen4_write
483#undef REG_WRITE_HEADER
484
485void intel_uncore_init(struct drm_device *dev)
486{
487 struct drm_i915_private *dev_priv = dev->dev_private;
488
489 INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
490 gen6_force_wake_work);
491
492 if (IS_VALLEYVIEW(dev)) {
493 dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
494 dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
495 } else if (IS_HASWELL(dev)) {
496 dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
497 dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
498 } else if (IS_IVYBRIDGE(dev)) {
499 u32 ecobus;
500
501 /* IVB configs may use multi-threaded forcewake */
502
503 /* A small trick here - if the bios hasn't configured
504 * MT forcewake, and if the device is in RC6, then
505 * force_wake_mt_get will not wake the device and the
506 * ECOBUS read will return zero. Which will be
507 * (correctly) interpreted by the test below as MT
508 * forcewake being disabled.
509 */
510 mutex_lock(&dev->struct_mutex);
511 __gen6_gt_force_wake_mt_get(dev_priv);
512 ecobus = __raw_i915_read32(dev_priv, ECOBUS);
513 __gen6_gt_force_wake_mt_put(dev_priv);
514 mutex_unlock(&dev->struct_mutex);
515
516 if (ecobus & FORCEWAKE_MT_ENABLE) {
517 dev_priv->uncore.funcs.force_wake_get =
518 __gen6_gt_force_wake_mt_get;
519 dev_priv->uncore.funcs.force_wake_put =
520 __gen6_gt_force_wake_mt_put;
521 } else {
522 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
523 DRM_INFO("when using vblank-synced partial screen updates.\n");
524 dev_priv->uncore.funcs.force_wake_get =
525 __gen6_gt_force_wake_get;
526 dev_priv->uncore.funcs.force_wake_put =
527 __gen6_gt_force_wake_put;
528 }
529 } else if (IS_GEN6(dev)) {
530 dev_priv->uncore.funcs.force_wake_get =
531 __gen6_gt_force_wake_get;
532 dev_priv->uncore.funcs.force_wake_put =
533 __gen6_gt_force_wake_put;
534 }
535
536 switch (INTEL_INFO(dev)->gen) {
537 case 7:
538 case 6:
539 if (IS_HASWELL(dev)) {
540 dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
541 dev_priv->uncore.funcs.mmio_writew = hsw_write16;
542 dev_priv->uncore.funcs.mmio_writel = hsw_write32;
543 dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
544 } else {
545 dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
546 dev_priv->uncore.funcs.mmio_writew = gen6_write16;
547 dev_priv->uncore.funcs.mmio_writel = gen6_write32;
548 dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
549 }
550 dev_priv->uncore.funcs.mmio_readb = gen6_read8;
551 dev_priv->uncore.funcs.mmio_readw = gen6_read16;
552 dev_priv->uncore.funcs.mmio_readl = gen6_read32;
553 dev_priv->uncore.funcs.mmio_readq = gen6_read64;
554 break;
555 case 5:
556 dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
557 dev_priv->uncore.funcs.mmio_writew = gen5_write16;
558 dev_priv->uncore.funcs.mmio_writel = gen5_write32;
559 dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
560 dev_priv->uncore.funcs.mmio_readb = gen5_read8;
561 dev_priv->uncore.funcs.mmio_readw = gen5_read16;
562 dev_priv->uncore.funcs.mmio_readl = gen5_read32;
563 dev_priv->uncore.funcs.mmio_readq = gen5_read64;
564 break;
565 case 4:
566 case 3:
567 case 2:
568 dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
569 dev_priv->uncore.funcs.mmio_writew = gen4_write16;
570 dev_priv->uncore.funcs.mmio_writel = gen4_write32;
571 dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
572 dev_priv->uncore.funcs.mmio_readb = gen4_read8;
573 dev_priv->uncore.funcs.mmio_readw = gen4_read16;
574 dev_priv->uncore.funcs.mmio_readl = gen4_read32;
575 dev_priv->uncore.funcs.mmio_readq = gen4_read64;
576 break;
577 }
578}
579
580void intel_uncore_fini(struct drm_device *dev)
581{
582 struct drm_i915_private *dev_priv = dev->dev_private;
583
584 flush_delayed_work(&dev_priv->uncore.force_wake_work);
585
586 /* Paranoia: make sure we have disabled everything before we exit. */
587 intel_uncore_sanitize(dev);
588}
401 589
402static const struct register_whitelist { 590static const struct register_whitelist {
403 uint64_t offset; 591 uint64_t offset;
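The large rework above replaces the monolithic i915_read##x/i915_write##x functions with per-generation variants generated by the __gen*_read/__gen*_write macros, and intel_uncore_init() now fills a table of function pointers (mmio_readb through mmio_writeq) chosen once by hardware generation. Each variant differs only in its fixups: the ILK dummy write on gen5, the FIFO wait and forcewake dance on gen6+, the unclaimed-register check on Haswell. A minimal userspace model of that dispatch-table shape (the "fixups" here are just prints, and all names are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t raw_read(uint32_t reg) { return reg ^ 0xa5a5a5a5; }

static uint32_t gen4_read32(uint32_t reg)
{
	return raw_read(reg);	/* no workaround needed */
}

static uint32_t gen5_read32(uint32_t reg)
{
	puts("gen5: dummy write workaround");	/* ilk_dummy_write() stand-in */
	return raw_read(reg);
}

static uint32_t gen6_read32(uint32_t reg)
{
	if (reg < 0x40000)	/* NEEDS_FORCE_WAKE()-style fast-path test */
		puts("gen6: forcewake around read");
	return raw_read(reg);
}

struct uncore_funcs {
	uint32_t (*mmio_readl)(uint32_t reg);
};

/* Select the variant once at init, instead of running if-chains on every
 * single register access -- the point of the refactor above. */
static void uncore_init(struct uncore_funcs *funcs, int gen)
{
	switch (gen) {
	case 7:
	case 6: funcs->mmio_readl = gen6_read32; break;
	case 5: funcs->mmio_readl = gen5_read32; break;
	default: funcs->mmio_readl = gen4_read32; break;
	}
}

int main(void)
{
	struct uncore_funcs funcs;

	uncore_init(&funcs, 6);
	printf("0x%08x\n", funcs.mmio_readl(0x2030));
	return 0;
}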
@@ -445,36 +633,6 @@ int i915_reg_read_ioctl(struct drm_device *dev,
445 return 0; 633 return 0;
446} 634}
447 635
448static int i8xx_do_reset(struct drm_device *dev)
449{
450 struct drm_i915_private *dev_priv = dev->dev_private;
451
452 if (IS_I85X(dev))
453 return -ENODEV;
454
455 I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
456 POSTING_READ(D_STATE);
457
458 if (IS_I830(dev) || IS_845G(dev)) {
459 I915_WRITE(DEBUG_RESET_I830,
460 DEBUG_RESET_DISPLAY |
461 DEBUG_RESET_RENDER |
462 DEBUG_RESET_FULL);
463 POSTING_READ(DEBUG_RESET_I830);
464 msleep(1);
465
466 I915_WRITE(DEBUG_RESET_I830, 0);
467 POSTING_READ(DEBUG_RESET_I830);
468 }
469
470 msleep(1);
471
472 I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
473 POSTING_READ(D_STATE);
474
475 return 0;
476}
477
478static int i965_reset_complete(struct drm_device *dev) 636static int i965_reset_complete(struct drm_device *dev)
479{ 637{
480 u8 gdrst; 638 u8 gdrst;
@@ -576,7 +734,6 @@ int intel_gpu_reset(struct drm_device *dev)
576 case 6: return gen6_do_reset(dev); 734 case 6: return gen6_do_reset(dev);
577 case 5: return ironlake_do_reset(dev); 735 case 5: return ironlake_do_reset(dev);
578 case 4: return i965_do_reset(dev); 736 case 4: return i965_do_reset(dev);
579 case 2: return i8xx_do_reset(dev);
580 default: return -ENODEV; 737 default: return -ENODEV;
581 } 738 }
582} 739}
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index cc3166dd445a..087db33f6cff 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -406,11 +406,6 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
406 dev_priv->mmio_base = pci_resource_start(dev->pdev, 1); 406 dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
407 dev_priv->mmio_size = pci_resource_len(dev->pdev, 1); 407 dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
408 408
409 dev->counters += 3;
410 dev->types[6] = _DRM_STAT_IRQ;
411 dev->types[7] = _DRM_STAT_PRIMARY;
412 dev->types[8] = _DRM_STAT_SECONDARY;
413
414 ret = drm_vblank_init(dev, 1); 409 ret = drm_vblank_init(dev, 1);
415 410
416 if (ret) { 411 if (ret) {
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index 598c281def0a..2b0ceb8dc11b 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -169,5 +169,5 @@ void mga_driver_irq_uninstall(struct drm_device *dev)
169 /* Disable *all* interrupts */ 169 /* Disable *all* interrupts */
170 MGA_WRITE(MGA_IEN, 0); 170 MGA_WRITE(MGA_IEN, 0);
171 171
172 dev->irq_enabled = 0; 172 dev->irq_enabled = false;
173} 173}
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index b487cdec5ee7..3a1c5fbae54a 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -5,6 +5,7 @@ config DRM_MGAG200
5 select FB_SYS_COPYAREA 5 select FB_SYS_COPYAREA
6 select FB_SYS_IMAGEBLIT 6 select FB_SYS_IMAGEBLIT
7 select DRM_KMS_HELPER 7 select DRM_KMS_HELPER
8 select DRM_KMS_FB_HELPER
8 select DRM_TTM 9 select DRM_TTM
9 help 10 help
10 This is a KMS driver for the MGA G200 server chips, it 11 This is a KMS driver for the MGA G200 server chips, it
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index fcce7b2f8011..f15ea3c4a90a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -99,7 +99,6 @@ static struct drm_driver driver = {
99 .minor = DRIVER_MINOR, 99 .minor = DRIVER_MINOR,
100 .patchlevel = DRIVER_PATCHLEVEL, 100 .patchlevel = DRIVER_PATCHLEVEL,
101 101
102 .gem_init_object = mgag200_gem_init_object,
103 .gem_free_object = mgag200_gem_free_object, 102 .gem_free_object = mgag200_gem_free_object,
104 .dumb_create = mgag200_dumb_create, 103 .dumb_create = mgag200_dumb_create,
105 .dumb_map_offset = mgag200_dumb_mmap_offset, 104 .dumb_map_offset = mgag200_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index baaae19332e2..cf11ee68a6d9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -260,7 +260,6 @@ int mgag200_driver_unload(struct drm_device *dev);
260int mgag200_gem_create(struct drm_device *dev, 260int mgag200_gem_create(struct drm_device *dev,
261 u32 size, bool iskernel, 261 u32 size, bool iskernel,
262 struct drm_gem_object **obj); 262 struct drm_gem_object **obj);
263int mgag200_gem_init_object(struct drm_gem_object *obj);
264int mgag200_dumb_create(struct drm_file *file, 263int mgag200_dumb_create(struct drm_file *file,
265 struct drm_device *dev, 264 struct drm_device *dev,
266 struct drm_mode_create_dumb *args); 265 struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 0f8b861b10b3..b1120cb1db6d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -310,12 +310,6 @@ int mgag200_dumb_create(struct drm_file *file,
310 return 0; 310 return 0;
311} 311}
312 312
313int mgag200_gem_init_object(struct drm_gem_object *obj)
314{
315 BUG();
316 return 0;
317}
318
319void mgag200_bo_unref(struct mgag200_bo **bo) 313void mgag200_bo_unref(struct mgag200_bo **bo)
320{ 314{
321 struct ttm_buffer_object *tbo; 315 struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index a06c19cc56f8..f39ab7554fc9 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -14,6 +14,7 @@ config DRM_MSM
14config DRM_MSM_FBDEV 14config DRM_MSM_FBDEV
15 bool "Enable legacy fbdev support for MSM modesetting driver" 15 bool "Enable legacy fbdev support for MSM modesetting driver"
16 depends on DRM_MSM 16 depends on DRM_MSM
17 select DRM_KMS_FB_HELPER
17 select FB_SYS_FILLRECT 18 select FB_SYS_FILLRECT
18 select FB_SYS_COPYAREA 19 select FB_SYS_COPYAREA
19 select FB_SYS_IMAGEBLIT 20 select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ff80f12480ea..7cf787d697b1 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -3,6 +3,7 @@ config DRM_NOUVEAU
3 depends on DRM && PCI 3 depends on DRM && PCI
4 select FW_LOADER 4 select FW_LOADER
5 select DRM_KMS_HELPER 5 select DRM_KMS_HELPER
6 select DRM_KMS_FB_HELPER
6 select DRM_TTM 7 select DRM_TTM
7 select FB_CFB_FILLRECT 8 select FB_CFB_FILLRECT
8 select FB_CFB_COPYAREA 9 select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 2e70462883e8..2a15b98b4d2b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -210,8 +210,8 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
210 sim_data.nvclk_khz = NVClk; 210 sim_data.nvclk_khz = NVClk;
211 sim_data.bpp = bpp; 211 sim_data.bpp = bpp;
212 sim_data.two_heads = nv_two_heads(dev); 212 sim_data.two_heads = nv_two_heads(dev);
213 if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ || 213 if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
214 (dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) { 214 (dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
215 uint32_t type; 215 uint32_t type;
216 216
217 pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type); 217 pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);
@@ -256,8 +256,8 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
256 256
257 if (nv_device(drm->device)->card_type < NV_20) 257 if (nv_device(drm->device)->card_type < NV_20)
258 nv04_update_arb(dev, vclk, bpp, burst, lwm); 258 nv04_update_arb(dev, vclk, bpp, burst, lwm);
259 else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ || 259 else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
260 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) { 260 (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
261 *burst = 128; 261 *burst = 128;
262 *lwm = 0x0480; 262 *lwm = 0x0480;
263 } else 263 } else
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index d4fbf11360fe..0e3270c3ffd2 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -326,8 +326,6 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
326 regp->MiscOutReg = 0x23; /* +hsync +vsync */ 326 regp->MiscOutReg = 0x23; /* +hsync +vsync */
327 } 327 }
328 328
329 regp->MiscOutReg |= (mode->clock_index & 0x03) << 2;
330
331 /* 329 /*
332 * Time Sequencer 330 * Time Sequencer
333 */ 331 */
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 93dd23ff0093..59d1c040b84f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -490,8 +490,8 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
490 /* BIOS scripts usually take care of the backlight, thanks 490 /* BIOS scripts usually take care of the backlight, thanks
491 * Apple for your consistency. 491 * Apple for your consistency.
492 */ 492 */
493 if (dev->pci_device == 0x0174 || dev->pci_device == 0x0179 || 493 if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 ||
494 dev->pci_device == 0x0189 || dev->pci_device == 0x0329) { 494 dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) {
495 if (mode == DRM_MODE_DPMS_ON) { 495 if (mode == DRM_MODE_DPMS_ON) {
496 nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31); 496 nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
497 nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1); 497 nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 9928187f0a7d..2cf65e0b517e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -127,7 +127,7 @@ static inline bool
127nv_two_heads(struct drm_device *dev) 127nv_two_heads(struct drm_device *dev)
128{ 128{
129 struct nouveau_drm *drm = nouveau_drm(dev); 129 struct nouveau_drm *drm = nouveau_drm(dev);
130 const int impl = dev->pci_device & 0x0ff0; 130 const int impl = dev->pdev->device & 0x0ff0;
131 131
132 if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 && 132 if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 &&
133 impl != 0x0150 && impl != 0x01a0 && impl != 0x0200) 133 impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
@@ -139,14 +139,14 @@ nv_two_heads(struct drm_device *dev)
139static inline bool 139static inline bool
140nv_gf4_disp_arch(struct drm_device *dev) 140nv_gf4_disp_arch(struct drm_device *dev)
141{ 141{
142 return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110; 142 return nv_two_heads(dev) && (dev->pdev->device & 0x0ff0) != 0x0110;
143} 143}
144 144
145static inline bool 145static inline bool
146nv_two_reg_pll(struct drm_device *dev) 146nv_two_reg_pll(struct drm_device *dev)
147{ 147{
148 struct nouveau_drm *drm = nouveau_drm(dev); 148 struct nouveau_drm *drm = nouveau_drm(dev);
149 const int impl = dev->pci_device & 0x0ff0; 149 const int impl = dev->pdev->device & 0x0ff0;
150 150
151 if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40) 151 if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40)
152 return true; 152 return true;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 973056b86207..f8dee834527f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -220,7 +220,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
220 int ret; 220 int ret;
221 221
222 if (plltype == PLL_MEMORY && 222 if (plltype == PLL_MEMORY &&
223 (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) { 223 (dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
224 uint32_t mpllP; 224 uint32_t mpllP;
225 225
226 pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); 226 pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
@@ -230,7 +230,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
230 return 400000 / mpllP; 230 return 400000 / mpllP;
231 } else 231 } else
232 if (plltype == PLL_MEMORY && 232 if (plltype == PLL_MEMORY &&
233 (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) { 233 (dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) {
234 uint32_t clock; 234 uint32_t clock;
235 235
236 pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); 236 pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 8f467e7bfd19..72055a35f845 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -130,7 +130,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
130 if (chan->ntfy) { 130 if (chan->ntfy) {
131 nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma); 131 nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
132 nouveau_bo_unpin(chan->ntfy); 132 nouveau_bo_unpin(chan->ntfy);
133 drm_gem_object_unreference_unlocked(chan->ntfy->gem); 133 drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
134 } 134 }
135 135
136 if (chan->heap.block_size) 136 if (chan->heap.block_size)
@@ -178,10 +178,10 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
178 getparam->value = device->chipset; 178 getparam->value = device->chipset;
179 break; 179 break;
180 case NOUVEAU_GETPARAM_PCI_VENDOR: 180 case NOUVEAU_GETPARAM_PCI_VENDOR:
181 getparam->value = dev->pci_vendor; 181 getparam->value = dev->pdev->vendor;
182 break; 182 break;
183 case NOUVEAU_GETPARAM_PCI_DEVICE: 183 case NOUVEAU_GETPARAM_PCI_DEVICE:
184 getparam->value = dev->pci_device; 184 getparam->value = dev->pdev->device;
185 break; 185 break;
186 case NOUVEAU_GETPARAM_BUS_TYPE: 186 case NOUVEAU_GETPARAM_BUS_TYPE:
187 if (drm_pci_device_is_agp(dev)) 187 if (drm_pci_device_is_agp(dev))
@@ -320,7 +320,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
320 goto done; 320 goto done;
321 } 321 }
322 322
323 ret = drm_gem_handle_create(file_priv, chan->ntfy->gem, 323 ret = drm_gem_handle_create(file_priv, &chan->ntfy->gem,
324 &init->notifier_handle); 324 &init->notifier_handle);
325 if (ret) 325 if (ret)
326 goto done; 326 goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 3e7287675ecf..4c3feaaa1037 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -127,8 +127,8 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_outp
127#ifdef __powerpc__ 127#ifdef __powerpc__
128 /* Powerbook specific quirks */ 128 /* Powerbook specific quirks */
129 if (script == LVDS_RESET && 129 if (script == LVDS_RESET &&
130 (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 || 130 (dev->pdev->device == 0x0179 || dev->pdev->device == 0x0189 ||
131 dev->pci_device == 0x0329)) 131 dev->pdev->device == 0x0329))
132 nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72); 132 nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
133#endif 133#endif
134 134
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 755c38d06271..4172854d4365 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -146,7 +146,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
146 struct drm_device *dev = drm->dev; 146 struct drm_device *dev = drm->dev;
147 struct nouveau_bo *nvbo = nouveau_bo(bo); 147 struct nouveau_bo *nvbo = nouveau_bo(bo);
148 148
149 if (unlikely(nvbo->gem)) 149 if (unlikely(nvbo->gem.filp))
150 DRM_ERROR("bo %p still attached to GEM object\n", bo); 150 DRM_ERROR("bo %p still attached to GEM object\n", bo);
151 WARN_ON(nvbo->pin_refcnt > 0); 151 WARN_ON(nvbo->pin_refcnt > 0);
152 nv10_bo_put_tile_region(dev, nvbo->tile, NULL); 152 nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
@@ -1267,7 +1267,7 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
1267{ 1267{
1268 struct nouveau_bo *nvbo = nouveau_bo(bo); 1268 struct nouveau_bo *nvbo = nouveau_bo(bo);
1269 1269
1270 return drm_vma_node_verify_access(&nvbo->gem->vma_node, filp); 1270 return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
1271} 1271}
1272 1272
1273static int 1273static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 653dbbbd4fa1..ff17c1f432fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -27,7 +27,10 @@ struct nouveau_bo {
27 u32 tile_flags; 27 u32 tile_flags;
28 struct nouveau_drm_tile *tile; 28 struct nouveau_drm_tile *tile;
29 29
30 struct drm_gem_object *gem; 30 /* Only valid if allocated via nouveau_gem_new() and iff you hold a
31 * gem reference to it! For debugging, use gem.filp != NULL to test
32 * whether it is valid. */
33 struct drm_gem_object gem;
31 34
32 /* protect by the ttm reservation lock */ 35 /* protect by the ttm reservation lock */
33 int pin_refcnt; 36 int pin_refcnt;
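With the change above, nouveau_bo embeds the GEM object rather than pointing at one, so the bo/gem conversion becomes pointer arithmetic and gem.filp doubles as the "was this allocated through GEM" flag the new comment describes. A small sketch of the container_of pattern this enables, presumably what the nouveau_gem_object() helper used in nouveau_gem.c reduces to (struct contents trimmed, names illustrative):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Trimmed stand-ins for struct drm_gem_object / struct nouveau_bo. */
struct gem_object { void *filp; };

struct nouveau_bo_like {
	int pin_refcnt;
	struct gem_object gem;	/* embedded, as in the hunk above */
};

/* The conversion the old pointer member provided now falls out of the
 * struct layout itself. */
static struct nouveau_bo_like *to_nvbo(struct gem_object *gem)
{
	return container_of(gem, struct nouveau_bo_like, gem);
}

int main(void)
{
	struct nouveau_bo_like bo = { .pin_refcnt = 1 };
	struct gem_object *gem = &bo.gem;

	printf("round-trip ok: %d\n", to_nvbo(gem) == &bo);
	/* 0 here: this bo was never initialized through the GEM path */
	printf("gem-backed: %d\n", bo.gem.filp != NULL);
	return 0;
}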
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index c5b36f9e9a10..2136d0038252 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -215,8 +215,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
215 connector->doublescan_allowed = true; 215 connector->doublescan_allowed = true;
216 if (nv_device(drm->device)->card_type == NV_20 || 216 if (nv_device(drm->device)->card_type == NV_20 ||
217 (nv_device(drm->device)->card_type == NV_10 && 217 (nv_device(drm->device)->card_type == NV_10 &&
218 (dev->pci_device & 0x0ff0) != 0x0100 && 218 (dev->pdev->device & 0x0ff0) != 0x0100 &&
219 (dev->pci_device & 0x0ff0) != 0x0150)) 219 (dev->pdev->device & 0x0ff0) != 0x0150))
220 /* HW is broken */ 220 /* HW is broken */
221 connector->interlace_allowed = false; 221 connector->interlace_allowed = false;
222 else 222 else
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7848590f5568..bdd5cf71a24c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -50,7 +50,7 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
50 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); 50 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
51 51
52 if (fb->nvbo) 52 if (fb->nvbo)
53 drm_gem_object_unreference_unlocked(fb->nvbo->gem); 53 drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
54 54
55 drm_framebuffer_cleanup(drm_fb); 55 drm_framebuffer_cleanup(drm_fb);
56 kfree(fb); 56 kfree(fb);
@@ -63,7 +63,7 @@ nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
63{ 63{
64 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); 64 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
65 65
66 return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle); 66 return drm_gem_handle_create(file_priv, &fb->nvbo->gem, handle);
67} 67}
68 68
69static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = { 69static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
@@ -674,8 +674,8 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
674 if (ret) 674 if (ret)
675 return ret; 675 return ret;
676 676
677 ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle); 677 ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
678 drm_gem_object_unreference_unlocked(bo->gem); 678 drm_gem_object_unreference_unlocked(&bo->gem);
679 return ret; 679 return ret;
680} 680}
681 681
@@ -688,7 +688,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
688 688
689 gem = drm_gem_object_lookup(dev, file_priv, handle); 689 gem = drm_gem_object_lookup(dev, file_priv, handle);
690 if (gem) { 690 if (gem) {
691 struct nouveau_bo *bo = gem->driver_private; 691 struct nouveau_bo *bo = nouveau_gem_object(gem);
692 *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node); 692 *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
693 drm_gem_object_unreference_unlocked(gem); 693 drm_gem_object_unreference_unlocked(gem);
694 return 0; 694 return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index e893c5362402..428d818be775 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -834,7 +834,6 @@ driver = {
834 .gem_prime_vmap = nouveau_gem_prime_vmap, 834 .gem_prime_vmap = nouveau_gem_prime_vmap,
835 .gem_prime_vunmap = nouveau_gem_prime_vunmap, 835 .gem_prime_vunmap = nouveau_gem_prime_vunmap,
836 836
837 .gem_init_object = nouveau_gem_object_new,
838 .gem_free_object = nouveau_gem_object_del, 837 .gem_free_object = nouveau_gem_object_del,
839 .gem_open_object = nouveau_gem_object_open, 838 .gem_open_object = nouveau_gem_object_open,
840 .gem_close_object = nouveau_gem_object_close, 839 .gem_close_object = nouveau_gem_object_close,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index a86ecf65c164..c80b519b513a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -420,7 +420,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
420 nouveau_bo_unmap(nouveau_fb->nvbo); 420 nouveau_bo_unmap(nouveau_fb->nvbo);
421 nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma); 421 nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
422 nouveau_bo_unpin(nouveau_fb->nvbo); 422 nouveau_bo_unpin(nouveau_fb->nvbo);
423 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); 423 drm_gem_object_unreference_unlocked(&nouveau_fb->nvbo->gem);
424 nouveau_fb->nvbo = NULL; 424 nouveau_fb->nvbo = NULL;
425 } 425 }
426 drm_fb_helper_fini(&fbcon->helper); 426 drm_fb_helper_fini(&fbcon->helper);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index f32b71238c03..418a6177a653 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -34,29 +34,20 @@
34#include "nouveau_ttm.h" 34#include "nouveau_ttm.h"
35#include "nouveau_gem.h" 35#include "nouveau_gem.h"
36 36
37int
38nouveau_gem_object_new(struct drm_gem_object *gem)
39{
40 return 0;
41}
42
43void 37void
44nouveau_gem_object_del(struct drm_gem_object *gem) 38nouveau_gem_object_del(struct drm_gem_object *gem)
45{ 39{
46 struct nouveau_bo *nvbo = gem->driver_private; 40 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
47 struct ttm_buffer_object *bo = &nvbo->bo; 41 struct ttm_buffer_object *bo = &nvbo->bo;
48 42
49 if (!nvbo)
50 return;
51 nvbo->gem = NULL;
52
53 if (gem->import_attach) 43 if (gem->import_attach)
54 drm_prime_gem_destroy(gem, nvbo->bo.sg); 44 drm_prime_gem_destroy(gem, nvbo->bo.sg);
55 45
56 ttm_bo_unref(&bo);
57
58 drm_gem_object_release(gem); 46 drm_gem_object_release(gem);
59 kfree(gem); 47
48 /* reset filp so nouveau_bo_del_ttm() can test for it */
49 gem->filp = NULL;
50 ttm_bo_unref(&bo);
60} 51}
61 52
62int 53int
@@ -186,14 +177,15 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
186 if (nv_device(drm->device)->card_type >= NV_50) 177 if (nv_device(drm->device)->card_type >= NV_50)
187 nvbo->valid_domains &= domain; 178 nvbo->valid_domains &= domain;
188 179
189 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size); 180 /* Initialize the embedded gem-object. We return a single gem-reference
190 if (!nvbo->gem) { 181 * to the caller, instead of a normal nouveau_bo ttm reference. */
182 ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
183 if (ret) {
191 nouveau_bo_ref(NULL, pnvbo); 184 nouveau_bo_ref(NULL, pnvbo);
192 return -ENOMEM; 185 return -ENOMEM;
193 } 186 }
194 187
195 nvbo->bo.persistent_swap_storage = nvbo->gem->filp; 188 nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
196 nvbo->gem->driver_private = nvbo;
197 return 0; 189 return 0;
198} 190}
199 191
@@ -250,15 +242,15 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
250 if (ret) 242 if (ret)
251 return ret; 243 return ret;
252 244
253 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); 245 ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
254 if (ret == 0) { 246 if (ret == 0) {
255 ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info); 247 ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
256 if (ret) 248 if (ret)
257 drm_gem_handle_delete(file_priv, req->info.handle); 249 drm_gem_handle_delete(file_priv, req->info.handle);
258 } 250 }
259 251
260 /* drop reference from allocate - handle holds it now */ 252 /* drop reference from allocate - handle holds it now */
261 drm_gem_object_unreference_unlocked(nvbo->gem); 253 drm_gem_object_unreference_unlocked(&nvbo->gem);
262 return ret; 254 return ret;
263} 255}
264 256
@@ -266,7 +258,7 @@ static int
266nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains, 258nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
267 uint32_t write_domains, uint32_t valid_domains) 259 uint32_t write_domains, uint32_t valid_domains)
268{ 260{
269 struct nouveau_bo *nvbo = gem->driver_private; 261 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
270 struct ttm_buffer_object *bo = &nvbo->bo; 262 struct ttm_buffer_object *bo = &nvbo->bo;
271 uint32_t domains = valid_domains & nvbo->valid_domains & 263 uint32_t domains = valid_domains & nvbo->valid_domains &
272 (write_domains ? write_domains : read_domains); 264 (write_domains ? write_domains : read_domains);
@@ -327,7 +319,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
327 list_del(&nvbo->entry); 319 list_del(&nvbo->entry);
328 nvbo->reserved_by = NULL; 320 nvbo->reserved_by = NULL;
329 ttm_bo_unreserve_ticket(&nvbo->bo, ticket); 321 ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
330 drm_gem_object_unreference_unlocked(nvbo->gem); 322 drm_gem_object_unreference_unlocked(&nvbo->gem);
331 } 323 }
332} 324}
333 325
@@ -376,7 +368,7 @@ retry:
376 validate_fini(op, NULL); 368 validate_fini(op, NULL);
377 return -ENOENT; 369 return -ENOENT;
378 } 370 }
379 nvbo = gem->driver_private; 371 nvbo = nouveau_gem_object(gem);
380 if (nvbo == res_bo) { 372 if (nvbo == res_bo) {
381 res_bo = NULL; 373 res_bo = NULL;
382 drm_gem_object_unreference_unlocked(gem); 374 drm_gem_object_unreference_unlocked(gem);
@@ -478,7 +470,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
478 return ret; 470 return ret;
479 } 471 }
480 472
481 ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains, 473 ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
482 b->write_domains, 474 b->write_domains,
483 b->valid_domains); 475 b->valid_domains);
484 if (unlikely(ret)) { 476 if (unlikely(ret)) {
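
The hunks above replace nouveau's separately allocated GEM object with one embedded in the buffer structure and initialized in place. A minimal sketch of the resulting create path, assuming a hypothetical my_bo structure and a driver gem_free_object hook that releases and kfree()s it (none of the my_* names are kernel API):

    #include <linux/slab.h>
    #include <drm/drmP.h>

    struct my_bo {
            struct drm_gem_object gem;      /* embedded, not a pointer */
            /* ... driver-specific buffer state ... */
    };

    static int my_bo_create(struct drm_device *dev, struct drm_file *file_priv,
                            size_t size, u32 *handle)
    {
            struct my_bo *bo;
            int ret;

            bo = kzalloc(sizeof(*bo), GFP_KERNEL);
            if (!bo)
                    return -ENOMEM;

            /* Initialize the embedded object; the single GEM reference now
             * owns the buffer, as in nouveau_gem_new() above. */
            ret = drm_gem_object_init(dev, &bo->gem, PAGE_ALIGN(size));
            if (ret) {
                    kfree(bo);
                    return ret;
            }

            ret = drm_gem_handle_create(file_priv, &bo->gem, handle);
            /* Drop the allocation reference; on success the handle keeps one. */
            drm_gem_object_unreference_unlocked(&bo->gem);
            return ret;
    }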
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 502e4290aa8f..7caca057bc38 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -12,14 +12,13 @@
12static inline struct nouveau_bo * 12static inline struct nouveau_bo *
13nouveau_gem_object(struct drm_gem_object *gem) 13nouveau_gem_object(struct drm_gem_object *gem)
14{ 14{
15 return gem ? gem->driver_private : NULL; 15 return gem ? container_of(gem, struct nouveau_bo, gem) : NULL;
16} 16}
17 17
18/* nouveau_gem.c */ 18/* nouveau_gem.c */
19extern int nouveau_gem_new(struct drm_device *, int size, int align, 19extern int nouveau_gem_new(struct drm_device *, int size, int align,
20 uint32_t domain, uint32_t tile_mode, 20 uint32_t domain, uint32_t tile_mode,
21 uint32_t tile_flags, struct nouveau_bo **); 21 uint32_t tile_flags, struct nouveau_bo **);
22extern int nouveau_gem_object_new(struct drm_gem_object *);
23extern void nouveau_gem_object_del(struct drm_gem_object *); 22extern void nouveau_gem_object_del(struct drm_gem_object *);
24extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *); 23extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
25extern void nouveau_gem_object_close(struct drm_gem_object *, 24extern void nouveau_gem_object_close(struct drm_gem_object *,
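
With driver_private gone, the upcast from a GEM object back to the containing buffer is plain pointer arithmetic via container_of(), as the accessor above shows. A hedged sketch of a lookup path built on the same pattern (my_bo, my_gem_object and my_map_offset are illustrative):

    #include <linux/kernel.h>       /* container_of() */
    #include <drm/drmP.h>

    struct my_bo {
            struct drm_gem_object gem;
            u64 mmap_offset;
    };

    static inline struct my_bo *my_gem_object(struct drm_gem_object *gem)
    {
            /* Safe because gem is embedded in struct my_bo; the NULL
             * check mirrors the nouveau accessor. */
            return gem ? container_of(gem, struct my_bo, gem) : NULL;
    }

    static int my_map_offset(struct drm_device *dev, struct drm_file *file_priv,
                             u32 handle, u64 *offset)
    {
            struct drm_gem_object *gem;

            gem = drm_gem_object_lookup(dev, file_priv, handle);
            if (!gem)
                    return -ENOENT;

            *offset = my_gem_object(gem)->mmap_offset;
            drm_gem_object_unreference_unlocked(gem);
            return 0;
    }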
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index e90468d5e5c0..51a2cb102b44 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -71,14 +71,16 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
71 return ERR_PTR(ret); 71 return ERR_PTR(ret);
72 72
73 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART; 73 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
74 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size); 74
75 if (!nvbo->gem) { 75 /* Initialize the embedded gem-object. We return a single gem-reference
76 * to the caller, instead of a normal nouveau_bo ttm reference. */
77 ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
78 if (ret) {
76 nouveau_bo_ref(NULL, &nvbo); 79 nouveau_bo_ref(NULL, &nvbo);
77 return ERR_PTR(-ENOMEM); 80 return ERR_PTR(-ENOMEM);
78 } 81 }
79 82
80 nvbo->gem->driver_private = nvbo; 83 return &nvbo->gem;
81 return nvbo->gem;
82} 84}
83 85
84int nouveau_gem_prime_pin(struct drm_gem_object *obj) 86int nouveau_gem_prime_pin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 20c41e73d448..6c220cd3497a 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -5,6 +5,7 @@ config DRM_OMAP
5 depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM 5 depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
6 depends on OMAP2_DSS 6 depends on OMAP2_DSS
7 select DRM_KMS_HELPER 7 select DRM_KMS_HELPER
8 select DRM_KMS_FB_HELPER
8 select FB_SYS_FILLRECT 9 select FB_SYS_FILLRECT
9 select FB_SYS_COPYAREA 10 select FB_SYS_COPYAREA
10 select FB_SYS_IMAGEBLIT 11 select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 2603d909f49c..e7fa3cd96743 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -620,7 +620,6 @@ static struct drm_driver omap_drm_driver = {
620 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 620 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
621 .gem_prime_export = omap_gem_prime_export, 621 .gem_prime_export = omap_gem_prime_export,
622 .gem_prime_import = omap_gem_prime_import, 622 .gem_prime_import = omap_gem_prime_import,
623 .gem_init_object = omap_gem_init_object,
624 .gem_free_object = omap_gem_free_object, 623 .gem_free_object = omap_gem_free_object,
625 .gem_vm_ops = &omap_gem_vm_ops, 624 .gem_vm_ops = &omap_gem_vm_ops,
626 .dumb_create = omap_gem_dumb_create, 625 .dumb_create = omap_gem_dumb_create,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 30b95b736658..07847693cf49 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -220,7 +220,6 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
220int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file, 220int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
221 union omap_gem_size gsize, uint32_t flags, uint32_t *handle); 221 union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
222void omap_gem_free_object(struct drm_gem_object *obj); 222void omap_gem_free_object(struct drm_gem_object *obj);
223int omap_gem_init_object(struct drm_gem_object *obj);
224void *omap_gem_vaddr(struct drm_gem_object *obj); 223void *omap_gem_vaddr(struct drm_gem_object *obj);
225int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, 224int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
226 uint32_t handle, uint64_t *offset); 225 uint32_t handle, uint64_t *offset);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 533f6ebec531..5aec3e81fe24 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1274,11 +1274,6 @@ unlock:
1274 return ret; 1274 return ret;
1275} 1275}
1276 1276
1277int omap_gem_init_object(struct drm_gem_object *obj)
1278{
1279 return -EINVAL; /* unused */
1280}
1281
1282/* don't call directly.. called from GEM core when it is time to actually 1277/* don't call directly.. called from GEM core when it is time to actually
1283 * free the object.. 1278 * free the object..
1284 */ 1279 */
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 9263db117ff8..cb858600185f 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -261,7 +261,7 @@ int omap_drm_irq_install(struct drm_device *dev)
261 mutex_unlock(&dev->struct_mutex); 261 mutex_unlock(&dev->struct_mutex);
262 return -EBUSY; 262 return -EBUSY;
263 } 263 }
264 dev->irq_enabled = 1; 264 dev->irq_enabled = true;
265 mutex_unlock(&dev->struct_mutex); 265 mutex_unlock(&dev->struct_mutex);
266 266
267 /* Before installing handler */ 267 /* Before installing handler */
@@ -272,7 +272,7 @@ int omap_drm_irq_install(struct drm_device *dev)
272 272
273 if (ret < 0) { 273 if (ret < 0) {
274 mutex_lock(&dev->struct_mutex); 274 mutex_lock(&dev->struct_mutex);
275 dev->irq_enabled = 0; 275 dev->irq_enabled = false;
276 mutex_unlock(&dev->struct_mutex); 276 mutex_unlock(&dev->struct_mutex);
277 return ret; 277 return ret;
278 } 278 }
@@ -283,7 +283,7 @@ int omap_drm_irq_install(struct drm_device *dev)
283 283
284 if (ret < 0) { 284 if (ret < 0) {
285 mutex_lock(&dev->struct_mutex); 285 mutex_lock(&dev->struct_mutex);
286 dev->irq_enabled = 0; 286 dev->irq_enabled = false;
287 mutex_unlock(&dev->struct_mutex); 287 mutex_unlock(&dev->struct_mutex);
288 dispc_free_irq(dev); 288 dispc_free_irq(dev);
289 } 289 }
@@ -294,11 +294,12 @@ int omap_drm_irq_install(struct drm_device *dev)
294int omap_drm_irq_uninstall(struct drm_device *dev) 294int omap_drm_irq_uninstall(struct drm_device *dev)
295{ 295{
296 unsigned long irqflags; 296 unsigned long irqflags;
297 int irq_enabled, i; 297 bool irq_enabled;
298 int i;
298 299
299 mutex_lock(&dev->struct_mutex); 300 mutex_lock(&dev->struct_mutex);
300 irq_enabled = dev->irq_enabled; 301 irq_enabled = dev->irq_enabled;
301 dev->irq_enabled = 0; 302 dev->irq_enabled = false;
302 mutex_unlock(&dev->struct_mutex); 303 mutex_unlock(&dev->struct_mutex);
303 304
304 /* 305 /*
@@ -307,9 +308,9 @@ int omap_drm_irq_uninstall(struct drm_device *dev)
307 if (dev->num_crtcs) { 308 if (dev->num_crtcs) {
308 spin_lock_irqsave(&dev->vbl_lock, irqflags); 309 spin_lock_irqsave(&dev->vbl_lock, irqflags);
309 for (i = 0; i < dev->num_crtcs; i++) { 310 for (i = 0; i < dev->num_crtcs; i++) {
310 DRM_WAKEUP(&dev->vbl_queue[i]); 311 DRM_WAKEUP(&dev->vblank[i].queue);
311 dev->vblank_enabled[i] = 0; 312 dev->vblank[i].enabled = false;
312 dev->last_vblank[i] = 313 dev->vblank[i].last =
313 dev->driver->get_vblank_counter(dev, i); 314 dev->driver->get_vblank_counter(dev, i);
314 } 315 }
315 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 316 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index d6c12796023c..037d324bf58f 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -6,6 +6,7 @@ config DRM_QXL
6 select FB_SYS_IMAGEBLIT 6 select FB_SYS_IMAGEBLIT
7 select FB_DEFERRED_IO 7 select FB_DEFERRED_IO
8 select DRM_KMS_HELPER 8 select DRM_KMS_HELPER
9 select DRM_KMS_FB_HELPER
9 select DRM_TTM 10 select DRM_TTM
10 help 11 help
11 QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting. 12 QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 514118ae72d4..fee8748bdca5 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -225,7 +225,6 @@ static struct drm_driver qxl_driver = {
225 .debugfs_init = qxl_debugfs_init, 225 .debugfs_init = qxl_debugfs_init,
226 .debugfs_cleanup = qxl_debugfs_takedown, 226 .debugfs_cleanup = qxl_debugfs_takedown,
227#endif 227#endif
228 .gem_init_object = qxl_gem_object_init,
229 .gem_free_object = qxl_gem_object_free, 228 .gem_free_object = qxl_gem_object_free,
230 .gem_open_object = qxl_gem_object_open, 229 .gem_open_object = qxl_gem_object_open,
231 .gem_close_object = qxl_gem_object_close, 230 .gem_close_object = qxl_gem_object_close,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index f7c9adde46a0..41d22ed26060 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -412,7 +412,6 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
412 struct qxl_surface *surf, 412 struct qxl_surface *surf,
413 struct qxl_bo **qobj, 413 struct qxl_bo **qobj,
414 uint32_t *handle); 414 uint32_t *handle);
415int qxl_gem_object_init(struct drm_gem_object *obj);
416void qxl_gem_object_free(struct drm_gem_object *gobj); 415void qxl_gem_object_free(struct drm_gem_object *gobj);
417int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv); 416int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
418void qxl_gem_object_close(struct drm_gem_object *obj, 417void qxl_gem_object_close(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index 1648e4125af7..b96f0c9d89b2 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -28,12 +28,6 @@
28#include "qxl_drv.h" 28#include "qxl_drv.h"
29#include "qxl_object.h" 29#include "qxl_object.h"
30 30
31int qxl_gem_object_init(struct drm_gem_object *obj)
32{
33 /* we do nothings here */
34 return 0;
35}
36
37void qxl_gem_object_free(struct drm_gem_object *gobj) 31void qxl_gem_object_free(struct drm_gem_object *gobj)
38{ 32{
39 struct qxl_bo *qobj = gem_to_qxl_bo(gobj); 33 struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 00885417ffff..fb3ae07a1469 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -690,8 +690,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
690 690
691 /* set the lane count on the sink */ 691 /* set the lane count on the sink */
692 tmp = dp_info->dp_lane_count; 692 tmp = dp_info->dp_lane_count;
693 if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 && 693 if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
694 dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
695 tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 694 tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
696 radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp); 695 radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
697 696
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 061b227dae0c..c155d6f3fa68 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -499,7 +499,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
499 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); 499 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
500 fp2_gen_cntl = 0; 500 fp2_gen_cntl = 0;
501 501
502 if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) { 502 if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
503 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); 503 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
504 } 504 }
505 505
@@ -536,7 +536,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
536 (RADEON_CRTC_SYNC_TRISTAT | 536 (RADEON_CRTC_SYNC_TRISTAT |
537 RADEON_CRTC_DISPLAY_DIS))); 537 RADEON_CRTC_DISPLAY_DIS)));
538 538
539 if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) { 539 if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
540 WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON)); 540 WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
541 } 541 }
542 542
@@ -554,7 +554,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
554 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); 554 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
555 } 555 }
556 WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); 556 WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
557 if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) { 557 if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
558 WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); 558 WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
559 } 559 }
560 return r; 560 return r;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 9c14a1ba1de4..b01f231c2f19 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -100,7 +100,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
100int radeon_driver_irq_postinstall_kms(struct drm_device *dev); 100int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
101void radeon_driver_irq_uninstall_kms(struct drm_device *dev); 101void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
102irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS); 102irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
103int radeon_gem_object_init(struct drm_gem_object *obj);
104void radeon_gem_object_free(struct drm_gem_object *obj); 103void radeon_gem_object_free(struct drm_gem_object *obj);
105int radeon_gem_object_open(struct drm_gem_object *obj, 104int radeon_gem_object_open(struct drm_gem_object *obj,
106 struct drm_file *file_priv); 105 struct drm_file *file_priv);
@@ -408,7 +407,6 @@ static struct drm_driver kms_driver = {
408 .irq_uninstall = radeon_driver_irq_uninstall_kms, 407 .irq_uninstall = radeon_driver_irq_uninstall_kms,
409 .irq_handler = radeon_driver_irq_handler_kms, 408 .irq_handler = radeon_driver_irq_handler_kms,
410 .ioctls = radeon_ioctls_kms, 409 .ioctls = radeon_ioctls_kms,
411 .gem_init_object = radeon_gem_object_init,
412 .gem_free_object = radeon_gem_object_free, 410 .gem_free_object = radeon_gem_object_free,
413 .gem_open_object = radeon_gem_object_open, 411 .gem_open_object = radeon_gem_object_open,
414 .gem_close_object = radeon_gem_object_close, 412 .gem_close_object = radeon_gem_object_close,
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index dce99c8a5835..805c5e566b9a 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -29,13 +29,6 @@
29#include <drm/radeon_drm.h> 29#include <drm/radeon_drm.h>
30#include "radeon.h" 30#include "radeon.h"
31 31
32int radeon_gem_object_init(struct drm_gem_object *obj)
33{
34 BUG();
35
36 return 0;
37}
38
39void radeon_gem_object_free(struct drm_gem_object *gobj) 32void radeon_gem_object_free(struct drm_gem_object *gobj)
40{ 33{
41 struct radeon_bo *robj = gem_to_radeon_bo(gobj); 34 struct radeon_bo *robj = gem_to_radeon_bo(gobj);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 61580ddc4eb2..d6b36766e8c9 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -191,7 +191,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
191 191
192 switch (info->request) { 192 switch (info->request) {
193 case RADEON_INFO_DEVICE_ID: 193 case RADEON_INFO_DEVICE_ID:
194 *value = dev->pci_device; 194 *value = dev->pdev->device;
195 break; 195 break;
196 case RADEON_INFO_NUM_GB_PIPES: 196 case RADEON_INFO_NUM_GB_PIPES:
197 *value = rdev->num_gb_pipes; 197 *value = rdev->num_gb_pipes;
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index c590cd9dca0b..d8e835ac2c5e 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -4,6 +4,7 @@ config DRM_RCAR_DU
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_KMS_CMA_HELPER 5 select DRM_KMS_CMA_HELPER
6 select DRM_GEM_CMA_HELPER 6 select DRM_GEM_CMA_HELPER
7 select DRM_KMS_FB_HELPER
7 help 8 help
8 Choose this option if you have an R-Car chipset. 9 Choose this option if you have an R-Car chipset.
9 If M is selected the module will be called rcar-du-drm. 10 If M is selected the module will be called rcar-du-drm.
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index ca498d151a76..d1372862d871 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -2,6 +2,7 @@ config DRM_SHMOBILE
2 tristate "DRM Support for SH Mobile" 2 tristate "DRM Support for SH Mobile"
3 depends on DRM && (ARM || SUPERH) 3 depends on DRM && (ARM || SUPERH)
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_KMS_FB_HELPER
5 select DRM_KMS_CMA_HELPER 6 select DRM_KMS_CMA_HELPER
6 select DRM_GEM_CMA_HELPER 7 select DRM_GEM_CMA_HELPER
7 help 8 help
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 7a4d10106906..7c3ef79fcb37 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -2,6 +2,7 @@ config DRM_TILCDC
2 tristate "DRM Support for TI LCDC Display Controller" 2 tristate "DRM Support for TI LCDC Display Controller"
3 depends on DRM && OF && ARM 3 depends on DRM && OF && ARM
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_KMS_FB_HELPER
5 select DRM_KMS_CMA_HELPER 6 select DRM_KMS_CMA_HELPER
6 select DRM_GEM_CMA_HELPER 7 select DRM_GEM_CMA_HELPER
7 select VIDEOMODE_HELPERS 8 select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index 6222af19f456..f02528686cd5 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -8,6 +8,7 @@ config DRM_UDL
8 select FB_SYS_IMAGEBLIT 8 select FB_SYS_IMAGEBLIT
9 select FB_DEFERRED_IO 9 select FB_DEFERRED_IO
10 select DRM_KMS_HELPER 10 select DRM_KMS_HELPER
11 select DRM_KMS_FB_HELPER
11 help 12 help
12 This is a KMS driver for the USB displaylink video adapters. 13 This is a KMS driver for the USB displaylink video adapters.
13 Say M/Y to add support for these devices via drm/kms interfaces. 14 Say M/Y to add support for these devices via drm/kms interfaces.
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 7650dc0d78ce..3ddd6cd98ac1 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -77,7 +77,6 @@ static struct drm_driver driver = {
77 .unload = udl_driver_unload, 77 .unload = udl_driver_unload,
78 78
79 /* gem hooks */ 79 /* gem hooks */
80 .gem_init_object = udl_gem_init_object,
81 .gem_free_object = udl_gem_free_object, 80 .gem_free_object = udl_gem_free_object,
82 .gem_vm_ops = &udl_gem_vm_ops, 81 .gem_vm_ops = &udl_gem_vm_ops,
83 82
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 56aec9409fa3..1fbf7b357f16 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -115,7 +115,6 @@ int udl_dumb_create(struct drm_file *file_priv,
115int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev, 115int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
116 uint32_t handle, uint64_t *offset); 116 uint32_t handle, uint64_t *offset);
117 117
118int udl_gem_init_object(struct drm_gem_object *obj);
119void udl_gem_free_object(struct drm_gem_object *gem_obj); 118void udl_gem_free_object(struct drm_gem_object *gem_obj);
120struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev, 119struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
121 size_t size); 120 size_t size);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8bf646183bac..24ffbe990736 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,13 +107,6 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
107 } 107 }
108} 108}
109 109
110int udl_gem_init_object(struct drm_gem_object *obj)
111{
112 BUG();
113
114 return 0;
115}
116
117static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask) 110static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
118{ 111{
119 struct page **pages; 112 struct page **pages;
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 7e3ad87c366c..927889105483 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -79,7 +79,7 @@ int via_final_context(struct drm_device *dev, int context)
79 79
80 /* Linux specific until context tracking code gets ported to BSD */ 80 /* Linux specific until context tracking code gets ported to BSD */
81 /* Last context, perform cleanup */ 81 /* Last context, perform cleanup */
82 if (dev->ctx_count == 1 && dev->dev_private) { 82 if (list_is_singular(&dev->ctxlist) && dev->dev_private) {
83 DRM_DEBUG("Last Context\n"); 83 DRM_DEBUG("Last Context\n");
84 drm_irq_uninstall(dev); 84 drm_irq_uninstall(dev);
85 via_cleanup_futex(dev_priv); 85 via_cleanup_futex(dev_priv);
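
Dropping ctx_count from struct drm_device means "last context" is now derived from the list itself. list_is_singular() is an O(1) check from <linux/list.h>; a sketch of the condition via_final_context() relies on (my_is_last_context is illustrative):

    #include <drm/drmP.h>

    static bool my_is_last_context(struct drm_device *dev)
    {
            /* True iff ctxlist holds exactly one entry: list_is_singular()
             * expands to !list_empty(head) && head->next == head->prev, so
             * no separate counter has to be kept in sync with the list. */
            return list_is_singular(&dev->ctxlist);
    }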
diff --git a/drivers/gpu/host1x/drm/Kconfig b/drivers/gpu/host1x/drm/Kconfig
index 69853a4de40a..0f36ddd74e87 100644
--- a/drivers/gpu/host1x/drm/Kconfig
+++ b/drivers/gpu/host1x/drm/Kconfig
@@ -2,6 +2,7 @@ config DRM_TEGRA
2 bool "NVIDIA Tegra DRM" 2 bool "NVIDIA Tegra DRM"
3 depends on DRM 3 depends on DRM
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_KMS_FB_HELPER
5 select FB_SYS_FILLRECT 6 select FB_SYS_FILLRECT
6 select FB_SYS_COPYAREA 7 select FB_SYS_COPYAREA
7 select FB_SYS_IMAGEBLIT 8 select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
index 8c61ceeaa12d..df7d90a3a4fa 100644
--- a/drivers/gpu/host1x/drm/drm.c
+++ b/drivers/gpu/host1x/drm/drm.c
@@ -264,7 +264,7 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
264 * core, so we need to set this manually in order to allow the 264 * core, so we need to set this manually in order to allow the
265 * DRM_IOCTL_WAIT_VBLANK to operate correctly. 265 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
266 */ 266 */
267 drm->irq_enabled = 1; 267 drm->irq_enabled = true;
268 268
269 err = drm_vblank_init(drm, drm->mode_config.num_crtc); 269 err = drm_vblank_init(drm, drm->mode_config.num_crtc);
270 if (err < 0) 270 if (err < 0)
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig
index 394254f7d6b5..5032ff7c2259 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/staging/imx-drm/Kconfig
@@ -1,6 +1,7 @@
1config DRM_IMX 1config DRM_IMX
2 tristate "DRM Support for Freescale i.MX" 2 tristate "DRM Support for Freescale i.MX"
3 select DRM_KMS_HELPER 3 select DRM_KMS_HELPER
4 select DRM_KMS_FB_HELPER
4 select VIDEOMODE_HELPERS 5 select VIDEOMODE_HELPERS
5 select DRM_GEM_CMA_HELPER 6 select DRM_GEM_CMA_HELPER
6 select DRM_KMS_CMA_HELPER 7 select DRM_KMS_CMA_HELPER
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index a2e52a0c53c9..c1014eb2907d 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -396,14 +396,14 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
396 396
397 /* 397 /*
398 * enable drm irq mode. 398 * enable drm irq mode.
399 * - with irq_enabled = 1, we can use the vblank feature. 399 * - with irq_enabled = true, we can use the vblank feature.
400 * 400 *
401 * P.S. note that we wouldn't use drm irq handler but 401 * P.S. note that we wouldn't use drm irq handler but
402 * just specific driver own one instead because 402 * just specific driver own one instead because
403 * drm framework supports only one irq handler and 403 * drm framework supports only one irq handler and
404 * drivers can well take care of their interrupts 404 * drivers can well take care of their interrupts
405 */ 405 */
406 drm->irq_enabled = 1; 406 drm->irq_enabled = true;
407 407
408 drm_mode_config_init(drm); 408 drm_mode_config_init(drm);
409 imx_drm_mode_config_init(drm); 409 imx_drm_mode_config_init(drm);
@@ -423,11 +423,11 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
423 goto err_init; 423 goto err_init;
424 424
425 /* 425 /*
426 * with vblank_disable_allowed = 1, vblank interrupt will be disabled 426 * with vblank_disable_allowed = true, vblank interrupt will be disabled
427 * by drm timer once a current process gives up ownership of 427 * by drm timer once a current process gives up ownership of
428 * vblank event.(after drm_vblank_put function is called) 428 * vblank event.(after drm_vblank_put function is called)
429 */ 429 */
430 imxdrm->drm->vblank_disable_allowed = 1; 430 imxdrm->drm->vblank_disable_allowed = true;
431 431
432 if (!imx_drm_device_get()) 432 if (!imx_drm_device_get())
433 ret = -EINVAL; 433 ret = -EINVAL;
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index b46fb45f2cca..2b954adf5bd4 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -433,6 +433,9 @@ struct drm_file {
433 struct drm_master *master; /* master this node is currently associated with 433 struct drm_master *master; /* master this node is currently associated with
434 N.B. not always minor->master */ 434 N.B. not always minor->master */
435 435
436 /* true when the client has asked us to expose stereo 3D mode flags */
437 bool stereo_allowed;
438
436 /** 439 /**
437 * fbs - List of framebuffers associated with this file. 440 * fbs - List of framebuffers associated with this file.
438 * 441 *
@@ -667,8 +670,6 @@ struct drm_gem_object {
667 uint32_t pending_read_domains; 670 uint32_t pending_read_domains;
668 uint32_t pending_write_domain; 671 uint32_t pending_write_domain;
669 672
670 void *driver_private;
671
672 /** 673 /**
673 * dma_buf - dma buf associated with this GEM object 674 * dma_buf - dma buf associated with this GEM object
674 * 675 *
@@ -922,7 +923,6 @@ struct drm_driver {
922 * 923 *
923 * Returns 0 on success. 924 * Returns 0 on success.
924 */ 925 */
925 int (*gem_init_object) (struct drm_gem_object *obj);
926 void (*gem_free_object) (struct drm_gem_object *obj); 926 void (*gem_free_object) (struct drm_gem_object *obj);
927 int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); 927 int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
928 void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); 928 void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
@@ -1081,6 +1081,19 @@ struct drm_pending_vblank_event {
1081 struct drm_event_vblank event; 1081 struct drm_event_vblank event;
1082}; 1082};
1083 1083
1084struct drm_vblank_crtc {
1085 wait_queue_head_t queue; /**< VBLANK wait queue */
1086 struct timeval time[DRM_VBLANKTIME_RBSIZE]; /**< timestamp of current count */
1087 atomic_t count; /**< number of VBLANK interrupts */
 1088 atomic_t refcount; /* number of users of vblank interrupts per crtc */
1089 u32 last; /* protected by dev->vbl_lock, used */
1090 /* for wraparound handling */
1091 u32 last_wait; /* Last vblank seqno waited per CRTC */
1092 unsigned int inmodeset; /* Display driver is setting mode */
1093 bool enabled; /* so we don't call enable more than
1094 once per disable */
1095};
1096
1084/** 1097/**
1085 * DRM device structure. This structure represent a complete card that 1098 * DRM device structure. This structure represent a complete card that
1086 * may contain multiple heads. 1099 * may contain multiple heads.
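
This struct folds the six parallel per-CRTC arrays removed further below into one indexed allocation, so each CRTC's vblank state lives together. A sketch of the access pattern, assuming dev->vblank was sized to num_crtcs by drm_vblank_init() (my_disable_vblanks is illustrative and mirrors the omapdrm uninstall hunk earlier in this series):

    #include <drm/drmP.h>

    static void my_disable_vblanks(struct drm_device *dev)
    {
            unsigned long flags;
            int i;

            spin_lock_irqsave(&dev->vbl_lock, flags);
            for (i = 0; i < dev->num_crtcs; i++) {
                    struct drm_vblank_crtc *vblank = &dev->vblank[i];

                    DRM_WAKEUP(&vblank->queue);
                    vblank->enabled = false;
                    vblank->last = dev->driver->get_vblank_counter(dev, i);
            }
            spin_unlock_irqrestore(&dev->vbl_lock, flags);
    }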
@@ -1105,25 +1118,16 @@ struct drm_device {
1105 atomic_t buf_alloc; /**< Buffer allocation in progress */ 1118 atomic_t buf_alloc; /**< Buffer allocation in progress */
1106 /*@} */ 1119 /*@} */
1107 1120
1108 /** \name Performance counters */
1109 /*@{ */
1110 unsigned long counters;
1111 enum drm_stat_type types[15];
1112 atomic_t counts[15];
1113 /*@} */
1114
1115 struct list_head filelist; 1121 struct list_head filelist;
1116 1122
1117 /** \name Memory management */ 1123 /** \name Memory management */
1118 /*@{ */ 1124 /*@{ */
1119 struct list_head maplist; /**< Linked list of regions */ 1125 struct list_head maplist; /**< Linked list of regions */
1120 int map_count; /**< Number of mappable regions */
1121 struct drm_open_hash map_hash; /**< User token hash table for maps */ 1126 struct drm_open_hash map_hash; /**< User token hash table for maps */
1122 1127
1123 /** \name Context handle management */ 1128 /** \name Context handle management */
1124 /*@{ */ 1129 /*@{ */
1125 struct list_head ctxlist; /**< Linked list of context handles */ 1130 struct list_head ctxlist; /**< Linked list of context handles */
1126 int ctx_count; /**< Number of context handles */
1127 struct mutex ctxlist_mutex; /**< For ctxlist */ 1131 struct mutex ctxlist_mutex; /**< For ctxlist */
1128 1132
1129 struct idr ctx_idr; 1133 struct idr ctx_idr;
@@ -1139,12 +1143,11 @@ struct drm_device {
1139 1143
1140 /** \name Context support */ 1144 /** \name Context support */
1141 /*@{ */ 1145 /*@{ */
1142 int irq_enabled; /**< True if irq handler is enabled */ 1146 bool irq_enabled; /**< True if irq handler is enabled */
1143 __volatile__ long context_flag; /**< Context swapping flag */ 1147 __volatile__ long context_flag; /**< Context swapping flag */
1144 int last_context; /**< Last current context */ 1148 int last_context; /**< Last current context */
1145 /*@} */ 1149 /*@} */
1146 1150
1147 struct work_struct work;
1148 /** \name VBLANK IRQ support */ 1151 /** \name VBLANK IRQ support */
1149 /*@{ */ 1152 /*@{ */
1150 1153
@@ -1154,20 +1157,13 @@ struct drm_device {
1154 * Once the modeset ioctl *has* been called though, we can safely 1157 * Once the modeset ioctl *has* been called though, we can safely
1155 * disable them when unused. 1158 * disable them when unused.
1156 */ 1159 */
1157 int vblank_disable_allowed; 1160 bool vblank_disable_allowed;
1161
1162 /* array of size num_crtcs */
1163 struct drm_vblank_crtc *vblank;
1158 1164
1159 wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
1160 atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
1161 struct timeval *_vblank_time; /**< timestamp of current vblank_count (drivers must alloc right number of fields) */
1162 spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ 1165 spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */
1163 spinlock_t vbl_lock; 1166 spinlock_t vbl_lock;
1164 atomic_t *vblank_refcount; /* number of users of vblank interruptsper crtc */
1165 u32 *last_vblank; /* protected by dev->vbl_lock, used */
1166 /* for wraparound handling */
1167 int *vblank_enabled; /* so we don't call enable more than
1168 once per disable */
1169 int *vblank_inmodeset; /* Display driver is setting mode */
1170 u32 *last_vblank_wait; /* Last vblank seqno waited per CRTC */
1171 struct timer_list vblank_disable_timer; 1167 struct timer_list vblank_disable_timer;
1172 1168
1173 u32 max_vblank_count; /**< size of vblank counter register */ 1169 u32 max_vblank_count; /**< size of vblank counter register */
@@ -1184,8 +1180,6 @@ struct drm_device {
1184 1180
1185 struct device *dev; /**< Device structure */ 1181 struct device *dev; /**< Device structure */
1186 struct pci_dev *pdev; /**< PCI device structure */ 1182 struct pci_dev *pdev; /**< PCI device structure */
1187 int pci_vendor; /**< PCI vendor id */
1188 int pci_device; /**< PCI device id */
1189#ifdef __alpha__ 1183#ifdef __alpha__
1190 struct pci_controller *hose; 1184 struct pci_controller *hose;
1191#endif 1185#endif
@@ -1303,6 +1297,8 @@ extern int drm_getstats(struct drm_device *dev, void *data,
1303 struct drm_file *file_priv); 1297 struct drm_file *file_priv);
1304extern int drm_getcap(struct drm_device *dev, void *data, 1298extern int drm_getcap(struct drm_device *dev, void *data,
1305 struct drm_file *file_priv); 1299 struct drm_file *file_priv);
1300extern int drm_setclientcap(struct drm_device *dev, void *data,
1301 struct drm_file *file_priv);
1306extern int drm_setversion(struct drm_device *dev, void *data, 1302extern int drm_setversion(struct drm_device *dev, void *data,
1307 struct drm_file *file_priv); 1303 struct drm_file *file_priv);
1308extern int drm_noop(struct drm_device *dev, void *data, 1304extern int drm_noop(struct drm_device *dev, void *data,
@@ -1556,8 +1552,6 @@ int drm_gem_init(struct drm_device *dev);
1556void drm_gem_destroy(struct drm_device *dev); 1552void drm_gem_destroy(struct drm_device *dev);
1557void drm_gem_object_release(struct drm_gem_object *obj); 1553void drm_gem_object_release(struct drm_gem_object *obj);
1558void drm_gem_object_free(struct kref *kref); 1554void drm_gem_object_free(struct kref *kref);
1559struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
1560 size_t size);
1561int drm_gem_object_init(struct drm_device *dev, 1555int drm_gem_object_init(struct drm_device *dev,
1562 struct drm_gem_object *obj, size_t size); 1556 struct drm_gem_object *obj, size_t size);
1563void drm_gem_private_object_init(struct drm_device *dev, 1557void drm_gem_private_object_init(struct drm_device *dev,
@@ -1645,9 +1639,11 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map)
1645 1639
1646#include <drm/drm_mem_util.h> 1640#include <drm/drm_mem_util.h>
1647 1641
1648extern int drm_fill_in_dev(struct drm_device *dev, 1642struct drm_device *drm_dev_alloc(struct drm_driver *driver,
1649 const struct pci_device_id *ent, 1643 struct device *parent);
1650 struct drm_driver *driver); 1644void drm_dev_free(struct drm_device *dev);
1645int drm_dev_register(struct drm_device *dev, unsigned long flags);
1646void drm_dev_unregister(struct drm_device *dev);
1651int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type); 1647int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type);
1652/*@}*/ 1648/*@}*/
1653 1649
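
drm_fill_in_dev() gives way to an explicit allocate/register pair with symmetric teardown. A hedged sketch of a bus-agnostic probe/remove using the new entry points (my_driver and both functions are illustrative; error handling is minimal):

    #include <drm/drmP.h>

    extern struct drm_driver my_driver;     /* driver ops assumed elsewhere */

    static int my_probe(struct device *parent)
    {
            struct drm_device *ddev;
            int ret;

            ddev = drm_dev_alloc(&my_driver, parent);
            if (!ddev)
                    return -ENOMEM;

            ret = drm_dev_register(ddev, 0);
            if (ret)
                    drm_dev_free(ddev);
            return ret;
    }

    static void my_remove(struct drm_device *ddev)
    {
            drm_dev_unregister(ddev);       /* reverse of register */
            drm_dev_free(ddev);             /* reverse of alloc */
    }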
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 24f499569a2f..ba407f6b4f1f 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -108,6 +108,7 @@ enum drm_mode_status {
108 MODE_ONE_HEIGHT, /* only one height is supported */ 108 MODE_ONE_HEIGHT, /* only one height is supported */
109 MODE_ONE_SIZE, /* only one resolution is supported */ 109 MODE_ONE_SIZE, /* only one resolution is supported */
110 MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */ 110 MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */
111 MODE_NO_STEREO, /* stereo modes not supported */
111 MODE_UNVERIFIED = -3, /* mode needs to reverified */ 112 MODE_UNVERIFIED = -3, /* mode needs to reverified */
112 MODE_BAD = -2, /* unspecified reason */ 113 MODE_BAD = -2, /* unspecified reason */
113 MODE_ERROR = -1 /* error condition */ 114 MODE_ERROR = -1 /* error condition */
@@ -124,7 +125,10 @@ enum drm_mode_status {
124 .vscan = (vs), .flags = (f), \ 125 .vscan = (vs), .flags = (f), \
125 .base.type = DRM_MODE_OBJECT_MODE 126 .base.type = DRM_MODE_OBJECT_MODE
126 127
127#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */ 128#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
129#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
130
131#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
128 132
129struct drm_display_mode { 133struct drm_display_mode {
130 /* Header */ 134 /* Header */
@@ -155,8 +159,7 @@ struct drm_display_mode {
155 int height_mm; 159 int height_mm;
156 160
157 /* Actual mode we give to hw */ 161 /* Actual mode we give to hw */
158 int clock_index; 162 int crtc_clock; /* in KHz */
159 int synth_clock;
160 int crtc_hdisplay; 163 int crtc_hdisplay;
161 int crtc_hblank_start; 164 int crtc_hblank_start;
162 int crtc_hblank_end; 165 int crtc_hblank_end;
@@ -180,6 +183,11 @@ struct drm_display_mode {
180 int hsync; /* in kHz */ 183 int hsync; /* in kHz */
181}; 184};
182 185
186static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode)
187{
188 return mode->flags & DRM_MODE_FLAG_3D_MASK;
189}
190
183enum drm_connector_status { 191enum drm_connector_status {
184 connector_status_connected = 1, 192 connector_status_connected = 1,
185 connector_status_disconnected = 2, 193 connector_status_disconnected = 2,
@@ -597,6 +605,7 @@ struct drm_connector {
597 int connector_type_id; 605 int connector_type_id;
598 bool interlace_allowed; 606 bool interlace_allowed;
599 bool doublescan_allowed; 607 bool doublescan_allowed;
608 bool stereo_allowed;
600 struct list_head modes; /* list of modes on this connector */ 609 struct list_head modes; /* list of modes on this connector */
601 610
602 enum drm_connector_status status; 611 enum drm_connector_status status;
@@ -964,6 +973,7 @@ extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_m
964extern bool drm_probe_ddc(struct i2c_adapter *adapter); 973extern bool drm_probe_ddc(struct i2c_adapter *adapter);
965extern struct edid *drm_get_edid(struct drm_connector *connector, 974extern struct edid *drm_get_edid(struct drm_connector *connector,
966 struct i2c_adapter *adapter); 975 struct i2c_adapter *adapter);
976extern struct edid *drm_edid_duplicate(const struct edid *edid);
967extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); 977extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
968extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); 978extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
969extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src); 979extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
@@ -975,7 +985,7 @@ extern void drm_mode_config_reset(struct drm_device *dev);
975extern void drm_mode_config_cleanup(struct drm_device *dev); 985extern void drm_mode_config_cleanup(struct drm_device *dev);
976extern void drm_mode_set_name(struct drm_display_mode *mode); 986extern void drm_mode_set_name(struct drm_display_mode *mode);
977extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); 987extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
978extern bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); 988extern bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
979extern int drm_mode_width(const struct drm_display_mode *mode); 989extern int drm_mode_width(const struct drm_display_mode *mode);
980extern int drm_mode_height(const struct drm_display_mode *mode); 990extern int drm_mode_height(const struct drm_display_mode *mode);
981 991
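
Together, connector->stereo_allowed, drm_mode_is_stereo() and the new MODE_NO_STEREO status let mode validation reject 3D modes on sinks that did not opt in. A sketch of the check as a connector mode_valid callback (my_mode_valid is illustrative):

    #include <drm/drm_crtc.h>

    static enum drm_mode_status
    my_mode_valid(struct drm_connector *connector,
                  struct drm_display_mode *mode)
    {
            /* Reject any 3D layout unless the connector opted in. */
            if (drm_mode_is_stereo(mode) && !connector->stereo_allowed)
                    return MODE_NO_STEREO;

            return MODE_OK;
    }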
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index ae8dbfb1207c..a92c3754e3bb 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -77,10 +77,10 @@
77#define DP_DOWNSTREAMPORT_PRESENT 0x005 77#define DP_DOWNSTREAMPORT_PRESENT 0x005
78# define DP_DWN_STRM_PORT_PRESENT (1 << 0) 78# define DP_DWN_STRM_PORT_PRESENT (1 << 0)
79# define DP_DWN_STRM_PORT_TYPE_MASK 0x06 79# define DP_DWN_STRM_PORT_TYPE_MASK 0x06
80/* 00b = DisplayPort */ 80# define DP_DWN_STRM_PORT_TYPE_DP (0 << 1)
81/* 01b = Analog */ 81# define DP_DWN_STRM_PORT_TYPE_ANALOG (1 << 1)
82/* 10b = TMDS or HDMI */ 82# define DP_DWN_STRM_PORT_TYPE_TMDS (2 << 1)
83/* 11b = Other */ 83# define DP_DWN_STRM_PORT_TYPE_OTHER (3 << 1)
84# define DP_FORMAT_CONVERSION (1 << 3) 84# define DP_FORMAT_CONVERSION (1 << 3)
85# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */ 85# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */
86 86
@@ -333,20 +333,20 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
333 333
334 334
335#define DP_LINK_STATUS_SIZE 6 335#define DP_LINK_STATUS_SIZE 6
336bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE], 336bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
337 int lane_count); 337 int lane_count);
338bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE], 338bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
339 int lane_count); 339 int lane_count);
340u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], 340u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
341 int lane); 341 int lane);
342u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], 342u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
343 int lane); 343 int lane);
344 344
345#define DP_RECEIVER_CAP_SIZE 0xf 345#define DP_RECEIVER_CAP_SIZE 0xf
346#define EDP_PSR_RECEIVER_CAP_SIZE 2 346#define EDP_PSR_RECEIVER_CAP_SIZE 2
347 347
348void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]); 348void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
349void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]); 349void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
350 350
351u8 drm_dp_link_rate_to_bw_code(int link_rate); 351u8 drm_dp_link_rate_to_bw_code(int link_rate);
352int drm_dp_bw_code_to_link_rate(u8 link_bw); 352int drm_dp_bw_code_to_link_rate(u8 link_bw);
@@ -379,15 +379,22 @@ struct edp_vsc_psr {
379#define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2) 379#define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2)
380 380
381static inline int 381static inline int
382drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE]) 382drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
383{ 383{
384 return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]); 384 return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
385} 385}
386 386
387static inline u8 387static inline u8
388drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE]) 388drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
389{ 389{
390 return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; 390 return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
391} 391}
392 392
393static inline bool
394drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
395{
396 return dpcd[DP_DPCD_REV] >= 0x11 &&
397 (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
398}
399
393#endif /* _DRM_DP_HELPER_H_ */ 400#endif /* _DRM_DP_HELPER_H_ */
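
The open-coded DPCD test removed from radeon earlier in this series now lives in drm_dp_enhanced_frame_cap(), and the downstream-port comments become named values. A sketch of both against a cached receiver-capability buffer (the my_* helpers are illustrative):

    #include <drm/drm_dp_helper.h>

    static u8 my_lane_count_set(const u8 dpcd[DP_RECEIVER_CAP_SIZE], u8 lanes)
    {
            u8 tmp = lanes;

            /* DPCD rev 1.1+ with the enhanced-frame bit in MAX_LANE_COUNT */
            if (drm_dp_enhanced_frame_cap(dpcd))
                    tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
            return tmp;
    }

    static bool my_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
    {
            u8 type = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
                      DP_DWN_STRM_PORT_TYPE_MASK;

            return type == DP_DWN_STRM_PORT_TYPE_TMDS;
    }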
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index fcabc42d66ab..5ad9a4e2bc59 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -93,8 +93,16 @@ struct cpufreq_policy {
93#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */ 93#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */
94#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ 94#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
95 95
96#ifdef CONFIG_CPU_FREQ
96struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); 97struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
97void cpufreq_cpu_put(struct cpufreq_policy *policy); 98void cpufreq_cpu_put(struct cpufreq_policy *policy);
99#else
100static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
101{
102 return NULL;
103}
104static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
105#endif
98 106
99static inline bool policy_is_shared(struct cpufreq_policy *policy) 107static inline bool policy_is_shared(struct cpufreq_policy *policy)
100{ 108{
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index ece867889cc7..9b24d65fed72 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -611,12 +611,37 @@ struct drm_gem_open {
611 __u64 size; 611 __u64 size;
612}; 612};
613 613
614#define DRM_CAP_DUMB_BUFFER 0x1
615#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
616#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
617#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
618#define DRM_CAP_PRIME 0x5
619#define DRM_PRIME_CAP_IMPORT 0x1
620#define DRM_PRIME_CAP_EXPORT 0x2
621#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
622#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
623
614/** DRM_IOCTL_GET_CAP ioctl argument type */ 624/** DRM_IOCTL_GET_CAP ioctl argument type */
615struct drm_get_cap { 625struct drm_get_cap {
616 __u64 capability; 626 __u64 capability;
617 __u64 value; 627 __u64 value;
618}; 628};
619 629
630/**
631 * DRM_CLIENT_CAP_STEREO_3D
632 *
633 * if set to 1, the DRM core will expose the stereo 3D capabilities of the
634 * monitor by advertising the supported 3D layouts in the flags of struct
635 * drm_mode_modeinfo.
636 */
637#define DRM_CLIENT_CAP_STEREO_3D 1
638
639/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
640struct drm_set_client_cap {
641 __u64 capability;
642 __u64 value;
643};
644
620#define DRM_CLOEXEC O_CLOEXEC 645#define DRM_CLOEXEC O_CLOEXEC
621struct drm_prime_handle { 646struct drm_prime_handle {
622 __u32 handle; 647 __u32 handle;
@@ -649,6 +674,7 @@ struct drm_prime_handle {
649#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink) 674#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
650#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open) 675#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
651#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap) 676#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap)
677#define DRM_IOCTL_SET_CLIENT_CAP DRM_IOW( 0x0d, struct drm_set_client_cap)
652 678
653#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) 679#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
654#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth) 680#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
@@ -774,17 +800,6 @@ struct drm_event_vblank {
774 __u32 reserved; 800 __u32 reserved;
775}; 801};
776 802
777#define DRM_CAP_DUMB_BUFFER 0x1
778#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
779#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
780#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
781#define DRM_CAP_PRIME 0x5
782#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
783#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
784
785#define DRM_PRIME_CAP_IMPORT 0x1
786#define DRM_PRIME_CAP_EXPORT 0x2
787
788/* typedef area */ 803/* typedef area */
789#ifndef __KERNEL__ 804#ifndef __KERNEL__
790typedef struct drm_clip_rect drm_clip_rect_t; 805typedef struct drm_clip_rect drm_clip_rect_t;
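
From userspace the new ioctl is a single write-only call made once per open file; until it succeeds, stereo modes stay hidden from that client. A minimal sketch, assuming libdrm headers new enough to carry the define (the device path is an example):

    #include <fcntl.h>
    #include <stdio.h>
    #include <xf86drm.h>    /* drmIoctl() */
    #include <drm.h>

    int main(void)
    {
            struct drm_set_client_cap cap = {
                    .capability = DRM_CLIENT_CAP_STEREO_3D,
                    .value = 1,     /* expose 3D layouts in mode flags */
            };
            int fd = open("/dev/dri/card0", O_RDWR);

            if (fd < 0 || drmIoctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap))
                    perror("SET_CLIENT_CAP");
            return 0;
    }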
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 28acbaf4a81e..f104c2603ebe 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -44,20 +44,35 @@
44 44
45/* Video mode flags */ 45/* Video mode flags */
46/* bit compatible with the xorg definitions. */ 46/* bit compatible with the xorg definitions. */
47#define DRM_MODE_FLAG_PHSYNC (1<<0) 47#define DRM_MODE_FLAG_PHSYNC (1<<0)
48#define DRM_MODE_FLAG_NHSYNC (1<<1) 48#define DRM_MODE_FLAG_NHSYNC (1<<1)
49#define DRM_MODE_FLAG_PVSYNC (1<<2) 49#define DRM_MODE_FLAG_PVSYNC (1<<2)
50#define DRM_MODE_FLAG_NVSYNC (1<<3) 50#define DRM_MODE_FLAG_NVSYNC (1<<3)
51#define DRM_MODE_FLAG_INTERLACE (1<<4) 51#define DRM_MODE_FLAG_INTERLACE (1<<4)
52#define DRM_MODE_FLAG_DBLSCAN (1<<5) 52#define DRM_MODE_FLAG_DBLSCAN (1<<5)
53#define DRM_MODE_FLAG_CSYNC (1<<6) 53#define DRM_MODE_FLAG_CSYNC (1<<6)
54#define DRM_MODE_FLAG_PCSYNC (1<<7) 54#define DRM_MODE_FLAG_PCSYNC (1<<7)
55#define DRM_MODE_FLAG_NCSYNC (1<<8) 55#define DRM_MODE_FLAG_NCSYNC (1<<8)
56#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */ 56#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
57#define DRM_MODE_FLAG_BCAST (1<<10) 57#define DRM_MODE_FLAG_BCAST (1<<10)
58#define DRM_MODE_FLAG_PIXMUX (1<<11) 58#define DRM_MODE_FLAG_PIXMUX (1<<11)
59#define DRM_MODE_FLAG_DBLCLK (1<<12) 59#define DRM_MODE_FLAG_DBLCLK (1<<12)
60#define DRM_MODE_FLAG_CLKDIV2 (1<<13) 60#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
61 /*
 62 * When adding a new stereo mode, don't forget to adjust DRM_MODE_FLAG_3D_MAX
63 * (define not exposed to user space).
64 */
65#define DRM_MODE_FLAG_3D_MASK (0x1f<<14)
66#define DRM_MODE_FLAG_3D_NONE (0<<14)
67#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
68#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14)
69#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14)
70#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL (4<<14)
71#define DRM_MODE_FLAG_3D_L_DEPTH (5<<14)
72#define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14)
73#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
74#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
75
61 76
62/* DPMS flags */ 77/* DPMS flags */
63/* bit compatible with the xorg definitions. */ 78/* bit compatible with the xorg definitions. */
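
Because the 3D layout is a 5-bit field rather than independent bits, consumers mask with DRM_MODE_FLAG_3D_MASK before comparing. A sketch of decoding a mode's layout (my_3d_layout_name is illustrative; the remaining cases follow the same shape):

    #include <drm/drm_mode.h>

    static const char *my_3d_layout_name(__u32 flags)
    {
            switch (flags & DRM_MODE_FLAG_3D_MASK) {
            case DRM_MODE_FLAG_3D_FRAME_PACKING:
                    return "frame packing";
            case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
                    return "top and bottom";
            case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
                    return "side by side (half)";
            case DRM_MODE_FLAG_3D_NONE:
            default:
                    return "2D";
            }
    }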
@@ -165,6 +180,7 @@ struct drm_mode_get_plane_res {
165#define DRM_MODE_ENCODER_LVDS 3 180#define DRM_MODE_ENCODER_LVDS 3
166#define DRM_MODE_ENCODER_TVDAC 4 181#define DRM_MODE_ENCODER_TVDAC 4
167#define DRM_MODE_ENCODER_VIRTUAL 5 182#define DRM_MODE_ENCODER_VIRTUAL 5
183#define DRM_MODE_ENCODER_DSI 6
168 184
169struct drm_mode_get_encoder { 185struct drm_mode_get_encoder {
170 __u32 encoder_id; 186 __u32 encoder_id;
@@ -203,6 +219,7 @@ struct drm_mode_get_encoder {
203#define DRM_MODE_CONNECTOR_TV 13 219#define DRM_MODE_CONNECTOR_TV 13
204#define DRM_MODE_CONNECTOR_eDP 14 220#define DRM_MODE_CONNECTOR_eDP 14
205#define DRM_MODE_CONNECTOR_VIRTUAL 15 221#define DRM_MODE_CONNECTOR_VIRTUAL 15
222#define DRM_MODE_CONNECTOR_DSI 16
206 223
207struct drm_mode_get_connector { 224struct drm_mode_get_connector {
208 225
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 55bb5729bd78..3a4e97bd8607 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -38,10 +38,10 @@
38 * 38 *
39 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch 39 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
40 * event from the gpu l3 cache. Additional information supplied is ROW, 40 * event from the gpu l3 cache. Additional information supplied is ROW,
41 * BANK, SUBBANK of the affected cacheline. Userspace should keep track of 41 * BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
42 * these events and if a specific cache-line seems to have a persistent 42 * track of these events and if a specific cache-line seems to have a
43 * error remap it with the l3 remapping tool supplied in intel-gpu-tools. 43 * persistent error remap it with the l3 remapping tool supplied in
44 * The value supplied with the event is always 1. 44 * intel-gpu-tools. The value supplied with the event is always 1.
45 * 45 *
46 * I915_ERROR_UEVENT - Generated upon error detection, currently only via 46 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
47 * hangcheck. The error detection event is a good indicator of when things 47 * hangcheck. The error detection event is a good indicator of when things