59 files changed, 8422 insertions, 5641 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 2fb9a5457522..30b3651d642b 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
| @@ -4012,7 +4012,6 @@ int num_ioctls;</synopsis> | |||
| 4012 | <title>Frontbuffer Tracking</title> | 4012 | <title>Frontbuffer Tracking</title> |
| 4013 | !Pdrivers/gpu/drm/i915/intel_frontbuffer.c frontbuffer tracking | 4013 | !Pdrivers/gpu/drm/i915/intel_frontbuffer.c frontbuffer tracking |
| 4014 | !Idrivers/gpu/drm/i915/intel_frontbuffer.c | 4014 | !Idrivers/gpu/drm/i915/intel_frontbuffer.c |
| 4015 | !Fdrivers/gpu/drm/i915/intel_drv.h intel_frontbuffer_flip | ||
| 4016 | !Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb | 4015 | !Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb |
| 4017 | </sect2> | 4016 | </sect2> |
| 4018 | <sect2> | 4017 | <sect2> |
| @@ -4045,6 +4044,11 @@ int num_ioctls;</synopsis> | |||
| 4045 | </para> | 4044 | </para> |
| 4046 | </sect2> | 4045 | </sect2> |
| 4047 | <sect2> | 4046 | <sect2> |
| 4047 | <title>Hotplug</title> | ||
| 4048 | !Pdrivers/gpu/drm/i915/intel_hotplug.c Hotplug | ||
| 4049 | !Idrivers/gpu/drm/i915/intel_hotplug.c | ||
| 4050 | </sect2> | ||
| 4051 | <sect2> | ||
| 4048 | <title>High Definition Audio</title> | 4052 | <title>High Definition Audio</title> |
| 4049 | !Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port | 4053 | !Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port |
| 4050 | !Idrivers/gpu/drm/i915/intel_audio.c | 4054 | !Idrivers/gpu/drm/i915/intel_audio.c |
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 315b86106572..05630dfcb9f4 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
| @@ -207,7 +207,6 @@ CONFIG_AGP_AMD64=y | |||
| 207 | CONFIG_AGP_INTEL=y | 207 | CONFIG_AGP_INTEL=y |
| 208 | CONFIG_DRM=y | 208 | CONFIG_DRM=y |
| 209 | CONFIG_DRM_I915=y | 209 | CONFIG_DRM_I915=y |
| 210 | CONFIG_DRM_I915_KMS=y | ||
| 211 | CONFIG_FB_MODE_HELPERS=y | 210 | CONFIG_FB_MODE_HELPERS=y |
| 212 | CONFIG_FB_TILEBLITTING=y | 211 | CONFIG_FB_TILEBLITTING=y |
| 213 | CONFIG_FB_EFI=y | 212 | CONFIG_FB_EFI=y |
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index c6dea3f6917b..1341a94cc779 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
| @@ -1408,8 +1408,8 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, | |||
| 1408 | } | 1408 | } |
| 1409 | EXPORT_SYMBOL(intel_gmch_probe); | 1409 | EXPORT_SYMBOL(intel_gmch_probe); |
| 1410 | 1410 | ||
| 1411 | void intel_gtt_get(size_t *gtt_total, size_t *stolen_size, | 1411 | void intel_gtt_get(u64 *gtt_total, size_t *stolen_size, |
| 1412 | phys_addr_t *mappable_base, unsigned long *mappable_end) | 1412 | phys_addr_t *mappable_base, u64 *mappable_end) |
| 1413 | { | 1413 | { |
| 1414 | *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT; | 1414 | *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT; |
| 1415 | *stolen_size = intel_private.stolen_size; | 1415 | *stolen_size = intel_private.stolen_size; |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 9b6cdcbbe5bb..a717d18e7a97 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
| @@ -4471,9 +4471,7 @@ static int drm_property_replace_global_blob(struct drm_device *dev, | |||
| 4471 | goto err_created; | 4471 | goto err_created; |
| 4472 | } | 4472 | } |
| 4473 | 4473 | ||
| 4474 | if (old_blob) | 4474 | drm_property_unreference_blob(old_blob); |
| 4475 | drm_property_unreference_blob(old_blob); | ||
| 4476 | |||
| 4477 | *replace = new_blob; | 4475 | *replace = new_blob; |
| 4478 | 4476 | ||
| 4479 | return 0; | 4477 | return 0; |
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 74acca9bcd9d..eb87e2538861 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
| @@ -36,15 +36,6 @@ config DRM_I915 | |||
| 36 | i810 driver instead, and the Atom z5xx series has an entirely | 36 | i810 driver instead, and the Atom z5xx series has an entirely |
| 37 | different implementation. | 37 | different implementation. |
| 38 | 38 | ||
| 39 | config DRM_I915_KMS | ||
| 40 | bool "Enable modesetting on intel by default" | ||
| 41 | depends on DRM_I915 | ||
| 42 | default y | ||
| 43 | help | ||
| 44 | Choose this option if you want kernel modesetting enabled by default. | ||
| 45 | |||
| 46 | If in doubt, say "Y". | ||
| 47 | |||
| 48 | config DRM_I915_FBDEV | 39 | config DRM_I915_FBDEV |
| 49 | bool "Enable legacy fbdev support for the modesetting intel driver" | 40 | bool "Enable legacy fbdev support for the modesetting intel driver" |
| 50 | depends on DRM_I915 | 41 | depends on DRM_I915 |
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b7ddf48e1d75..e52e01251644 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
| @@ -34,7 +34,9 @@ i915-y += i915_cmd_parser.o \ | |||
| 34 | i915_gpu_error.o \ | 34 | i915_gpu_error.o \ |
| 35 | i915_irq.o \ | 35 | i915_irq.o \ |
| 36 | i915_trace_points.o \ | 36 | i915_trace_points.o \ |
| 37 | intel_hotplug.o \ | ||
| 37 | intel_lrc.o \ | 38 | intel_lrc.o \ |
| 39 | intel_mocs.o \ | ||
| 38 | intel_ringbuffer.o \ | 40 | intel_ringbuffer.o \ |
| 39 | intel_uncore.o | 41 | intel_uncore.o |
| 40 | 42 | ||
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 89b08a896d20..732ce8785945 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
| @@ -22,6 +22,7 @@ | |||
| 22 | * | 22 | * |
| 23 | * Authors: | 23 | * Authors: |
| 24 | * Eric Anholt <eric@anholt.net> | 24 | * Eric Anholt <eric@anholt.net> |
| 25 | * Thomas Richter <thor@math.tu-berlin.de> | ||
| 25 | * | 26 | * |
| 26 | * Minor modifications (Dithering enable): | 27 | * Minor modifications (Dithering enable): |
| 27 | * Thomas Richter <thor@math.tu-berlin.de> | 28 | * Thomas Richter <thor@math.tu-berlin.de> |
| @@ -90,7 +91,7 @@ | |||
| 90 | /* | 91 | /* |
| 91 | * LCD Vertical Display Size | 92 | * LCD Vertical Display Size |
| 92 | */ | 93 | */ |
| 93 | #define VR21 0x20 | 94 | #define VR21 0x21 |
| 94 | 95 | ||
| 95 | /* | 96 | /* |
| 96 | * Panel power down status | 97 | * Panel power down status |
| @@ -155,16 +156,33 @@ | |||
| 155 | # define VR8F_POWER_MASK (0x3c) | 156 | # define VR8F_POWER_MASK (0x3c) |
| 156 | # define VR8F_POWER_POS (2) | 157 | # define VR8F_POWER_POS (2) |
| 157 | 158 | ||
| 159 | /* Some Bios implementations do not restore the DVO state upon | ||
| 160 | * resume from standby. Thus, this driver has to handle it | ||
| 161 | * instead. The following list contains all registers that | ||
| 162 | * require saving. | ||
| 163 | */ | ||
| 164 | static const uint16_t backup_addresses[] = { | ||
| 165 | 0x11, 0x12, | ||
| 166 | 0x18, 0x19, 0x1a, 0x1f, | ||
| 167 | 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, | ||
| 168 | 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, | ||
| 169 | 0x8e, 0x8f, | ||
| 170 | 0x10 /* this must come last */ | ||
| 171 | }; | ||
| 172 | |||
| 158 | 173 | ||
| 159 | struct ivch_priv { | 174 | struct ivch_priv { |
| 160 | bool quiet; | 175 | bool quiet; |
| 161 | 176 | ||
| 162 | uint16_t width, height; | 177 | uint16_t width, height; |
| 178 | |||
| 179 | /* Register backup */ | ||
| 180 | |||
| 181 | uint16_t reg_backup[ARRAY_SIZE(backup_addresses)]; | ||
| 163 | }; | 182 | }; |
| 164 | 183 | ||
| 165 | 184 | ||
| 166 | static void ivch_dump_regs(struct intel_dvo_device *dvo); | 185 | static void ivch_dump_regs(struct intel_dvo_device *dvo); |
| 167 | |||
| 168 | /** | 186 | /** |
| 169 | * Reads a register on the ivch. | 187 | * Reads a register on the ivch. |
| 170 | * | 188 | * |
| @@ -246,6 +264,7 @@ static bool ivch_init(struct intel_dvo_device *dvo, | |||
| 246 | { | 264 | { |
| 247 | struct ivch_priv *priv; | 265 | struct ivch_priv *priv; |
| 248 | uint16_t temp; | 266 | uint16_t temp; |
| 267 | int i; | ||
| 249 | 268 | ||
| 250 | priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL); | 269 | priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL); |
| 251 | if (priv == NULL) | 270 | if (priv == NULL) |
| @@ -273,6 +292,14 @@ static bool ivch_init(struct intel_dvo_device *dvo, | |||
| 273 | ivch_read(dvo, VR20, &priv->width); | 292 | ivch_read(dvo, VR20, &priv->width); |
| 274 | ivch_read(dvo, VR21, &priv->height); | 293 | ivch_read(dvo, VR21, &priv->height); |
| 275 | 294 | ||
| 295 | /* Make a backup of the registers to be able to restore them | ||
| 296 | * upon suspend. | ||
| 297 | */ | ||
| 298 | for (i = 0; i < ARRAY_SIZE(backup_addresses); i++) | ||
| 299 | ivch_read(dvo, backup_addresses[i], priv->reg_backup + i); | ||
| 300 | |||
| 301 | ivch_dump_regs(dvo); | ||
| 302 | |||
| 276 | return true; | 303 | return true; |
| 277 | 304 | ||
| 278 | out: | 305 | out: |
| @@ -294,12 +321,31 @@ static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo, | |||
| 294 | return MODE_OK; | 321 | return MODE_OK; |
| 295 | } | 322 | } |
| 296 | 323 | ||
| 324 | /* Restore the DVO registers after a resume | ||
| 325 | * from RAM. Registers have been saved during | ||
| 326 | * the initialization. | ||
| 327 | */ | ||
| 328 | static void ivch_reset(struct intel_dvo_device *dvo) | ||
| 329 | { | ||
| 330 | struct ivch_priv *priv = dvo->dev_priv; | ||
| 331 | int i; | ||
| 332 | |||
| 333 | DRM_DEBUG_KMS("Resetting the IVCH registers\n"); | ||
| 334 | |||
| 335 | ivch_write(dvo, VR10, 0x0000); | ||
| 336 | |||
| 337 | for (i = 0; i < ARRAY_SIZE(backup_addresses); i++) | ||
| 338 | ivch_write(dvo, backup_addresses[i], priv->reg_backup[i]); | ||
| 339 | } | ||
| 340 | |||
| 297 | /** Sets the power state of the panel connected to the ivch */ | 341 | /** Sets the power state of the panel connected to the ivch */ |
| 298 | static void ivch_dpms(struct intel_dvo_device *dvo, bool enable) | 342 | static void ivch_dpms(struct intel_dvo_device *dvo, bool enable) |
| 299 | { | 343 | { |
| 300 | int i; | 344 | int i; |
| 301 | uint16_t vr01, vr30, backlight; | 345 | uint16_t vr01, vr30, backlight; |
| 302 | 346 | ||
| 347 | ivch_reset(dvo); | ||
| 348 | |||
| 303 | /* Set the new power state of the panel. */ | 349 | /* Set the new power state of the panel. */ |
| 304 | if (!ivch_read(dvo, VR01, &vr01)) | 350 | if (!ivch_read(dvo, VR01, &vr01)) |
| 305 | return; | 351 | return; |
| @@ -308,6 +354,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable) | |||
| 308 | backlight = 1; | 354 | backlight = 1; |
| 309 | else | 355 | else |
| 310 | backlight = 0; | 356 | backlight = 0; |
| 357 | |||
| 311 | ivch_write(dvo, VR80, backlight); | 358 | ivch_write(dvo, VR80, backlight); |
| 312 | 359 | ||
| 313 | if (enable) | 360 | if (enable) |
| @@ -334,6 +381,8 @@ static bool ivch_get_hw_state(struct intel_dvo_device *dvo) | |||
| 334 | { | 381 | { |
| 335 | uint16_t vr01; | 382 | uint16_t vr01; |
| 336 | 383 | ||
| 384 | ivch_reset(dvo); | ||
| 385 | |||
| 337 | /* Set the new power state of the panel. */ | 386 | /* Set the new power state of the panel. */ |
| 338 | if (!ivch_read(dvo, VR01, &vr01)) | 387 | if (!ivch_read(dvo, VR01, &vr01)) |
| 339 | return false; | 388 | return false; |
| @@ -348,11 +397,15 @@ static void ivch_mode_set(struct intel_dvo_device *dvo, | |||
| 348 | struct drm_display_mode *mode, | 397 | struct drm_display_mode *mode, |
| 349 | struct drm_display_mode *adjusted_mode) | 398 | struct drm_display_mode *adjusted_mode) |
| 350 | { | 399 | { |
| 400 | struct ivch_priv *priv = dvo->dev_priv; | ||
| 351 | uint16_t vr40 = 0; | 401 | uint16_t vr40 = 0; |
| 352 | uint16_t vr01 = 0; | 402 | uint16_t vr01 = 0; |
| 353 | uint16_t vr10; | 403 | uint16_t vr10; |
| 354 | 404 | ||
| 355 | ivch_read(dvo, VR10, &vr10); | 405 | ivch_reset(dvo); |
| 406 | |||
| 407 | vr10 = priv->reg_backup[ARRAY_SIZE(backup_addresses) - 1]; | ||
| 408 | |||
| 356 | /* Enable dithering for 18 bpp pipelines */ | 409 | /* Enable dithering for 18 bpp pipelines */ |
| 357 | vr10 &= VR10_INTERFACE_DEPTH_MASK; | 410 | vr10 &= VR10_INTERFACE_DEPTH_MASK; |
| 358 | if (vr10 == VR10_INTERFACE_2X18 || vr10 == VR10_INTERFACE_1X18) | 411 | if (vr10 == VR10_INTERFACE_2X18 || vr10 == VR10_INTERFACE_1X18) |
| @@ -366,7 +419,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo, | |||
| 366 | uint16_t x_ratio, y_ratio; | 419 | uint16_t x_ratio, y_ratio; |
| 367 | 420 | ||
| 368 | vr01 |= VR01_PANEL_FIT_ENABLE; | 421 | vr01 |= VR01_PANEL_FIT_ENABLE; |
| 369 | vr40 |= VR40_CLOCK_GATING_ENABLE | VR40_ENHANCED_PANEL_FITTING; | 422 | vr40 |= VR40_CLOCK_GATING_ENABLE; |
| 370 | x_ratio = (((mode->hdisplay - 1) << 16) / | 423 | x_ratio = (((mode->hdisplay - 1) << 16) / |
| 371 | (adjusted_mode->hdisplay - 1)) >> 2; | 424 | (adjusted_mode->hdisplay - 1)) >> 2; |
| 372 | y_ratio = (((mode->vdisplay - 1) << 16) / | 425 | y_ratio = (((mode->vdisplay - 1) << 16) / |
| @@ -381,8 +434,6 @@ static void ivch_mode_set(struct intel_dvo_device *dvo, | |||
| 381 | 434 | ||
| 382 | ivch_write(dvo, VR01, vr01); | 435 | ivch_write(dvo, VR01, vr01); |
| 383 | ivch_write(dvo, VR40, vr40); | 436 | ivch_write(dvo, VR40, vr40); |
| 384 | |||
| 385 | ivch_dump_regs(dvo); | ||
| 386 | } | 437 | } |
| 387 | 438 | ||
| 388 | static void ivch_dump_regs(struct intel_dvo_device *dvo) | 439 | static void ivch_dump_regs(struct intel_dvo_device *dvo) |
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 306d9e4e5cf3..430571b977db 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
| @@ -131,7 +131,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = { | |||
| 131 | .mask = MI_GLOBAL_GTT, | 131 | .mask = MI_GLOBAL_GTT, |
| 132 | .expected = 0, | 132 | .expected = 0, |
| 133 | }}, ), | 133 | }}, ), |
| 134 | CMD( MI_LOAD_REGISTER_MEM, SMI, !F, 0xFF, W | B, | 134 | CMD( MI_LOAD_REGISTER_MEM(1), SMI, !F, 0xFF, W | B, |
| 135 | .reg = { .offset = 1, .mask = 0x007FFFFC }, | 135 | .reg = { .offset = 1, .mask = 0x007FFFFC }, |
| 136 | .bits = {{ | 136 | .bits = {{ |
| 137 | .offset = 0, | 137 | .offset = 0, |
| @@ -1021,7 +1021,7 @@ static bool check_cmd(const struct intel_engine_cs *ring, | |||
| 1021 | * only MI_LOAD_REGISTER_IMM commands. | 1021 | * only MI_LOAD_REGISTER_IMM commands. |
| 1022 | */ | 1022 | */ |
| 1023 | if (reg_addr == OACONTROL) { | 1023 | if (reg_addr == OACONTROL) { |
| 1024 | if (desc->cmd.value == MI_LOAD_REGISTER_MEM) { | 1024 | if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) { |
| 1025 | DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n"); | 1025 | DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n"); |
| 1026 | return false; | 1026 | return false; |
| 1027 | } | 1027 | } |
| @@ -1035,7 +1035,7 @@ static bool check_cmd(const struct intel_engine_cs *ring, | |||
| 1035 | * allowed mask/value pair given in the whitelist entry. | 1035 | * allowed mask/value pair given in the whitelist entry. |
| 1036 | */ | 1036 | */ |
| 1037 | if (reg->mask) { | 1037 | if (reg->mask) { |
| 1038 | if (desc->cmd.value == MI_LOAD_REGISTER_MEM) { | 1038 | if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) { |
| 1039 | DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n", | 1039 | DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n", |
| 1040 | reg_addr); | 1040 | reg_addr); |
| 1041 | return false; | 1041 | return false; |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 82bbe3f2a7e1..bc817da9fef7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
| @@ -117,6 +117,20 @@ static inline const char *get_global_flag(struct drm_i915_gem_object *obj) | |||
| 117 | return i915_gem_obj_to_ggtt(obj) ? "g" : " "; | 117 | return i915_gem_obj_to_ggtt(obj) ? "g" : " "; |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj) | ||
| 121 | { | ||
| 122 | u64 size = 0; | ||
| 123 | struct i915_vma *vma; | ||
| 124 | |||
| 125 | list_for_each_entry(vma, &obj->vma_list, vma_link) { | ||
| 126 | if (i915_is_ggtt(vma->vm) && | ||
| 127 | drm_mm_node_allocated(&vma->node)) | ||
| 128 | size += vma->node.size; | ||
| 129 | } | ||
| 130 | |||
| 131 | return size; | ||
| 132 | } | ||
| 133 | |||
| 120 | static void | 134 | static void |
| 121 | describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) | 135 | describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) |
| 122 | { | 136 | { |
| @@ -156,13 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) | |||
| 156 | if (obj->fence_reg != I915_FENCE_REG_NONE) | 170 | if (obj->fence_reg != I915_FENCE_REG_NONE) |
| 157 | seq_printf(m, " (fence: %d)", obj->fence_reg); | 171 | seq_printf(m, " (fence: %d)", obj->fence_reg); |
| 158 | list_for_each_entry(vma, &obj->vma_list, vma_link) { | 172 | list_for_each_entry(vma, &obj->vma_list, vma_link) { |
| 159 | if (!i915_is_ggtt(vma->vm)) | 173 | seq_printf(m, " (%sgtt offset: %08llx, size: %08llx", |
| 160 | seq_puts(m, " (pp"); | 174 | i915_is_ggtt(vma->vm) ? "g" : "pp", |
| 175 | vma->node.start, vma->node.size); | ||
| 176 | if (i915_is_ggtt(vma->vm)) | ||
| 177 | seq_printf(m, ", type: %u)", vma->ggtt_view.type); | ||
| 161 | else | 178 | else |
| 162 | seq_puts(m, " (g"); | 179 | seq_puts(m, ")"); |
| 163 | seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)", | ||
| 164 | vma->node.start, vma->node.size, | ||
| 165 | vma->ggtt_view.type); | ||
| 166 | } | 180 | } |
| 167 | if (obj->stolen) | 181 | if (obj->stolen) |
| 168 | seq_printf(m, " (stolen: %08llx)", obj->stolen->start); | 182 | seq_printf(m, " (stolen: %08llx)", obj->stolen->start); |
| @@ -198,7 +212,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | |||
| 198 | struct drm_i915_private *dev_priv = dev->dev_private; | 212 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 199 | struct i915_address_space *vm = &dev_priv->gtt.base; | 213 | struct i915_address_space *vm = &dev_priv->gtt.base; |
| 200 | struct i915_vma *vma; | 214 | struct i915_vma *vma; |
| 201 | size_t total_obj_size, total_gtt_size; | 215 | u64 total_obj_size, total_gtt_size; |
| 202 | int count, ret; | 216 | int count, ret; |
| 203 | 217 | ||
| 204 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 218 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
| @@ -231,7 +245,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | |||
| 231 | } | 245 | } |
| 232 | mutex_unlock(&dev->struct_mutex); | 246 | mutex_unlock(&dev->struct_mutex); |
| 233 | 247 | ||
| 234 | seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", | 248 | seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", |
| 235 | count, total_obj_size, total_gtt_size); | 249 | count, total_obj_size, total_gtt_size); |
| 236 | return 0; | 250 | return 0; |
| 237 | } | 251 | } |
| @@ -253,7 +267,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) | |||
| 253 | struct drm_device *dev = node->minor->dev; | 267 | struct drm_device *dev = node->minor->dev; |
| 254 | struct drm_i915_private *dev_priv = dev->dev_private; | 268 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 255 | struct drm_i915_gem_object *obj; | 269 | struct drm_i915_gem_object *obj; |
| 256 | size_t total_obj_size, total_gtt_size; | 270 | u64 total_obj_size, total_gtt_size; |
| 257 | LIST_HEAD(stolen); | 271 | LIST_HEAD(stolen); |
| 258 | int count, ret; | 272 | int count, ret; |
| 259 | 273 | ||
| @@ -269,7 +283,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) | |||
| 269 | list_add(&obj->obj_exec_link, &stolen); | 283 | list_add(&obj->obj_exec_link, &stolen); |
| 270 | 284 | ||
| 271 | total_obj_size += obj->base.size; | 285 | total_obj_size += obj->base.size; |
| 272 | total_gtt_size += i915_gem_obj_ggtt_size(obj); | 286 | total_gtt_size += i915_gem_obj_total_ggtt_size(obj); |
| 273 | count++; | 287 | count++; |
| 274 | } | 288 | } |
| 275 | list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { | 289 | list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { |
| @@ -292,14 +306,14 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) | |||
| 292 | } | 306 | } |
| 293 | mutex_unlock(&dev->struct_mutex); | 307 | mutex_unlock(&dev->struct_mutex); |
| 294 | 308 | ||
| 295 | seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", | 309 | seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", |
| 296 | count, total_obj_size, total_gtt_size); | 310 | count, total_obj_size, total_gtt_size); |
| 297 | return 0; | 311 | return 0; |
| 298 | } | 312 | } |
| 299 | 313 | ||
| 300 | #define count_objects(list, member) do { \ | 314 | #define count_objects(list, member) do { \ |
| 301 | list_for_each_entry(obj, list, member) { \ | 315 | list_for_each_entry(obj, list, member) { \ |
| 302 | size += i915_gem_obj_ggtt_size(obj); \ | 316 | size += i915_gem_obj_total_ggtt_size(obj); \ |
| 303 | ++count; \ | 317 | ++count; \ |
| 304 | if (obj->map_and_fenceable) { \ | 318 | if (obj->map_and_fenceable) { \ |
| 305 | mappable_size += i915_gem_obj_ggtt_size(obj); \ | 319 | mappable_size += i915_gem_obj_ggtt_size(obj); \ |
| @@ -310,10 +324,10 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) | |||
| 310 | 324 | ||
| 311 | struct file_stats { | 325 | struct file_stats { |
| 312 | struct drm_i915_file_private *file_priv; | 326 | struct drm_i915_file_private *file_priv; |
| 313 | int count; | 327 | unsigned long count; |
| 314 | size_t total, unbound; | 328 | u64 total, unbound; |
| 315 | size_t global, shared; | 329 | u64 global, shared; |
| 316 | size_t active, inactive; | 330 | u64 active, inactive; |
| 317 | }; | 331 | }; |
| 318 | 332 | ||
| 319 | static int per_file_stats(int id, void *ptr, void *data) | 333 | static int per_file_stats(int id, void *ptr, void *data) |
| @@ -370,7 +384,7 @@ static int per_file_stats(int id, void *ptr, void *data) | |||
| 370 | 384 | ||
| 371 | #define print_file_stats(m, name, stats) do { \ | 385 | #define print_file_stats(m, name, stats) do { \ |
| 372 | if (stats.count) \ | 386 | if (stats.count) \ |
| 373 | seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \ | 387 | seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \ |
| 374 | name, \ | 388 | name, \ |
| 375 | stats.count, \ | 389 | stats.count, \ |
| 376 | stats.total, \ | 390 | stats.total, \ |
| @@ -405,7 +419,7 @@ static void print_batch_pool_stats(struct seq_file *m, | |||
| 405 | 419 | ||
| 406 | #define count_vmas(list, member) do { \ | 420 | #define count_vmas(list, member) do { \ |
| 407 | list_for_each_entry(vma, list, member) { \ | 421 | list_for_each_entry(vma, list, member) { \ |
| 408 | size += i915_gem_obj_ggtt_size(vma->obj); \ | 422 | size += i915_gem_obj_total_ggtt_size(vma->obj); \ |
| 409 | ++count; \ | 423 | ++count; \ |
| 410 | if (vma->obj->map_and_fenceable) { \ | 424 | if (vma->obj->map_and_fenceable) { \ |
| 411 | mappable_size += i915_gem_obj_ggtt_size(vma->obj); \ | 425 | mappable_size += i915_gem_obj_ggtt_size(vma->obj); \ |
| @@ -420,7 +434,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data) | |||
| 420 | struct drm_device *dev = node->minor->dev; | 434 | struct drm_device *dev = node->minor->dev; |
| 421 | struct drm_i915_private *dev_priv = dev->dev_private; | 435 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 422 | u32 count, mappable_count, purgeable_count; | 436 | u32 count, mappable_count, purgeable_count; |
| 423 | size_t size, mappable_size, purgeable_size; | 437 | u64 size, mappable_size, purgeable_size; |
| 424 | struct drm_i915_gem_object *obj; | 438 | struct drm_i915_gem_object *obj; |
| 425 | struct i915_address_space *vm = &dev_priv->gtt.base; | 439 | struct i915_address_space *vm = &dev_priv->gtt.base; |
| 426 | struct drm_file *file; | 440 | struct drm_file *file; |
| @@ -437,17 +451,17 @@ static int i915_gem_object_info(struct seq_file *m, void* data) | |||
| 437 | 451 | ||
| 438 | size = count = mappable_size = mappable_count = 0; | 452 | size = count = mappable_size = mappable_count = 0; |
| 439 | count_objects(&dev_priv->mm.bound_list, global_list); | 453 | count_objects(&dev_priv->mm.bound_list, global_list); |
| 440 | seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", | 454 | seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n", |
| 441 | count, mappable_count, size, mappable_size); | 455 | count, mappable_count, size, mappable_size); |
| 442 | 456 | ||
| 443 | size = count = mappable_size = mappable_count = 0; | 457 | size = count = mappable_size = mappable_count = 0; |
| 444 | count_vmas(&vm->active_list, mm_list); | 458 | count_vmas(&vm->active_list, mm_list); |
| 445 | seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", | 459 | seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n", |
| 446 | count, mappable_count, size, mappable_size); | 460 | count, mappable_count, size, mappable_size); |
| 447 | 461 | ||
| 448 | size = count = mappable_size = mappable_count = 0; | 462 | size = count = mappable_size = mappable_count = 0; |
| 449 | count_vmas(&vm->inactive_list, mm_list); | 463 | count_vmas(&vm->inactive_list, mm_list); |
| 450 | seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", | 464 | seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n", |
| 451 | count, mappable_count, size, mappable_size); | 465 | count, mappable_count, size, mappable_size); |
| 452 | 466 | ||
| 453 | size = count = purgeable_size = purgeable_count = 0; | 467 | size = count = purgeable_size = purgeable_count = 0; |
| @@ -456,7 +470,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data) | |||
| 456 | if (obj->madv == I915_MADV_DONTNEED) | 470 | if (obj->madv == I915_MADV_DONTNEED) |
| 457 | purgeable_size += obj->base.size, ++purgeable_count; | 471 | purgeable_size += obj->base.size, ++purgeable_count; |
| 458 | } | 472 | } |
| 459 | seq_printf(m, "%u unbound objects, %zu bytes\n", count, size); | 473 | seq_printf(m, "%u unbound objects, %llu bytes\n", count, size); |
| 460 | 474 | ||
| 461 | size = count = mappable_size = mappable_count = 0; | 475 | size = count = mappable_size = mappable_count = 0; |
| 462 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { | 476 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { |
| @@ -473,16 +487,16 @@ static int i915_gem_object_info(struct seq_file *m, void* data) | |||
| 473 | ++purgeable_count; | 487 | ++purgeable_count; |
| 474 | } | 488 | } |
| 475 | } | 489 | } |
| 476 | seq_printf(m, "%u purgeable objects, %zu bytes\n", | 490 | seq_printf(m, "%u purgeable objects, %llu bytes\n", |
| 477 | purgeable_count, purgeable_size); | 491 | purgeable_count, purgeable_size); |
| 478 | seq_printf(m, "%u pinned mappable objects, %zu bytes\n", | 492 | seq_printf(m, "%u pinned mappable objects, %llu bytes\n", |
| 479 | mappable_count, mappable_size); | 493 | mappable_count, mappable_size); |
| 480 | seq_printf(m, "%u fault mappable objects, %zu bytes\n", | 494 | seq_printf(m, "%u fault mappable objects, %llu bytes\n", |
| 481 | count, size); | 495 | count, size); |
| 482 | 496 | ||
| 483 | seq_printf(m, "%zu [%lu] gtt total\n", | 497 | seq_printf(m, "%llu [%llu] gtt total\n", |
| 484 | dev_priv->gtt.base.total, | 498 | dev_priv->gtt.base.total, |
| 485 | dev_priv->gtt.mappable_end - dev_priv->gtt.base.start); | 499 | (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start); |
| 486 | 500 | ||
| 487 | seq_putc(m, '\n'); | 501 | seq_putc(m, '\n'); |
| 488 | print_batch_pool_stats(m, dev_priv); | 502 | print_batch_pool_stats(m, dev_priv); |
| @@ -519,7 +533,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data) | |||
| 519 | uintptr_t list = (uintptr_t) node->info_ent->data; | 533 | uintptr_t list = (uintptr_t) node->info_ent->data; |
| 520 | struct drm_i915_private *dev_priv = dev->dev_private; | 534 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 521 | struct drm_i915_gem_object *obj; | 535 | struct drm_i915_gem_object *obj; |
| 522 | size_t total_obj_size, total_gtt_size; | 536 | u64 total_obj_size, total_gtt_size; |
| 523 | int count, ret; | 537 | int count, ret; |
| 524 | 538 | ||
| 525 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 539 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
| @@ -535,13 +549,13 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data) | |||
| 535 | describe_obj(m, obj); | 549 | describe_obj(m, obj); |
| 536 | seq_putc(m, '\n'); | 550 | seq_putc(m, '\n'); |
| 537 | total_obj_size += obj->base.size; | 551 | total_obj_size += obj->base.size; |
| 538 | total_gtt_size += i915_gem_obj_ggtt_size(obj); | 552 | total_gtt_size += i915_gem_obj_total_ggtt_size(obj); |
| 539 | count++; | 553 | count++; |
| 540 | } | 554 | } |
| 541 | 555 | ||
| 542 | mutex_unlock(&dev->struct_mutex); | 556 | mutex_unlock(&dev->struct_mutex); |
| 543 | 557 | ||
| 544 | seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", | 558 | seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", |
| 545 | count, total_obj_size, total_gtt_size); | 559 | count, total_obj_size, total_gtt_size); |
| 546 | 560 | ||
| 547 | return 0; | 561 | return 0; |
| @@ -1132,9 +1146,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused) | |||
| 1132 | (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); | 1146 | (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); |
| 1133 | } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) || | 1147 | } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) || |
| 1134 | IS_BROADWELL(dev) || IS_GEN9(dev)) { | 1148 | IS_BROADWELL(dev) || IS_GEN9(dev)) { |
| 1135 | u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); | 1149 | u32 rp_state_limits; |
| 1136 | u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); | 1150 | u32 gt_perf_status; |
| 1137 | u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | 1151 | u32 rp_state_cap; |
| 1138 | u32 rpmodectl, rpinclimit, rpdeclimit; | 1152 | u32 rpmodectl, rpinclimit, rpdeclimit; |
| 1139 | u32 rpstat, cagf, reqf; | 1153 | u32 rpstat, cagf, reqf; |
| 1140 | u32 rpupei, rpcurup, rpprevup; | 1154 | u32 rpupei, rpcurup, rpprevup; |
| @@ -1142,6 +1156,15 @@ static int i915_frequency_info(struct seq_file *m, void *unused) | |||
| 1142 | u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; | 1156 | u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; |
| 1143 | int max_freq; | 1157 | int max_freq; |
| 1144 | 1158 | ||
| 1159 | rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); | ||
| 1160 | if (IS_BROXTON(dev)) { | ||
| 1161 | rp_state_cap = I915_READ(BXT_RP_STATE_CAP); | ||
| 1162 | gt_perf_status = I915_READ(BXT_GT_PERF_STATUS); | ||
| 1163 | } else { | ||
| 1164 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | ||
| 1165 | gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); | ||
| 1166 | } | ||
| 1167 | |||
| 1145 | /* RPSTAT1 is in the GT power well */ | 1168 | /* RPSTAT1 is in the GT power well */ |
| 1146 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 1169 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
| 1147 | if (ret) | 1170 | if (ret) |
| @@ -1229,7 +1252,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused) | |||
| 1229 | seq_printf(m, "Down threshold: %d%%\n", | 1252 | seq_printf(m, "Down threshold: %d%%\n", |
| 1230 | dev_priv->rps.down_threshold); | 1253 | dev_priv->rps.down_threshold); |
| 1231 | 1254 | ||
| 1232 | max_freq = (rp_state_cap & 0xff0000) >> 16; | 1255 | max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 : |
| 1256 | rp_state_cap >> 16) & 0xff; | ||
| 1233 | max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); | 1257 | max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); |
| 1234 | seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", | 1258 | seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", |
| 1235 | intel_gpu_freq(dev_priv, max_freq)); | 1259 | intel_gpu_freq(dev_priv, max_freq)); |
| @@ -1239,7 +1263,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused) | |||
| 1239 | seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", | 1263 | seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", |
| 1240 | intel_gpu_freq(dev_priv, max_freq)); | 1264 | intel_gpu_freq(dev_priv, max_freq)); |
| 1241 | 1265 | ||
| 1242 | max_freq = rp_state_cap & 0xff; | 1266 | max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 : |
| 1267 | rp_state_cap >> 0) & 0xff; | ||
| 1243 | max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); | 1268 | max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); |
| 1244 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", | 1269 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", |
| 1245 | intel_gpu_freq(dev_priv, max_freq)); | 1270 | intel_gpu_freq(dev_priv, max_freq)); |
| @@ -1581,6 +1606,21 @@ static int i915_drpc_info(struct seq_file *m, void *unused) | |||
| 1581 | return ironlake_drpc_info(m); | 1606 | return ironlake_drpc_info(m); |
| 1582 | } | 1607 | } |
| 1583 | 1608 | ||
| 1609 | static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) | ||
| 1610 | { | ||
| 1611 | struct drm_info_node *node = m->private; | ||
| 1612 | struct drm_device *dev = node->minor->dev; | ||
| 1613 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 1614 | |||
| 1615 | seq_printf(m, "FB tracking busy bits: 0x%08x\n", | ||
| 1616 | dev_priv->fb_tracking.busy_bits); | ||
| 1617 | |||
| 1618 | seq_printf(m, "FB tracking flip bits: 0x%08x\n", | ||
| 1619 | dev_priv->fb_tracking.flip_bits); | ||
| 1620 | |||
| 1621 | return 0; | ||
| 1622 | } | ||
| 1623 | |||
| 1584 | static int i915_fbc_status(struct seq_file *m, void *unused) | 1624 | static int i915_fbc_status(struct seq_file *m, void *unused) |
| 1585 | { | 1625 | { |
| 1586 | struct drm_info_node *node = m->private; | 1626 | struct drm_info_node *node = m->private; |
| @@ -1593,51 +1633,20 @@ static int i915_fbc_status(struct seq_file *m, void *unused) | |||
| 1593 | } | 1633 | } |
| 1594 | 1634 | ||
| 1595 | intel_runtime_pm_get(dev_priv); | 1635 | intel_runtime_pm_get(dev_priv); |
| 1636 | mutex_lock(&dev_priv->fbc.lock); | ||
| 1596 | 1637 | ||
| 1597 | if (intel_fbc_enabled(dev)) { | 1638 | if (intel_fbc_enabled(dev_priv)) |
| 1598 | seq_puts(m, "FBC enabled\n"); | 1639 | seq_puts(m, "FBC enabled\n"); |
| 1599 | } else { | 1640 | else |
| 1600 | seq_puts(m, "FBC disabled: "); | 1641 | seq_printf(m, "FBC disabled: %s\n", |
| 1601 | switch (dev_priv->fbc.no_fbc_reason) { | 1642 | intel_no_fbc_reason_str(dev_priv->fbc.no_fbc_reason)); |
| 1602 | case FBC_OK: | 1643 | |
| 1603 | seq_puts(m, "FBC actived, but currently disabled in hardware"); | 1644 | if (INTEL_INFO(dev_priv)->gen >= 7) |
| 1604 | break; | 1645 | seq_printf(m, "Compressing: %s\n", |
| 1605 | case FBC_UNSUPPORTED: | 1646 | yesno(I915_READ(FBC_STATUS2) & |
| 1606 | seq_puts(m, "unsupported by this chipset"); | 1647 | FBC_COMPRESSION_MASK)); |
| 1607 | break; | ||
| 1608 | case FBC_NO_OUTPUT: | ||
| 1609 | seq_puts(m, "no outputs"); | ||
| 1610 | break; | ||
| 1611 | case FBC_STOLEN_TOO_SMALL: | ||
| 1612 | seq_puts(m, "not enough stolen memory"); | ||
| 1613 | break; | ||
| 1614 | case FBC_UNSUPPORTED_MODE: | ||
| 1615 | seq_puts(m, "mode not supported"); | ||
| 1616 | break; | ||
| 1617 | case FBC_MODE_TOO_LARGE: | ||
| 1618 | seq_puts(m, "mode too large"); | ||
| 1619 | break; | ||
| 1620 | case FBC_BAD_PLANE: | ||
| 1621 | seq_puts(m, "FBC unsupported on plane"); | ||
| 1622 | break; | ||
| 1623 | case FBC_NOT_TILED: | ||
| 1624 | seq_puts(m, "scanout buffer not tiled"); | ||
| 1625 | break; | ||
| 1626 | case FBC_MULTIPLE_PIPES: | ||
| 1627 | seq_puts(m, "multiple pipes are enabled"); | ||
| 1628 | break; | ||
| 1629 | case FBC_MODULE_PARAM: | ||
| 1630 | seq_puts(m, "disabled per module param (default off)"); | ||
| 1631 | break; | ||
| 1632 | case FBC_CHIP_DEFAULT: | ||
| 1633 | seq_puts(m, "disabled per chip default"); | ||
| 1634 | break; | ||
| 1635 | default: | ||
| 1636 | seq_puts(m, "unknown reason"); | ||
| 1637 | } | ||
| 1638 | seq_putc(m, '\n'); | ||
| 1639 | } | ||
| 1640 | 1648 | ||
| 1649 | mutex_unlock(&dev_priv->fbc.lock); | ||
| 1641 | intel_runtime_pm_put(dev_priv); | 1650 | intel_runtime_pm_put(dev_priv); |
| 1642 | 1651 | ||
| 1643 | return 0; | 1652 | return 0; |
| @@ -1651,9 +1660,7 @@ static int i915_fbc_fc_get(void *data, u64 *val) | |||
| 1651 | if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) | 1660 | if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) |
| 1652 | return -ENODEV; | 1661 | return -ENODEV; |
| 1653 | 1662 | ||
| 1654 | drm_modeset_lock_all(dev); | ||
| 1655 | *val = dev_priv->fbc.false_color; | 1663 | *val = dev_priv->fbc.false_color; |
| 1656 | drm_modeset_unlock_all(dev); | ||
| 1657 | 1664 | ||
| 1658 | return 0; | 1665 | return 0; |
| 1659 | } | 1666 | } |
| @@ -1667,7 +1674,7 @@ static int i915_fbc_fc_set(void *data, u64 val) | |||
| 1667 | if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) | 1674 | if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) |
| 1668 | return -ENODEV; | 1675 | return -ENODEV; |
| 1669 | 1676 | ||
| 1670 | drm_modeset_lock_all(dev); | 1677 | mutex_lock(&dev_priv->fbc.lock); |
| 1671 | 1678 | ||
| 1672 | reg = I915_READ(ILK_DPFC_CONTROL); | 1679 | reg = I915_READ(ILK_DPFC_CONTROL); |
| 1673 | dev_priv->fbc.false_color = val; | 1680 | dev_priv->fbc.false_color = val; |
| @@ -1676,7 +1683,7 @@ static int i915_fbc_fc_set(void *data, u64 val) | |||
| 1676 | (reg | FBC_CTL_FALSE_COLOR) : | 1683 | (reg | FBC_CTL_FALSE_COLOR) : |
| 1677 | (reg & ~FBC_CTL_FALSE_COLOR)); | 1684 | (reg & ~FBC_CTL_FALSE_COLOR)); |
| 1678 | 1685 | ||
| 1679 | drm_modeset_unlock_all(dev); | 1686 | mutex_unlock(&dev_priv->fbc.lock); |
| 1680 | return 0; | 1687 | return 0; |
| 1681 | } | 1688 | } |
| 1682 | 1689 | ||
| @@ -1778,8 +1785,9 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) | |||
| 1778 | struct drm_i915_private *dev_priv = dev->dev_private; | 1785 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1779 | int ret = 0; | 1786 | int ret = 0; |
| 1780 | int gpu_freq, ia_freq; | 1787 | int gpu_freq, ia_freq; |
| 1788 | unsigned int max_gpu_freq, min_gpu_freq; | ||
| 1781 | 1789 | ||
| 1782 | if (!(IS_GEN6(dev) || IS_GEN7(dev))) { | 1790 | if (!HAS_CORE_RING_FREQ(dev)) { |
| 1783 | seq_puts(m, "unsupported on this chipset\n"); | 1791 | seq_puts(m, "unsupported on this chipset\n"); |
| 1784 | return 0; | 1792 | return 0; |
| 1785 | } | 1793 | } |
| @@ -1792,17 +1800,27 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) | |||
| 1792 | if (ret) | 1800 | if (ret) |
| 1793 | goto out; | 1801 | goto out; |
| 1794 | 1802 | ||
| 1803 | if (IS_SKYLAKE(dev)) { | ||
| 1804 | /* Convert GT frequency to 50 HZ units */ | ||
| 1805 | min_gpu_freq = | ||
| 1806 | dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER; | ||
| 1807 | max_gpu_freq = | ||
| 1808 | dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER; | ||
| 1809 | } else { | ||
| 1810 | min_gpu_freq = dev_priv->rps.min_freq_softlimit; | ||
| 1811 | max_gpu_freq = dev_priv->rps.max_freq_softlimit; | ||
| 1812 | } | ||
| 1813 | |||
| 1795 | seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); | 1814 | seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); |
| 1796 | 1815 | ||
| 1797 | for (gpu_freq = dev_priv->rps.min_freq_softlimit; | 1816 | for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { |
| 1798 | gpu_freq <= dev_priv->rps.max_freq_softlimit; | ||
| 1799 | gpu_freq++) { | ||
| 1800 | ia_freq = gpu_freq; | 1817 | ia_freq = gpu_freq; |
| 1801 | sandybridge_pcode_read(dev_priv, | 1818 | sandybridge_pcode_read(dev_priv, |
| 1802 | GEN6_PCODE_READ_MIN_FREQ_TABLE, | 1819 | GEN6_PCODE_READ_MIN_FREQ_TABLE, |
| 1803 | &ia_freq); | 1820 | &ia_freq); |
| 1804 | seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", | 1821 | seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", |
| 1805 | intel_gpu_freq(dev_priv, gpu_freq), | 1822 | intel_gpu_freq(dev_priv, (gpu_freq * |
| 1823 | (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1))), | ||
| 1806 | ((ia_freq >> 0) & 0xff) * 100, | 1824 | ((ia_freq >> 0) & 0xff) * 100, |
| 1807 | ((ia_freq >> 8) & 0xff) * 100); | 1825 | ((ia_freq >> 8) & 0xff) * 100); |
| 1808 | } | 1826 | } |
| @@ -2248,7 +2266,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) | |||
| 2248 | struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; | 2266 | struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; |
| 2249 | 2267 | ||
| 2250 | seq_puts(m, "aliasing PPGTT:\n"); | 2268 | seq_puts(m, "aliasing PPGTT:\n"); |
| 2251 | seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset); | 2269 | seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset); |
| 2252 | 2270 | ||
| 2253 | ppgtt->debug_dump(ppgtt, m); | 2271 | ppgtt->debug_dump(ppgtt, m); |
| 2254 | } | 2272 | } |
| @@ -2479,13 +2497,13 @@ static int i915_energy_uJ(struct seq_file *m, void *data) | |||
| 2479 | return 0; | 2497 | return 0; |
| 2480 | } | 2498 | } |
| 2481 | 2499 | ||
| 2482 | static int i915_pc8_status(struct seq_file *m, void *unused) | 2500 | static int i915_runtime_pm_status(struct seq_file *m, void *unused) |
| 2483 | { | 2501 | { |
| 2484 | struct drm_info_node *node = m->private; | 2502 | struct drm_info_node *node = m->private; |
| 2485 | struct drm_device *dev = node->minor->dev; | 2503 | struct drm_device *dev = node->minor->dev; |
| 2486 | struct drm_i915_private *dev_priv = dev->dev_private; | 2504 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2487 | 2505 | ||
| 2488 | if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) { | 2506 | if (!HAS_RUNTIME_PM(dev)) { |
| 2489 | seq_puts(m, "not supported\n"); | 2507 | seq_puts(m, "not supported\n"); |
| 2490 | return 0; | 2508 | return 0; |
| 2491 | } | 2509 | } |
| @@ -2493,6 +2511,12 @@ static int i915_pc8_status(struct seq_file *m, void *unused) | |||
| 2493 | seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); | 2511 | seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); |
| 2494 | seq_printf(m, "IRQs disabled: %s\n", | 2512 | seq_printf(m, "IRQs disabled: %s\n", |
| 2495 | yesno(!intel_irqs_enabled(dev_priv))); | 2513 | yesno(!intel_irqs_enabled(dev_priv))); |
| 2514 | #ifdef CONFIG_PM | ||
| 2515 | seq_printf(m, "Usage count: %d\n", | ||
| 2516 | atomic_read(&dev->dev->power.usage_count)); | ||
| 2517 | #else | ||
| 2518 | seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n"); | ||
| 2519 | #endif | ||
| 2496 | 2520 | ||
| 2497 | return 0; | 2521 | return 0; |
| 2498 | } | 2522 | } |
| @@ -2780,13 +2804,16 @@ static int i915_display_info(struct seq_file *m, void *unused) | |||
| 2780 | seq_printf(m, "---------\n"); | 2804 | seq_printf(m, "---------\n"); |
| 2781 | for_each_intel_crtc(dev, crtc) { | 2805 | for_each_intel_crtc(dev, crtc) { |
| 2782 | bool active; | 2806 | bool active; |
| 2807 | struct intel_crtc_state *pipe_config; | ||
| 2783 | int x, y; | 2808 | int x, y; |
| 2784 | 2809 | ||
| 2810 | pipe_config = to_intel_crtc_state(crtc->base.state); | ||
| 2811 | |||
| 2785 | seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n", | 2812 | seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n", |
| 2786 | crtc->base.base.id, pipe_name(crtc->pipe), | 2813 | crtc->base.base.id, pipe_name(crtc->pipe), |
| 2787 | yesno(crtc->active), crtc->config->pipe_src_w, | 2814 | yesno(pipe_config->base.active), |
| 2788 | crtc->config->pipe_src_h); | 2815 | pipe_config->pipe_src_w, pipe_config->pipe_src_h); |
| 2789 | if (crtc->active) { | 2816 | if (pipe_config->base.active) { |
| 2790 | intel_crtc_info(m, crtc); | 2817 | intel_crtc_info(m, crtc); |
| 2791 | 2818 | ||
| 2792 | active = cursor_position(dev, crtc->pipe, &x, &y); | 2819 | active = cursor_position(dev, crtc->pipe, &x, &y); |
| @@ -3027,7 +3054,7 @@ static void drrs_status_per_crtc(struct seq_file *m, | |||
| 3027 | 3054 | ||
| 3028 | seq_puts(m, "\n\n"); | 3055 | seq_puts(m, "\n\n"); |
| 3029 | 3056 | ||
| 3030 | if (intel_crtc->config->has_drrs) { | 3057 | if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) { |
| 3031 | struct intel_panel *panel; | 3058 | struct intel_panel *panel; |
| 3032 | 3059 | ||
| 3033 | mutex_lock(&drrs->mutex); | 3060 | mutex_lock(&drrs->mutex); |
| @@ -3079,7 +3106,7 @@ static int i915_drrs_status(struct seq_file *m, void *unused) | |||
| 3079 | for_each_intel_crtc(dev, intel_crtc) { | 3106 | for_each_intel_crtc(dev, intel_crtc) { |
| 3080 | drm_modeset_lock(&intel_crtc->base.mutex, NULL); | 3107 | drm_modeset_lock(&intel_crtc->base.mutex, NULL); |
| 3081 | 3108 | ||
| 3082 | if (intel_crtc->active) { | 3109 | if (intel_crtc->base.state->active) { |
| 3083 | active_crtc_cnt++; | 3110 | active_crtc_cnt++; |
| 3084 | seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); | 3111 | seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); |
| 3085 | 3112 | ||
| @@ -3621,22 +3648,33 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev) | |||
| 3621 | struct drm_i915_private *dev_priv = dev->dev_private; | 3648 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3622 | struct intel_crtc *crtc = | 3649 | struct intel_crtc *crtc = |
| 3623 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); | 3650 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); |
| 3651 | struct intel_crtc_state *pipe_config; | ||
| 3624 | 3652 | ||
| 3625 | drm_modeset_lock_all(dev); | 3653 | drm_modeset_lock_all(dev); |
| 3654 | pipe_config = to_intel_crtc_state(crtc->base.state); | ||
| 3655 | |||
| 3626 | /* | 3656 | /* |
| 3627 | * If we use the eDP transcoder we need to make sure that we don't | 3657 | * If we use the eDP transcoder we need to make sure that we don't |
| 3628 | * bypass the pfit, since otherwise the pipe CRC source won't work. Only | 3658 | * bypass the pfit, since otherwise the pipe CRC source won't work. Only |
| 3629 | * relevant on hsw with pipe A when using the always-on power well | 3659 | * relevant on hsw with pipe A when using the always-on power well |
| 3630 | * routing. | 3660 | * routing. |
| 3631 | */ | 3661 | */ |
| 3632 | if (crtc->config->cpu_transcoder == TRANSCODER_EDP && | 3662 | if (pipe_config->cpu_transcoder == TRANSCODER_EDP && |
| 3633 | !crtc->config->pch_pfit.enabled) { | 3663 | !pipe_config->pch_pfit.enabled) { |
| 3634 | crtc->config->pch_pfit.force_thru = true; | 3664 | bool active = pipe_config->base.active; |
| 3665 | |||
| 3666 | if (active) { | ||
| 3667 | intel_crtc_control(&crtc->base, false); | ||
| 3668 | pipe_config = to_intel_crtc_state(crtc->base.state); | ||
| 3669 | } | ||
| 3670 | |||
| 3671 | pipe_config->pch_pfit.force_thru = true; | ||
| 3635 | 3672 | ||
| 3636 | intel_display_power_get(dev_priv, | 3673 | intel_display_power_get(dev_priv, |
| 3637 | POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A)); | 3674 | POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A)); |
| 3638 | 3675 | ||
| 3639 | intel_crtc_reset(crtc); | 3676 | if (active) |
| 3677 | intel_crtc_control(&crtc->base, true); | ||
| 3640 | } | 3678 | } |
| 3641 | drm_modeset_unlock_all(dev); | 3679 | drm_modeset_unlock_all(dev); |
| 3642 | } | 3680 | } |
| @@ -3646,6 +3684,7 @@ static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev) | |||
| 3646 | struct drm_i915_private *dev_priv = dev->dev_private; | 3684 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3647 | struct intel_crtc *crtc = | 3685 | struct intel_crtc *crtc = |
| 3648 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); | 3686 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); |
| 3687 | struct intel_crtc_state *pipe_config; | ||
| 3649 | 3688 | ||
| 3650 | drm_modeset_lock_all(dev); | 3689 | drm_modeset_lock_all(dev); |
| 3651 | /* | 3690 | /* |
| @@ -3654,13 +3693,22 @@ static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev) | |||
| 3654 | * relevant on hsw with pipe A when using the always-on power well | 3693 | * relevant on hsw with pipe A when using the always-on power well |
| 3655 | * routing. | 3694 | * routing. |
| 3656 | */ | 3695 | */ |
| 3657 | if (crtc->config->pch_pfit.force_thru) { | 3696 | pipe_config = to_intel_crtc_state(crtc->base.state); |
| 3658 | crtc->config->pch_pfit.force_thru = false; | 3697 | if (pipe_config->pch_pfit.force_thru) { |
| 3698 | bool active = pipe_config->base.active; | ||
| 3699 | |||
| 3700 | if (active) { | ||
| 3701 | intel_crtc_control(&crtc->base, false); | ||
| 3702 | pipe_config = to_intel_crtc_state(crtc->base.state); | ||
| 3703 | } | ||
| 3659 | 3704 | ||
| 3660 | intel_crtc_reset(crtc); | 3705 | pipe_config->pch_pfit.force_thru = false; |
| 3661 | 3706 | ||
| 3662 | intel_display_power_put(dev_priv, | 3707 | intel_display_power_put(dev_priv, |
| 3663 | POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A)); | 3708 | POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A)); |
| 3709 | |||
| 3710 | if (active) | ||
| 3711 | intel_crtc_control(&crtc->base, true); | ||
| 3664 | } | 3712 | } |
| 3665 | drm_modeset_unlock_all(dev); | 3713 | drm_modeset_unlock_all(dev); |
| 3666 | } | 3714 | } |
| @@ -3776,7 +3824,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, | |||
| 3776 | pipe_name(pipe)); | 3824 | pipe_name(pipe)); |
| 3777 | 3825 | ||
| 3778 | drm_modeset_lock(&crtc->base.mutex, NULL); | 3826 | drm_modeset_lock(&crtc->base.mutex, NULL); |
| 3779 | if (crtc->active) | 3827 | if (crtc->base.state->active) |
| 3780 | intel_wait_for_vblank(dev, pipe); | 3828 | intel_wait_for_vblank(dev, pipe); |
| 3781 | drm_modeset_unlock(&crtc->base.mutex); | 3829 | drm_modeset_unlock(&crtc->base.mutex); |
| 3782 | 3830 | ||
| @@ -4183,8 +4231,15 @@ static const struct file_operations i915_displayport_test_type_fops = { | |||
| 4183 | static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) | 4231 | static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) |
| 4184 | { | 4232 | { |
| 4185 | struct drm_device *dev = m->private; | 4233 | struct drm_device *dev = m->private; |
| 4186 | int num_levels = ilk_wm_max_level(dev) + 1; | ||
| 4187 | int level; | 4234 | int level; |
| 4235 | int num_levels; | ||
| 4236 | |||
| 4237 | if (IS_CHERRYVIEW(dev)) | ||
| 4238 | num_levels = 3; | ||
| 4239 | else if (IS_VALLEYVIEW(dev)) | ||
| 4240 | num_levels = 1; | ||
| 4241 | else | ||
| 4242 | num_levels = ilk_wm_max_level(dev) + 1; | ||
| 4188 | 4243 | ||
| 4189 | drm_modeset_lock_all(dev); | 4244 | drm_modeset_lock_all(dev); |
| 4190 | 4245 | ||
| @@ -4193,9 +4248,9 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) | |||
| 4193 | 4248 | ||
| 4194 | /* | 4249 | /* |
| 4195 | * - WM1+ latency values in 0.5us units | 4250 | * - WM1+ latency values in 0.5us units |
| 4196 | * - latencies are in us on gen9 | 4251 | * - latencies are in us on gen9/vlv/chv |
| 4197 | */ | 4252 | */ |
| 4198 | if (INTEL_INFO(dev)->gen >= 9) | 4253 | if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev)) |
| 4199 | latency *= 10; | 4254 | latency *= 10; |
| 4200 | else if (level > 0) | 4255 | else if (level > 0) |
| 4201 | latency *= 5; | 4256 | latency *= 5; |
| @@ -4259,7 +4314,7 @@ static int pri_wm_latency_open(struct inode *inode, struct file *file) | |||
| 4259 | { | 4314 | { |
| 4260 | struct drm_device *dev = inode->i_private; | 4315 | struct drm_device *dev = inode->i_private; |
| 4261 | 4316 | ||
| 4262 | if (HAS_GMCH_DISPLAY(dev)) | 4317 | if (INTEL_INFO(dev)->gen < 5) |
| 4263 | return -ENODEV; | 4318 | return -ENODEV; |
| 4264 | 4319 | ||
| 4265 | return single_open(file, pri_wm_latency_show, dev); | 4320 | return single_open(file, pri_wm_latency_show, dev); |
| @@ -4291,11 +4346,18 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, | |||
| 4291 | struct seq_file *m = file->private_data; | 4346 | struct seq_file *m = file->private_data; |
| 4292 | struct drm_device *dev = m->private; | 4347 | struct drm_device *dev = m->private; |
| 4293 | uint16_t new[8] = { 0 }; | 4348 | uint16_t new[8] = { 0 }; |
| 4294 | int num_levels = ilk_wm_max_level(dev) + 1; | 4349 | int num_levels; |
| 4295 | int level; | 4350 | int level; |
| 4296 | int ret; | 4351 | int ret; |
| 4297 | char tmp[32]; | 4352 | char tmp[32]; |
| 4298 | 4353 | ||
| 4354 | if (IS_CHERRYVIEW(dev)) | ||
| 4355 | num_levels = 3; | ||
| 4356 | else if (IS_VALLEYVIEW(dev)) | ||
| 4357 | num_levels = 1; | ||
| 4358 | else | ||
| 4359 | num_levels = ilk_wm_max_level(dev) + 1; | ||
| 4360 | |||
| 4299 | if (len >= sizeof(tmp)) | 4361 | if (len >= sizeof(tmp)) |
| 4300 | return -EINVAL; | 4362 | return -EINVAL; |
| 4301 | 4363 | ||
| @@ -5027,6 +5089,7 @@ static const struct drm_info_list i915_debugfs_list[] = { | |||
| 5027 | {"i915_drpc_info", i915_drpc_info, 0}, | 5089 | {"i915_drpc_info", i915_drpc_info, 0}, |
| 5028 | {"i915_emon_status", i915_emon_status, 0}, | 5090 | {"i915_emon_status", i915_emon_status, 0}, |
| 5029 | {"i915_ring_freq_table", i915_ring_freq_table, 0}, | 5091 | {"i915_ring_freq_table", i915_ring_freq_table, 0}, |
| 5092 | {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, | ||
| 5030 | {"i915_fbc_status", i915_fbc_status, 0}, | 5093 | {"i915_fbc_status", i915_fbc_status, 0}, |
| 5031 | {"i915_ips_status", i915_ips_status, 0}, | 5094 | {"i915_ips_status", i915_ips_status, 0}, |
| 5032 | {"i915_sr_status", i915_sr_status, 0}, | 5095 | {"i915_sr_status", i915_sr_status, 0}, |
| @@ -5042,7 +5105,7 @@ static const struct drm_info_list i915_debugfs_list[] = { | |||
| 5042 | {"i915_edp_psr_status", i915_edp_psr_status, 0}, | 5105 | {"i915_edp_psr_status", i915_edp_psr_status, 0}, |
| 5043 | {"i915_sink_crc_eDP1", i915_sink_crc, 0}, | 5106 | {"i915_sink_crc_eDP1", i915_sink_crc, 0}, |
| 5044 | {"i915_energy_uJ", i915_energy_uJ, 0}, | 5107 | {"i915_energy_uJ", i915_energy_uJ, 0}, |
| 5045 | {"i915_pc8_status", i915_pc8_status, 0}, | 5108 | {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, |
| 5046 | {"i915_power_domain_info", i915_power_domain_info, 0}, | 5109 | {"i915_power_domain_info", i915_power_domain_info, 0}, |
| 5047 | {"i915_display_info", i915_display_info, 0}, | 5110 | {"i915_display_info", i915_display_info, 0}, |
| 5048 | {"i915_semaphore_status", i915_semaphore_status, 0}, | 5111 | {"i915_semaphore_status", i915_semaphore_status, 0}, |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d2df321ba634..b1f9e5561cf2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
| @@ -163,6 +163,13 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
| 163 | if (!value) | 163 | if (!value) |
| 164 | return -ENODEV; | 164 | return -ENODEV; |
| 165 | break; | 165 | break; |
| 166 | case I915_PARAM_HAS_GPU_RESET: | ||
| 167 | value = i915.enable_hangcheck && | ||
| 168 | intel_has_gpu_reset(dev); | ||
| 169 | break; | ||
| 170 | case I915_PARAM_HAS_RESOURCE_STREAMER: | ||
| 171 | value = HAS_RESOURCE_STREAMER(dev); | ||
| 172 | break; | ||
| 166 | default: | 173 | default: |
| 167 | DRM_DEBUG("Unknown parameter %d\n", param->param); | 174 | DRM_DEBUG("Unknown parameter %d\n", param->param); |
| 168 | return -EINVAL; | 175 | return -EINVAL; |
| @@ -719,11 +726,19 @@ static void intel_device_info_runtime_init(struct drm_device *dev) | |||
| 719 | 726 | ||
| 720 | info = (struct intel_device_info *)&dev_priv->info; | 727 | info = (struct intel_device_info *)&dev_priv->info; |
| 721 | 728 | ||
| 729 | /* | ||
| 730 | * Skylake and Broxton currently don't expose the topmost plane as its | ||
| 731 | * use is exclusive with the legacy cursor and we only want to expose | ||
| 732 | * one of those, not both. Until we can safely expose the topmost plane | ||
| 733 | * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported, | ||
| 734 | * we don't expose the topmost plane at all to prevent ABI breakage | ||
| 735 | * down the line. | ||
| 736 | */ | ||
| 722 | if (IS_BROXTON(dev)) { | 737 | if (IS_BROXTON(dev)) { |
| 723 | info->num_sprites[PIPE_A] = 3; | 738 | info->num_sprites[PIPE_A] = 2; |
| 724 | info->num_sprites[PIPE_B] = 3; | 739 | info->num_sprites[PIPE_B] = 2; |
| 725 | info->num_sprites[PIPE_C] = 2; | 740 | info->num_sprites[PIPE_C] = 1; |
| 726 | } else if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9) | 741 | } else if (IS_VALLEYVIEW(dev)) |
| 727 | for_each_pipe(dev_priv, pipe) | 742 | for_each_pipe(dev_priv, pipe) |
| 728 | info->num_sprites[pipe] = 2; | 743 | info->num_sprites[pipe] = 2; |
| 729 | else | 744 | else |
| @@ -933,8 +948,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 933 | goto out_mtrrfree; | 948 | goto out_mtrrfree; |
| 934 | } | 949 | } |
| 935 | 950 | ||
| 936 | dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0); | 951 | dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); |
| 937 | if (dev_priv->dp_wq == NULL) { | 952 | if (dev_priv->hotplug.dp_wq == NULL) { |
| 938 | DRM_ERROR("Failed to create our dp workqueue.\n"); | 953 | DRM_ERROR("Failed to create our dp workqueue.\n"); |
| 939 | ret = -ENOMEM; | 954 | ret = -ENOMEM; |
| 940 | goto out_freewq; | 955 | goto out_freewq; |
| @@ -1029,7 +1044,7 @@ out_gem_unload: | |||
| 1029 | pm_qos_remove_request(&dev_priv->pm_qos); | 1044 | pm_qos_remove_request(&dev_priv->pm_qos); |
| 1030 | destroy_workqueue(dev_priv->gpu_error.hangcheck_wq); | 1045 | destroy_workqueue(dev_priv->gpu_error.hangcheck_wq); |
| 1031 | out_freedpwq: | 1046 | out_freedpwq: |
| 1032 | destroy_workqueue(dev_priv->dp_wq); | 1047 | destroy_workqueue(dev_priv->hotplug.dp_wq); |
| 1033 | out_freewq: | 1048 | out_freewq: |
| 1034 | destroy_workqueue(dev_priv->wq); | 1049 | destroy_workqueue(dev_priv->wq); |
| 1035 | out_mtrrfree: | 1050 | out_mtrrfree: |
| @@ -1116,6 +1131,7 @@ int i915_driver_unload(struct drm_device *dev) | |||
| 1116 | i915_gem_cleanup_ringbuffer(dev); | 1131 | i915_gem_cleanup_ringbuffer(dev); |
| 1117 | i915_gem_context_fini(dev); | 1132 | i915_gem_context_fini(dev); |
| 1118 | mutex_unlock(&dev->struct_mutex); | 1133 | mutex_unlock(&dev->struct_mutex); |
| 1134 | intel_fbc_cleanup_cfb(dev_priv); | ||
| 1119 | i915_gem_cleanup_stolen(dev); | 1135 | i915_gem_cleanup_stolen(dev); |
| 1120 | 1136 | ||
| 1121 | intel_csr_ucode_fini(dev); | 1137 | intel_csr_ucode_fini(dev); |
| @@ -1123,7 +1139,7 @@ int i915_driver_unload(struct drm_device *dev) | |||
| 1123 | intel_teardown_gmbus(dev); | 1139 | intel_teardown_gmbus(dev); |
| 1124 | intel_teardown_mchbar(dev); | 1140 | intel_teardown_mchbar(dev); |
| 1125 | 1141 | ||
| 1126 | destroy_workqueue(dev_priv->dp_wq); | 1142 | destroy_workqueue(dev_priv->hotplug.dp_wq); |
| 1127 | destroy_workqueue(dev_priv->wq); | 1143 | destroy_workqueue(dev_priv->wq); |
| 1128 | destroy_workqueue(dev_priv->gpu_error.hangcheck_wq); | 1144 | destroy_workqueue(dev_priv->gpu_error.hangcheck_wq); |
| 1129 | pm_qos_remove_request(&dev_priv->pm_qos); | 1145 | pm_qos_remove_request(&dev_priv->pm_qos); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 884b4f9b81c4..0d6775a3e88c 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -356,7 +356,6 @@ static const struct intel_device_info intel_cherryview_info = { | |||
| 356 | }; | 356 | }; |
| 357 | 357 | ||
| 358 | static const struct intel_device_info intel_skylake_info = { | 358 | static const struct intel_device_info intel_skylake_info = { |
| 359 | .is_preliminary = 1, | ||
| 360 | .is_skylake = 1, | 359 | .is_skylake = 1, |
| 361 | .gen = 9, .num_pipes = 3, | 360 | .gen = 9, .num_pipes = 3, |
| 362 | .need_gfx_hws = 1, .has_hotplug = 1, | 361 | .need_gfx_hws = 1, .has_hotplug = 1, |
| @@ -369,7 +368,6 @@ static const struct intel_device_info intel_skylake_info = { | |||
| 369 | }; | 368 | }; |
| 370 | 369 | ||
| 371 | static const struct intel_device_info intel_skylake_gt3_info = { | 370 | static const struct intel_device_info intel_skylake_gt3_info = { |
| 372 | .is_preliminary = 1, | ||
| 373 | .is_skylake = 1, | 371 | .is_skylake = 1, |
| 374 | .gen = 9, .num_pipes = 3, | 372 | .gen = 9, .num_pipes = 3, |
| 375 | .need_gfx_hws = 1, .has_hotplug = 1, | 373 | .need_gfx_hws = 1, .has_hotplug = 1, |
| @@ -440,9 +438,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */ | |||
| 440 | {0, 0, 0} | 438 | {0, 0, 0} |
| 441 | }; | 439 | }; |
| 442 | 440 | ||
| 443 | #if defined(CONFIG_DRM_I915_KMS) | ||
| 444 | MODULE_DEVICE_TABLE(pci, pciidlist); | 441 | MODULE_DEVICE_TABLE(pci, pciidlist); |
| 445 | #endif | ||
| 446 | 442 | ||
| 447 | void intel_detect_pch(struct drm_device *dev) | 443 | void intel_detect_pch(struct drm_device *dev) |
| 448 | { | 444 | { |
| @@ -541,21 +537,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) | |||
| 541 | return true; | 537 | return true; |
| 542 | } | 538 | } |
| 543 | 539 | ||
| 544 | void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) | ||
| 545 | { | ||
| 546 | spin_lock_irq(&dev_priv->irq_lock); | ||
| 547 | |||
| 548 | dev_priv->long_hpd_port_mask = 0; | ||
| 549 | dev_priv->short_hpd_port_mask = 0; | ||
| 550 | dev_priv->hpd_event_bits = 0; | ||
| 551 | |||
| 552 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 553 | |||
| 554 | cancel_work_sync(&dev_priv->dig_port_work); | ||
| 555 | cancel_work_sync(&dev_priv->hotplug_work); | ||
| 556 | cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work); | ||
| 557 | } | ||
| 558 | |||
| 559 | void i915_firmware_load_error_print(const char *fw_path, int err) | 540 | void i915_firmware_load_error_print(const char *fw_path, int err) |
| 560 | { | 541 | { |
| 561 | DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err); | 542 | DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err); |
| @@ -601,7 +582,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv); | |||
| 601 | static int i915_drm_suspend(struct drm_device *dev) | 582 | static int i915_drm_suspend(struct drm_device *dev) |
| 602 | { | 583 | { |
| 603 | struct drm_i915_private *dev_priv = dev->dev_private; | 584 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 604 | struct drm_crtc *crtc; | ||
| 605 | pci_power_t opregion_target_state; | 585 | pci_power_t opregion_target_state; |
| 606 | int error; | 586 | int error; |
| 607 | 587 | ||
| @@ -632,8 +612,7 @@ static int i915_drm_suspend(struct drm_device *dev) | |||
| 632 | * for _thaw. Also, power gate the CRTC power wells. | 612 | * for _thaw. Also, power gate the CRTC power wells. |
| 633 | */ | 613 | */ |
| 634 | drm_modeset_lock_all(dev); | 614 | drm_modeset_lock_all(dev); |
| 635 | for_each_crtc(dev, crtc) | 615 | intel_display_suspend(dev); |
| 636 | intel_crtc_control(crtc, false); | ||
| 637 | drm_modeset_unlock_all(dev); | 616 | drm_modeset_unlock_all(dev); |
| 638 | 617 | ||
| 639 | intel_dp_mst_suspend(dev); | 618 | intel_dp_mst_suspend(dev); |
| @@ -760,7 +739,7 @@ static int i915_drm_resume(struct drm_device *dev) | |||
| 760 | spin_unlock_irq(&dev_priv->irq_lock); | 739 | spin_unlock_irq(&dev_priv->irq_lock); |
| 761 | 740 | ||
| 762 | drm_modeset_lock_all(dev); | 741 | drm_modeset_lock_all(dev); |
| 763 | intel_modeset_setup_hw_state(dev, true); | 742 | intel_display_resume(dev); |
| 764 | drm_modeset_unlock_all(dev); | 743 | drm_modeset_unlock_all(dev); |
| 765 | 744 | ||
| 766 | intel_dp_mst_resume(dev); | 745 | intel_dp_mst_resume(dev); |
| @@ -865,9 +844,6 @@ int i915_reset(struct drm_device *dev) | |||
| 865 | bool simulated; | 844 | bool simulated; |
| 866 | int ret; | 845 | int ret; |
| 867 | 846 | ||
| 868 | if (!i915.reset) | ||
| 869 | return 0; | ||
| 870 | |||
| 871 | intel_reset_gt_powersave(dev); | 847 | intel_reset_gt_powersave(dev); |
| 872 | 848 | ||
| 873 | mutex_lock(&dev->struct_mutex); | 849 | mutex_lock(&dev->struct_mutex); |
| @@ -1727,20 +1703,14 @@ static int __init i915_init(void) | |||
| 1727 | driver.num_ioctls = i915_max_ioctl; | 1703 | driver.num_ioctls = i915_max_ioctl; |
| 1728 | 1704 | ||
| 1729 | /* | 1705 | /* |
| 1730 | * If CONFIG_DRM_I915_KMS is set, default to KMS unless | 1706 | * Enable KMS by default, unless explicitly overridden by |
| 1731 | * explicitly disabled with the module parameter. | 1707 | * either the i915.modeset parameter or by the |
| 1732 | * | 1708 | * vga_text_mode_force boot option. |
| 1733 | * Otherwise, just follow the parameter (defaulting to off). | ||
| 1734 | * | ||
| 1735 | * Allow optional vga_text_mode_force boot option to override | ||
| 1736 | * the default behavior. | ||
| 1737 | */ | 1709 | */ |
| 1738 | #if defined(CONFIG_DRM_I915_KMS) | 1710 | driver.driver_features |= DRIVER_MODESET; |
| 1739 | if (i915.modeset != 0) | 1711 | |
| 1740 | driver.driver_features |= DRIVER_MODESET; | 1712 | if (i915.modeset == 0) |
| 1741 | #endif | 1713 | driver.driver_features &= ~DRIVER_MODESET; |
| 1742 | if (i915.modeset == 1) | ||
| 1743 | driver.driver_features |= DRIVER_MODESET; | ||
| 1744 | 1714 | ||
| 1745 | #ifdef CONFIG_VGA_CONSOLE | 1715 | #ifdef CONFIG_VGA_CONSOLE |
| 1746 | if (vgacon_text_force() && i915.modeset == -1) | 1716 | if (vgacon_text_force() && i915.modeset == -1) |
| @@ -1759,7 +1729,7 @@ static int __init i915_init(void) | |||
| 1759 | * to the atomic ioctl and the atomic properties. Only plane operations on | 1729 | * to the atomic ioctl and the atomic properties. Only plane operations on |
| 1760 | * a single CRTC will actually work. | 1730 | * a single CRTC will actually work. |
| 1761 | */ | 1731 | */ |
| 1762 | if (i915.nuclear_pageflip) | 1732 | if (driver.driver_features & DRIVER_MODESET) |
| 1763 | driver.driver_features |= DRIVER_ATOMIC; | 1733 | driver.driver_features |= DRIVER_ATOMIC; |
| 1764 | 1734 | ||
| 1765 | return drm_pci_init(&driver, &i915_pci_driver); | 1735 | return drm_pci_init(&driver, &i915_pci_driver); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5f27290201e0..23ce125e0298 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -56,7 +56,7 @@ | |||
| 56 | 56 | ||
| 57 | #define DRIVER_NAME "i915" | 57 | #define DRIVER_NAME "i915" |
| 58 | #define DRIVER_DESC "Intel Graphics" | 58 | #define DRIVER_DESC "Intel Graphics" |
| 59 | #define DRIVER_DATE "20150522" | 59 | #define DRIVER_DATE "20150717" |
| 60 | 60 | ||
| 61 | #undef WARN_ON | 61 | #undef WARN_ON |
| 62 | /* Many gcc seem to not see through this and fall over :( */ | 62 | /* Many gcc seem to not see through this and fall over :( */ |
| @@ -217,6 +217,39 @@ enum hpd_pin { | |||
| 217 | HPD_NUM_PINS | 217 | HPD_NUM_PINS |
| 218 | }; | 218 | }; |
| 219 | 219 | ||
| 220 | #define for_each_hpd_pin(__pin) \ | ||
| 221 | for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++) | ||
| 222 | |||
| 223 | struct i915_hotplug { | ||
| 224 | struct work_struct hotplug_work; | ||
| 225 | |||
| 226 | struct { | ||
| 227 | unsigned long last_jiffies; | ||
| 228 | int count; | ||
| 229 | enum { | ||
| 230 | HPD_ENABLED = 0, | ||
| 231 | HPD_DISABLED = 1, | ||
| 232 | HPD_MARK_DISABLED = 2 | ||
| 233 | } state; | ||
| 234 | } stats[HPD_NUM_PINS]; | ||
| 235 | u32 event_bits; | ||
| 236 | struct delayed_work reenable_work; | ||
| 237 | |||
| 238 | struct intel_digital_port *irq_port[I915_MAX_PORTS]; | ||
| 239 | u32 long_port_mask; | ||
| 240 | u32 short_port_mask; | ||
| 241 | struct work_struct dig_port_work; | ||
| 242 | |||
| 243 | /* | ||
| 244 | * If we get an HPD irq from DP and an HPD irq from non-DP, | ||
| 245 | * the non-DP HPD could block the workqueue on a mode config | ||
| 246 | * mutex that userspace may have taken. However, | ||
| 247 | * userspace is waiting on the DP workqueue to run which is | ||
| 248 | * blocked behind the non-DP one. | ||
| 249 | */ | ||
| 250 | struct workqueue_struct *dp_wq; | ||
| 251 | }; | ||
| 252 | |||
| 220 | #define I915_GEM_GPU_DOMAINS \ | 253 | #define I915_GEM_GPU_DOMAINS \ |
| 221 | (I915_GEM_DOMAIN_RENDER | \ | 254 | (I915_GEM_DOMAIN_RENDER | \ |
| 222 | I915_GEM_DOMAIN_SAMPLER | \ | 255 | I915_GEM_DOMAIN_SAMPLER | \ |
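All of the hotplug state that used to be spread across drm_i915_private now lives in struct i915_hotplug, and for_each_hpd_pin gives the new intel_hotplug.c code a standard way to walk the per-pin storm statistics. A small sketch of the kind of loop this enables (the function is illustrative, not lifted from the driver):

/* Illustrative only: count HPD pins that storm detection has marked
 * for disabling, using the relocated dev_priv->hotplug.stats[] array. */
static int count_storm_marked_pins(struct drm_i915_private *dev_priv)
{
        enum hpd_pin pin;
        int marked = 0;

        assert_spin_locked(&dev_priv->irq_lock);

        for_each_hpd_pin(pin)
                if (dev_priv->hotplug.stats[pin].state == HPD_MARK_DISABLED)
                        marked++;

        return marked;
}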
| @@ -243,6 +276,12 @@ enum hpd_pin { | |||
| 243 | &dev->mode_config.plane_list, \ | 276 | &dev->mode_config.plane_list, \ |
| 244 | base.head) | 277 | base.head) |
| 245 | 278 | ||
| 279 | #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \ | ||
| 280 | list_for_each_entry(intel_plane, \ | ||
| 281 | &(dev)->mode_config.plane_list, \ | ||
| 282 | base.head) \ | ||
| 283 | if ((intel_plane)->pipe == (intel_crtc)->pipe) | ||
| 284 | |||
| 246 | #define for_each_intel_crtc(dev, intel_crtc) \ | 285 | #define for_each_intel_crtc(dev, intel_crtc) \ |
| 247 | list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) | 286 | list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) |
| 248 | 287 | ||
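for_each_intel_plane_on_crtc filters the global plane list down to the planes whose pipe matches the given CRTC, saving every caller an explicit pipe check. A minimal usage sketch, assuming dev and intel_crtc come from the surrounding code:

/* Illustrative only: count the planes (primary, sprites, cursor)
 * attached to this CRTC's pipe. */
struct intel_plane *plane;
int nplanes = 0;

for_each_intel_plane_on_crtc(dev, intel_crtc, plane) {
        /* plane->pipe == intel_crtc->pipe is guaranteed by the macro */
        nplanes++;
}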
| @@ -333,7 +372,8 @@ struct intel_dpll_hw_state { | |||
| 333 | uint32_t cfgcr1, cfgcr2; | 372 | uint32_t cfgcr1, cfgcr2; |
| 334 | 373 | ||
| 335 | /* bxt */ | 374 | /* bxt */ |
| 336 | uint32_t ebb0, pll0, pll1, pll2, pll3, pll6, pll8, pll10, pcsdw12; | 375 | uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, |
| 376 | pcsdw12; | ||
| 337 | }; | 377 | }; |
| 338 | 378 | ||
| 339 | struct intel_shared_dpll_config { | 379 | struct intel_shared_dpll_config { |
| @@ -343,7 +383,6 @@ struct intel_shared_dpll_config { | |||
| 343 | 383 | ||
| 344 | struct intel_shared_dpll { | 384 | struct intel_shared_dpll { |
| 345 | struct intel_shared_dpll_config config; | 385 | struct intel_shared_dpll_config config; |
| 346 | struct intel_shared_dpll_config *new_config; | ||
| 347 | 386 | ||
| 348 | int active; /* count of number of active CRTCs (i.e. DPMS on) */ | 387 | int active; /* count of number of active CRTCs (i.e. DPMS on) */ |
| 349 | bool on; /* is the PLL actually active? Disabled during modeset */ | 388 | bool on; /* is the PLL actually active? Disabled during modeset */ |
| @@ -559,9 +598,6 @@ struct intel_limit; | |||
| 559 | struct dpll; | 598 | struct dpll; |
| 560 | 599 | ||
| 561 | struct drm_i915_display_funcs { | 600 | struct drm_i915_display_funcs { |
| 562 | bool (*fbc_enabled)(struct drm_device *dev); | ||
| 563 | void (*enable_fbc)(struct drm_crtc *crtc); | ||
| 564 | void (*disable_fbc)(struct drm_device *dev); | ||
| 565 | int (*get_display_clock_speed)(struct drm_device *dev); | 601 | int (*get_display_clock_speed)(struct drm_device *dev); |
| 566 | int (*get_fifo_size)(struct drm_device *dev, int plane); | 602 | int (*get_fifo_size)(struct drm_device *dev, int plane); |
| 567 | /** | 603 | /** |
| @@ -587,7 +623,8 @@ struct drm_i915_display_funcs { | |||
| 587 | struct drm_crtc *crtc, | 623 | struct drm_crtc *crtc, |
| 588 | uint32_t sprite_width, uint32_t sprite_height, | 624 | uint32_t sprite_width, uint32_t sprite_height, |
| 589 | int pixel_size, bool enable, bool scaled); | 625 | int pixel_size, bool enable, bool scaled); |
| 590 | void (*modeset_global_resources)(struct drm_atomic_state *state); | 626 | int (*modeset_calc_cdclk)(struct drm_atomic_state *state); |
| 627 | void (*modeset_commit_cdclk)(struct drm_atomic_state *state); | ||
| 591 | /* Returns the active state of the crtc, and if the crtc is active, | 628 | /* Returns the active state of the crtc, and if the crtc is active, |
| 592 | * fills out the pipe-config with the hw state. */ | 629 | * fills out the pipe-config with the hw state. */ |
| 593 | bool (*get_pipe_config)(struct intel_crtc *, | 630 | bool (*get_pipe_config)(struct intel_crtc *, |
| @@ -598,7 +635,6 @@ struct drm_i915_display_funcs { | |||
| 598 | struct intel_crtc_state *crtc_state); | 635 | struct intel_crtc_state *crtc_state); |
| 599 | void (*crtc_enable)(struct drm_crtc *crtc); | 636 | void (*crtc_enable)(struct drm_crtc *crtc); |
| 600 | void (*crtc_disable)(struct drm_crtc *crtc); | 637 | void (*crtc_disable)(struct drm_crtc *crtc); |
| 601 | void (*off)(struct drm_crtc *crtc); | ||
| 602 | void (*audio_codec_enable)(struct drm_connector *connector, | 638 | void (*audio_codec_enable)(struct drm_connector *connector, |
| 603 | struct intel_encoder *encoder, | 639 | struct intel_encoder *encoder, |
| 604 | struct drm_display_mode *mode); | 640 | struct drm_display_mode *mode); |
| @@ -608,7 +644,7 @@ struct drm_i915_display_funcs { | |||
| 608 | int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, | 644 | int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, |
| 609 | struct drm_framebuffer *fb, | 645 | struct drm_framebuffer *fb, |
| 610 | struct drm_i915_gem_object *obj, | 646 | struct drm_i915_gem_object *obj, |
| 611 | struct intel_engine_cs *ring, | 647 | struct drm_i915_gem_request *req, |
| 612 | uint32_t flags); | 648 | uint32_t flags); |
| 613 | void (*update_primary_plane)(struct drm_crtc *crtc, | 649 | void (*update_primary_plane)(struct drm_crtc *crtc, |
| 614 | struct drm_framebuffer *fb, | 650 | struct drm_framebuffer *fb, |
| @@ -805,11 +841,15 @@ struct i915_ctx_hang_stats { | |||
| 805 | 841 | ||
| 806 | /* This must match up with the value previously used for execbuf2.rsvd1. */ | 842 | /* This must match up with the value previously used for execbuf2.rsvd1. */ |
| 807 | #define DEFAULT_CONTEXT_HANDLE 0 | 843 | #define DEFAULT_CONTEXT_HANDLE 0 |
| 844 | |||
| 845 | #define CONTEXT_NO_ZEROMAP (1<<0) | ||
| 808 | /** | 846 | /** |
| 809 | * struct intel_context - as the name implies, represents a context. | 847 | * struct intel_context - as the name implies, represents a context. |
| 810 | * @ref: reference count. | 848 | * @ref: reference count. |
| 811 | * @user_handle: userspace tracking identity for this context. | 849 | * @user_handle: userspace tracking identity for this context. |
| 812 | * @remap_slice: l3 row remapping information. | 850 | * @remap_slice: l3 row remapping information. |
| 851 | * @flags: context specific flags: | ||
| 852 | * CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0. | ||
| 813 | * @file_priv: filp associated with this context (NULL for global default | 853 | * @file_priv: filp associated with this context (NULL for global default |
| 814 | * context). | 854 | * context). |
| 815 | * @hang_stats: information about the role of this context in possible GPU | 855 | * @hang_stats: information about the role of this context in possible GPU |
| @@ -827,6 +867,7 @@ struct intel_context { | |||
| 827 | int user_handle; | 867 | int user_handle; |
| 828 | uint8_t remap_slice; | 868 | uint8_t remap_slice; |
| 829 | struct drm_i915_private *i915; | 869 | struct drm_i915_private *i915; |
| 870 | int flags; | ||
| 830 | struct drm_i915_file_private *file_priv; | 871 | struct drm_i915_file_private *file_priv; |
| 831 | struct i915_ctx_hang_stats hang_stats; | 872 | struct i915_ctx_hang_stats hang_stats; |
| 832 | struct i915_hw_ppgtt *ppgtt; | 873 | struct i915_hw_ppgtt *ppgtt; |
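CONTEXT_NO_ZEROMAP is the kernel-side flag behind the new per-context "never map at GPU address 0" behaviour. Userspace would opt in through the context-setparam ioctl; the sketch below assumes the matching I915_CONTEXT_PARAM_NO_ZEROMAP uapi value from the same series, which is not part of this hunk:

/* Illustrative only: request that objects in this context are never
 * placed at GPU virtual address 0 (helps catch NULL-pointer batches). */
struct drm_i915_gem_context_param p = {
        .ctx_id = ctx_id,
        .param  = I915_CONTEXT_PARAM_NO_ZEROMAP,        /* assumed uapi name */
        .value  = 1,
};

int ret = ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
/* ret != 0 on kernels that predate this parameter */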
| @@ -856,6 +897,9 @@ enum fb_op_origin { | |||
| 856 | }; | 897 | }; |
| 857 | 898 | ||
| 858 | struct i915_fbc { | 899 | struct i915_fbc { |
| 900 | /* This is always the inner lock when overlapping with struct_mutex and | ||
| 901 | * it's the outer lock when overlapping with stolen_lock. */ | ||
| 902 | struct mutex lock; | ||
| 859 | unsigned long uncompressed_size; | 903 | unsigned long uncompressed_size; |
| 860 | unsigned threshold; | 904 | unsigned threshold; |
| 861 | unsigned int fb_id; | 905 | unsigned int fb_id; |
| @@ -875,7 +919,7 @@ struct i915_fbc { | |||
| 875 | 919 | ||
| 876 | struct intel_fbc_work { | 920 | struct intel_fbc_work { |
| 877 | struct delayed_work work; | 921 | struct delayed_work work; |
| 878 | struct drm_crtc *crtc; | 922 | struct intel_crtc *crtc; |
| 879 | struct drm_framebuffer *fb; | 923 | struct drm_framebuffer *fb; |
| 880 | } *fbc_work; | 924 | } *fbc_work; |
| 881 | 925 | ||
| @@ -891,7 +935,13 @@ struct i915_fbc { | |||
| 891 | FBC_MULTIPLE_PIPES, /* more than one pipe active */ | 935 | FBC_MULTIPLE_PIPES, /* more than one pipe active */ |
| 892 | FBC_MODULE_PARAM, | 936 | FBC_MODULE_PARAM, |
| 893 | FBC_CHIP_DEFAULT, /* disabled by default on this chip */ | 937 | FBC_CHIP_DEFAULT, /* disabled by default on this chip */ |
| 938 | FBC_ROTATION, /* rotation is not supported */ | ||
| 939 | FBC_IN_DBG_MASTER, /* kernel debugger is active */ | ||
| 894 | } no_fbc_reason; | 940 | } no_fbc_reason; |
| 941 | |||
| 942 | bool (*fbc_enabled)(struct drm_i915_private *dev_priv); | ||
| 943 | void (*enable_fbc)(struct intel_crtc *crtc); | ||
| 944 | void (*disable_fbc)(struct drm_i915_private *dev_priv); | ||
| 895 | }; | 945 | }; |
| 896 | 946 | ||
| 897 | /** | 947 | /** |
| @@ -1201,6 +1251,10 @@ struct intel_l3_parity { | |||
| 1201 | struct i915_gem_mm { | 1251 | struct i915_gem_mm { |
| 1202 | /** Memory allocator for GTT stolen memory */ | 1252 | /** Memory allocator for GTT stolen memory */ |
| 1203 | struct drm_mm stolen; | 1253 | struct drm_mm stolen; |
| 1254 | /** Protects the usage of the GTT stolen memory allocator. This is | ||
| 1255 | * always the inner lock when overlapping with struct_mutex. */ | ||
| 1256 | struct mutex stolen_lock; | ||
| 1257 | |||
| 1204 | /** List of all objects in gtt_space. Used to restore gtt | 1258 | /** List of all objects in gtt_space. Used to restore gtt |
| 1205 | * mappings on resume */ | 1259 | * mappings on resume */ |
| 1206 | struct list_head bound_list; | 1260 | struct list_head bound_list; |
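Taken together, the comments on fbc.lock above and on stolen_lock here pin down a fixed ordering: struct_mutex is outermost, fbc.lock nests inside it, and stolen_lock is innermost. A sketch of a path that allocates stolen space on behalf of FBC, purely to spell out the nesting (not a literal call chain from the driver):

/* Lock ordering: struct_mutex -> fbc.lock -> mm.stolen_lock.
 * Any other order against these three risks an ABBA deadlock. */
mutex_lock(&dev->struct_mutex);
mutex_lock(&dev_priv->fbc.lock);

mutex_lock(&dev_priv->mm.stolen_lock);
/* ... carve a node out of dev_priv->mm.stolen for the compressed fb ... */
mutex_unlock(&dev_priv->mm.stolen_lock);

mutex_unlock(&dev_priv->fbc.lock);
mutex_unlock(&dev->struct_mutex);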
| @@ -1461,23 +1515,27 @@ struct ilk_wm_values { | |||
| 1461 | enum intel_ddb_partitioning partitioning; | 1515 | enum intel_ddb_partitioning partitioning; |
| 1462 | }; | 1516 | }; |
| 1463 | 1517 | ||
| 1464 | struct vlv_wm_values { | 1518 | struct vlv_pipe_wm { |
| 1465 | struct { | 1519 | uint16_t primary; |
| 1466 | uint16_t primary; | 1520 | uint16_t sprite[2]; |
| 1467 | uint16_t sprite[2]; | 1521 | uint8_t cursor; |
| 1468 | uint8_t cursor; | 1522 | }; |
| 1469 | } pipe[3]; | ||
| 1470 | 1523 | ||
| 1471 | struct { | 1524 | struct vlv_sr_wm { |
| 1472 | uint16_t plane; | 1525 | uint16_t plane; |
| 1473 | uint8_t cursor; | 1526 | uint8_t cursor; |
| 1474 | } sr; | 1527 | }; |
| 1475 | 1528 | ||
| 1529 | struct vlv_wm_values { | ||
| 1530 | struct vlv_pipe_wm pipe[3]; | ||
| 1531 | struct vlv_sr_wm sr; | ||
| 1476 | struct { | 1532 | struct { |
| 1477 | uint8_t cursor; | 1533 | uint8_t cursor; |
| 1478 | uint8_t sprite[2]; | 1534 | uint8_t sprite[2]; |
| 1479 | uint8_t primary; | 1535 | uint8_t primary; |
| 1480 | } ddl[3]; | 1536 | } ddl[3]; |
| 1537 | uint8_t level; | ||
| 1538 | bool cxsr; | ||
| 1481 | }; | 1539 | }; |
| 1482 | 1540 | ||
| 1483 | struct skl_ddb_entry { | 1541 | struct skl_ddb_entry { |
| @@ -1611,6 +1669,18 @@ struct i915_virtual_gpu { | |||
| 1611 | bool active; | 1669 | bool active; |
| 1612 | }; | 1670 | }; |
| 1613 | 1671 | ||
| 1672 | struct i915_execbuffer_params { | ||
| 1673 | struct drm_device *dev; | ||
| 1674 | struct drm_file *file; | ||
| 1675 | uint32_t dispatch_flags; | ||
| 1676 | uint32_t args_batch_start_offset; | ||
| 1677 | uint32_t batch_obj_vm_offset; | ||
| 1678 | struct intel_engine_cs *ring; | ||
| 1679 | struct drm_i915_gem_object *batch_obj; | ||
| 1680 | struct intel_context *ctx; | ||
| 1681 | struct drm_i915_gem_request *request; | ||
| 1682 | }; | ||
| 1683 | |||
| 1614 | struct drm_i915_private { | 1684 | struct drm_i915_private { |
| 1615 | struct drm_device *dev; | 1685 | struct drm_device *dev; |
| 1616 | struct kmem_cache *objects; | 1686 | struct kmem_cache *objects; |
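struct i915_execbuffer_params is one half of the execbuf_submit() signature change further down in this file: rather than threading eight loose arguments into the legacy-ringbuffer or execlists backend, the common execbuffer code fills a single parameter block. A hedged sketch of the calling pattern; the field values are placeholders and "gt" as the name of the submission vtable is an assumption, not shown in this hunk:

/* Illustrative only: package the submission state once, then hand it to
 * whichever backend (ringbuffer or execlists) the driver selected. */
struct i915_execbuffer_params params = {
        .dev                     = dev,
        .file                    = file,
        .ring                    = ring,
        .ctx                     = ctx,
        .batch_obj               = batch_obj,
        .dispatch_flags          = dispatch_flags,
        .args_batch_start_offset = args->batch_start_offset,
        .request                 = req,
};

ret = dev_priv->gt.execbuf_submit(&params, args, vmas);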
| @@ -1680,19 +1750,7 @@ struct drm_i915_private { | |||
| 1680 | u32 pm_rps_events; | 1750 | u32 pm_rps_events; |
| 1681 | u32 pipestat_irq_mask[I915_MAX_PIPES]; | 1751 | u32 pipestat_irq_mask[I915_MAX_PIPES]; |
| 1682 | 1752 | ||
| 1683 | struct work_struct hotplug_work; | 1753 | struct i915_hotplug hotplug; |
| 1684 | struct { | ||
| 1685 | unsigned long hpd_last_jiffies; | ||
| 1686 | int hpd_cnt; | ||
| 1687 | enum { | ||
| 1688 | HPD_ENABLED = 0, | ||
| 1689 | HPD_DISABLED = 1, | ||
| 1690 | HPD_MARK_DISABLED = 2 | ||
| 1691 | } hpd_mark; | ||
| 1692 | } hpd_stats[HPD_NUM_PINS]; | ||
| 1693 | u32 hpd_event_bits; | ||
| 1694 | struct delayed_work hotplug_reenable_work; | ||
| 1695 | |||
| 1696 | struct i915_fbc fbc; | 1754 | struct i915_fbc fbc; |
| 1697 | struct i915_drrs drrs; | 1755 | struct i915_drrs drrs; |
| 1698 | struct intel_opregion opregion; | 1756 | struct intel_opregion opregion; |
| @@ -1718,7 +1776,7 @@ struct drm_i915_private { | |||
| 1718 | 1776 | ||
| 1719 | unsigned int fsb_freq, mem_freq, is_ddr3; | 1777 | unsigned int fsb_freq, mem_freq, is_ddr3; |
| 1720 | unsigned int skl_boot_cdclk; | 1778 | unsigned int skl_boot_cdclk; |
| 1721 | unsigned int cdclk_freq; | 1779 | unsigned int cdclk_freq, max_cdclk_freq; |
| 1722 | unsigned int hpll_freq; | 1780 | unsigned int hpll_freq; |
| 1723 | 1781 | ||
| 1724 | /** | 1782 | /** |
| @@ -1769,9 +1827,6 @@ struct drm_i915_private { | |||
| 1769 | 1827 | ||
| 1770 | /* Reclocking support */ | 1828 | /* Reclocking support */ |
| 1771 | bool render_reclock_avail; | 1829 | bool render_reclock_avail; |
| 1772 | bool lvds_downclock_avail; | ||
| 1773 | /* indicates the reduced downclock for LVDS*/ | ||
| 1774 | int lvds_downclock; | ||
| 1775 | 1830 | ||
| 1776 | struct i915_frontbuffer_tracking fb_tracking; | 1831 | struct i915_frontbuffer_tracking fb_tracking; |
| 1777 | 1832 | ||
| @@ -1858,29 +1913,11 @@ struct drm_i915_private { | |||
| 1858 | 1913 | ||
| 1859 | struct i915_runtime_pm pm; | 1914 | struct i915_runtime_pm pm; |
| 1860 | 1915 | ||
| 1861 | struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS]; | ||
| 1862 | u32 long_hpd_port_mask; | ||
| 1863 | u32 short_hpd_port_mask; | ||
| 1864 | struct work_struct dig_port_work; | ||
| 1865 | |||
| 1866 | /* | ||
| 1867 | * if we get a HPD irq from DP and a HPD irq from non-DP | ||
| 1868 | * the non-DP HPD could block the workqueue on a mode config | ||
| 1869 | * mutex getting, that userspace may have taken. However | ||
| 1870 | * userspace is waiting on the DP workqueue to run which is | ||
| 1871 | * blocked behind the non-DP one. | ||
| 1872 | */ | ||
| 1873 | struct workqueue_struct *dp_wq; | ||
| 1874 | |||
| 1875 | /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ | 1916 | /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ |
| 1876 | struct { | 1917 | struct { |
| 1877 | int (*execbuf_submit)(struct drm_device *dev, struct drm_file *file, | 1918 | int (*execbuf_submit)(struct i915_execbuffer_params *params, |
| 1878 | struct intel_engine_cs *ring, | ||
| 1879 | struct intel_context *ctx, | ||
| 1880 | struct drm_i915_gem_execbuffer2 *args, | 1919 | struct drm_i915_gem_execbuffer2 *args, |
| 1881 | struct list_head *vmas, | 1920 | struct list_head *vmas); |
| 1882 | struct drm_i915_gem_object *batch_obj, | ||
| 1883 | u64 exec_start, u32 flags); | ||
| 1884 | int (*init_rings)(struct drm_device *dev); | 1921 | int (*init_rings)(struct drm_device *dev); |
| 1885 | void (*cleanup_ring)(struct intel_engine_cs *ring); | 1922 | void (*cleanup_ring)(struct intel_engine_cs *ring); |
| 1886 | void (*stop_ring)(struct intel_engine_cs *ring); | 1923 | void (*stop_ring)(struct intel_engine_cs *ring); |
| @@ -2148,7 +2185,8 @@ struct drm_i915_gem_request { | |||
| 2148 | struct intel_context *ctx; | 2185 | struct intel_context *ctx; |
| 2149 | struct intel_ringbuffer *ringbuf; | 2186 | struct intel_ringbuffer *ringbuf; |
| 2150 | 2187 | ||
| 2151 | /** Batch buffer related to this request if any */ | 2188 | /** Batch buffer related to this request if any (used for |
| 2189 | error state dump only) */ | ||
| 2152 | struct drm_i915_gem_object *batch_obj; | 2190 | struct drm_i915_gem_object *batch_obj; |
| 2153 | 2191 | ||
| 2154 | /** Time at which this request was emitted, in jiffies. */ | 2192 | /** Time at which this request was emitted, in jiffies. */ |
| @@ -2186,8 +2224,12 @@ struct drm_i915_gem_request { | |||
| 2186 | }; | 2224 | }; |
| 2187 | 2225 | ||
| 2188 | int i915_gem_request_alloc(struct intel_engine_cs *ring, | 2226 | int i915_gem_request_alloc(struct intel_engine_cs *ring, |
| 2189 | struct intel_context *ctx); | 2227 | struct intel_context *ctx, |
| 2228 | struct drm_i915_gem_request **req_out); | ||
| 2229 | void i915_gem_request_cancel(struct drm_i915_gem_request *req); | ||
| 2190 | void i915_gem_request_free(struct kref *req_ref); | 2230 | void i915_gem_request_free(struct kref *req_ref); |
| 2231 | int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, | ||
| 2232 | struct drm_file *file); | ||
| 2191 | 2233 | ||
| 2192 | static inline uint32_t | 2234 | static inline uint32_t |
| 2193 | i915_gem_request_get_seqno(struct drm_i915_gem_request *req) | 2235 | i915_gem_request_get_seqno(struct drm_i915_gem_request *req) |
| @@ -2391,6 +2433,9 @@ struct drm_i915_cmd_table { | |||
| 2391 | ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ | 2433 | ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ |
| 2392 | (INTEL_DEVID(dev) & 0xf) == 0xb || \ | 2434 | (INTEL_DEVID(dev) & 0xf) == 0xb || \ |
| 2393 | (INTEL_DEVID(dev) & 0xf) == 0xe)) | 2435 | (INTEL_DEVID(dev) & 0xf) == 0xe)) |
| 2436 | /* ULX machines are also considered ULT. */ | ||
| 2437 | #define IS_BDW_ULX(dev) (IS_BROADWELL(dev) && \ | ||
| 2438 | (INTEL_DEVID(dev) & 0xf) == 0xe) | ||
| 2394 | #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ | 2439 | #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ |
| 2395 | (INTEL_DEVID(dev) & 0x00F0) == 0x0020) | 2440 | (INTEL_DEVID(dev) & 0x00F0) == 0x0020) |
| 2396 | #define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ | 2441 | #define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ |
| @@ -2400,6 +2445,14 @@ struct drm_i915_cmd_table { | |||
| 2400 | /* ULX machines are also considered ULT. */ | 2445 | /* ULX machines are also considered ULT. */ |
| 2401 | #define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \ | 2446 | #define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \ |
| 2402 | INTEL_DEVID(dev) == 0x0A1E) | 2447 | INTEL_DEVID(dev) == 0x0A1E) |
| 2448 | #define IS_SKL_ULT(dev) (INTEL_DEVID(dev) == 0x1906 || \ | ||
| 2449 | INTEL_DEVID(dev) == 0x1913 || \ | ||
| 2450 | INTEL_DEVID(dev) == 0x1916 || \ | ||
| 2451 | INTEL_DEVID(dev) == 0x1921 || \ | ||
| 2452 | INTEL_DEVID(dev) == 0x1926) | ||
| 2453 | #define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \ | ||
| 2454 | INTEL_DEVID(dev) == 0x1915 || \ | ||
| 2455 | INTEL_DEVID(dev) == 0x191E) | ||
| 2403 | #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) | 2456 | #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) |
| 2404 | 2457 | ||
| 2405 | #define SKL_REVID_A0 (0x0) | 2458 | #define SKL_REVID_A0 (0x0) |
| @@ -2466,9 +2519,6 @@ struct drm_i915_cmd_table { | |||
| 2466 | */ | 2519 | */ |
| 2467 | #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ | 2520 | #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ |
| 2468 | IS_I915GM(dev))) | 2521 | IS_I915GM(dev))) |
| 2469 | #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) | ||
| 2470 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) | ||
| 2471 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) | ||
| 2472 | #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) | 2522 | #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) |
| 2473 | #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) | 2523 | #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) |
| 2474 | 2524 | ||
| @@ -2494,6 +2544,12 @@ struct drm_i915_cmd_table { | |||
| 2494 | 2544 | ||
| 2495 | #define HAS_CSR(dev) (IS_SKYLAKE(dev)) | 2545 | #define HAS_CSR(dev) (IS_SKYLAKE(dev)) |
| 2496 | 2546 | ||
| 2547 | #define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \ | ||
| 2548 | INTEL_INFO(dev)->gen >= 8) | ||
| 2549 | |||
| 2550 | #define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \ | ||
| 2551 | !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) | ||
| 2552 | |||
| 2497 | #define INTEL_PCH_DEVICE_ID_MASK 0xff00 | 2553 | #define INTEL_PCH_DEVICE_ID_MASK 0xff00 |
| 2498 | #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 | 2554 | #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 |
| 2499 | #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 | 2555 | #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 |
| @@ -2533,7 +2589,6 @@ struct i915_params { | |||
| 2533 | int modeset; | 2589 | int modeset; |
| 2534 | int panel_ignore_lid; | 2590 | int panel_ignore_lid; |
| 2535 | int semaphores; | 2591 | int semaphores; |
| 2536 | unsigned int lvds_downclock; | ||
| 2537 | int lvds_channel_mode; | 2592 | int lvds_channel_mode; |
| 2538 | int panel_use_ssc; | 2593 | int panel_use_ssc; |
| 2539 | int vbt_sdvo_panel_type; | 2594 | int vbt_sdvo_panel_type; |
| @@ -2558,7 +2613,6 @@ struct i915_params { | |||
| 2558 | int use_mmio_flip; | 2613 | int use_mmio_flip; |
| 2559 | int mmio_debug; | 2614 | int mmio_debug; |
| 2560 | bool verbose_state_checks; | 2615 | bool verbose_state_checks; |
| 2561 | bool nuclear_pageflip; | ||
| 2562 | int edp_vswing; | 2616 | int edp_vswing; |
| 2563 | }; | 2617 | }; |
| 2564 | extern struct i915_params i915 __read_mostly; | 2618 | extern struct i915_params i915 __read_mostly; |
| @@ -2578,15 +2632,22 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, | |||
| 2578 | unsigned long arg); | 2632 | unsigned long arg); |
| 2579 | #endif | 2633 | #endif |
| 2580 | extern int intel_gpu_reset(struct drm_device *dev); | 2634 | extern int intel_gpu_reset(struct drm_device *dev); |
| 2635 | extern bool intel_has_gpu_reset(struct drm_device *dev); | ||
| 2581 | extern int i915_reset(struct drm_device *dev); | 2636 | extern int i915_reset(struct drm_device *dev); |
| 2582 | extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); | 2637 | extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); |
| 2583 | extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); | 2638 | extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); |
| 2584 | extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); | 2639 | extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); |
| 2585 | extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); | 2640 | extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); |
| 2586 | int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); | 2641 | int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); |
| 2587 | void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); | ||
| 2588 | void i915_firmware_load_error_print(const char *fw_path, int err); | 2642 | void i915_firmware_load_error_print(const char *fw_path, int err); |
| 2589 | 2643 | ||
| 2644 | /* intel_hotplug.c */ | ||
| 2645 | void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask); | ||
| 2646 | void intel_hpd_init(struct drm_i915_private *dev_priv); | ||
| 2647 | void intel_hpd_init_work(struct drm_i915_private *dev_priv); | ||
| 2648 | void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); | ||
| 2649 | enum port intel_hpd_pin_to_port(enum hpd_pin pin); | ||
| 2650 | |||
| 2590 | /* i915_irq.c */ | 2651 | /* i915_irq.c */ |
| 2591 | void i915_queue_hangcheck(struct drm_device *dev); | 2652 | void i915_queue_hangcheck(struct drm_device *dev); |
| 2592 | __printf(3, 4) | 2653 | __printf(3, 4) |
| @@ -2594,7 +2655,6 @@ void i915_handle_error(struct drm_device *dev, bool wedged, | |||
| 2594 | const char *fmt, ...); | 2655 | const char *fmt, ...); |
| 2595 | 2656 | ||
| 2596 | extern void intel_irq_init(struct drm_i915_private *dev_priv); | 2657 | extern void intel_irq_init(struct drm_i915_private *dev_priv); |
| 2597 | extern void intel_hpd_init(struct drm_i915_private *dev_priv); | ||
| 2598 | int intel_irq_install(struct drm_i915_private *dev_priv); | 2658 | int intel_irq_install(struct drm_i915_private *dev_priv); |
| 2599 | void intel_irq_uninstall(struct drm_i915_private *dev_priv); | 2659 | void intel_irq_uninstall(struct drm_i915_private *dev_priv); |
| 2600 | 2660 | ||
| @@ -2661,19 +2721,11 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
| 2661 | int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | 2721 | int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, |
| 2662 | struct drm_file *file_priv); | 2722 | struct drm_file *file_priv); |
| 2663 | void i915_gem_execbuffer_move_to_active(struct list_head *vmas, | 2723 | void i915_gem_execbuffer_move_to_active(struct list_head *vmas, |
| 2664 | struct intel_engine_cs *ring); | 2724 | struct drm_i915_gem_request *req); |
| 2665 | void i915_gem_execbuffer_retire_commands(struct drm_device *dev, | 2725 | void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params); |
| 2666 | struct drm_file *file, | 2726 | int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, |
| 2667 | struct intel_engine_cs *ring, | ||
| 2668 | struct drm_i915_gem_object *obj); | ||
| 2669 | int i915_gem_ringbuffer_submission(struct drm_device *dev, | ||
| 2670 | struct drm_file *file, | ||
| 2671 | struct intel_engine_cs *ring, | ||
| 2672 | struct intel_context *ctx, | ||
| 2673 | struct drm_i915_gem_execbuffer2 *args, | 2727 | struct drm_i915_gem_execbuffer2 *args, |
| 2674 | struct list_head *vmas, | 2728 | struct list_head *vmas); |
| 2675 | struct drm_i915_gem_object *batch_obj, | ||
| 2676 | u64 exec_start, u32 flags); | ||
| 2677 | int i915_gem_execbuffer(struct drm_device *dev, void *data, | 2729 | int i915_gem_execbuffer(struct drm_device *dev, void *data, |
| 2678 | struct drm_file *file_priv); | 2730 | struct drm_file *file_priv); |
| 2679 | int i915_gem_execbuffer2(struct drm_device *dev, void *data, | 2731 | int i915_gem_execbuffer2(struct drm_device *dev, void *data, |
| @@ -2780,9 +2832,10 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) | |||
| 2780 | 2832 | ||
| 2781 | int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); | 2833 | int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); |
| 2782 | int i915_gem_object_sync(struct drm_i915_gem_object *obj, | 2834 | int i915_gem_object_sync(struct drm_i915_gem_object *obj, |
| 2783 | struct intel_engine_cs *to); | 2835 | struct intel_engine_cs *to, |
| 2836 | struct drm_i915_gem_request **to_req); | ||
| 2784 | void i915_vma_move_to_active(struct i915_vma *vma, | 2837 | void i915_vma_move_to_active(struct i915_vma *vma, |
| 2785 | struct intel_engine_cs *ring); | 2838 | struct drm_i915_gem_request *req); |
| 2786 | int i915_gem_dumb_create(struct drm_file *file_priv, | 2839 | int i915_gem_dumb_create(struct drm_file *file_priv, |
| 2787 | struct drm_device *dev, | 2840 | struct drm_device *dev, |
| 2788 | struct drm_mode_create_dumb *args); | 2841 | struct drm_mode_create_dumb *args); |
| @@ -2824,7 +2877,6 @@ bool i915_gem_retire_requests(struct drm_device *dev); | |||
| 2824 | void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); | 2877 | void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); |
| 2825 | int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, | 2878 | int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, |
| 2826 | bool interruptible); | 2879 | bool interruptible); |
| 2827 | int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req); | ||
| 2828 | 2880 | ||
| 2829 | static inline bool i915_reset_in_progress(struct i915_gpu_error *error) | 2881 | static inline bool i915_reset_in_progress(struct i915_gpu_error *error) |
| 2830 | { | 2882 | { |
| @@ -2859,16 +2911,18 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); | |||
| 2859 | int __must_check i915_gem_init(struct drm_device *dev); | 2911 | int __must_check i915_gem_init(struct drm_device *dev); |
| 2860 | int i915_gem_init_rings(struct drm_device *dev); | 2912 | int i915_gem_init_rings(struct drm_device *dev); |
| 2861 | int __must_check i915_gem_init_hw(struct drm_device *dev); | 2913 | int __must_check i915_gem_init_hw(struct drm_device *dev); |
| 2862 | int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice); | 2914 | int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice); |
| 2863 | void i915_gem_init_swizzling(struct drm_device *dev); | 2915 | void i915_gem_init_swizzling(struct drm_device *dev); |
| 2864 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); | 2916 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); |
| 2865 | int __must_check i915_gpu_idle(struct drm_device *dev); | 2917 | int __must_check i915_gpu_idle(struct drm_device *dev); |
| 2866 | int __must_check i915_gem_suspend(struct drm_device *dev); | 2918 | int __must_check i915_gem_suspend(struct drm_device *dev); |
| 2867 | int __i915_add_request(struct intel_engine_cs *ring, | 2919 | void __i915_add_request(struct drm_i915_gem_request *req, |
| 2868 | struct drm_file *file, | 2920 | struct drm_i915_gem_object *batch_obj, |
| 2869 | struct drm_i915_gem_object *batch_obj); | 2921 | bool flush_caches); |
| 2870 | #define i915_add_request(ring) \ | 2922 | #define i915_add_request(req) \ |
| 2871 | __i915_add_request(ring, NULL, NULL) | 2923 | __i915_add_request(req, NULL, true) |
| 2924 | #define i915_add_request_no_flush(req) \ | ||
| 2925 | __i915_add_request(req, NULL, false) | ||
| 2872 | int __i915_wait_request(struct drm_i915_gem_request *req, | 2926 | int __i915_wait_request(struct drm_i915_gem_request *req, |
| 2873 | unsigned reset_counter, | 2927 | unsigned reset_counter, |
| 2874 | bool interruptible, | 2928 | bool interruptible, |
| @@ -2888,6 +2942,7 @@ int __must_check | |||
| 2888 | i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, | 2942 | i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, |
| 2889 | u32 alignment, | 2943 | u32 alignment, |
| 2890 | struct intel_engine_cs *pipelined, | 2944 | struct intel_engine_cs *pipelined, |
| 2945 | struct drm_i915_gem_request **pipelined_request, | ||
| 2891 | const struct i915_ggtt_view *view); | 2946 | const struct i915_ggtt_view *view); |
| 2892 | void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, | 2947 | void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, |
| 2893 | const struct i915_ggtt_view *view); | 2948 | const struct i915_ggtt_view *view); |
| @@ -3012,10 +3067,9 @@ int __must_check i915_gem_context_init(struct drm_device *dev); | |||
| 3012 | void i915_gem_context_fini(struct drm_device *dev); | 3067 | void i915_gem_context_fini(struct drm_device *dev); |
| 3013 | void i915_gem_context_reset(struct drm_device *dev); | 3068 | void i915_gem_context_reset(struct drm_device *dev); |
| 3014 | int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); | 3069 | int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); |
| 3015 | int i915_gem_context_enable(struct drm_i915_private *dev_priv); | 3070 | int i915_gem_context_enable(struct drm_i915_gem_request *req); |
| 3016 | void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); | 3071 | void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); |
| 3017 | int i915_switch_context(struct intel_engine_cs *ring, | 3072 | int i915_switch_context(struct drm_i915_gem_request *req); |
| 3018 | struct intel_context *to); | ||
| 3019 | struct intel_context * | 3073 | struct intel_context * |
| 3020 | i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); | 3074 | i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); |
| 3021 | void i915_gem_context_free(struct kref *ctx_ref); | 3075 | void i915_gem_context_free(struct kref *ctx_ref); |
| @@ -3065,9 +3119,12 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev) | |||
| 3065 | } | 3119 | } |
| 3066 | 3120 | ||
| 3067 | /* i915_gem_stolen.c */ | 3121 | /* i915_gem_stolen.c */ |
| 3122 | int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, | ||
| 3123 | struct drm_mm_node *node, u64 size, | ||
| 3124 | unsigned alignment); | ||
| 3125 | void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, | ||
| 3126 | struct drm_mm_node *node); | ||
| 3068 | int i915_gem_init_stolen(struct drm_device *dev); | 3127 | int i915_gem_init_stolen(struct drm_device *dev); |
| 3069 | int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp); | ||
| 3070 | void i915_gem_stolen_cleanup_compression(struct drm_device *dev); | ||
| 3071 | void i915_gem_cleanup_stolen(struct drm_device *dev); | 3128 | void i915_gem_cleanup_stolen(struct drm_device *dev); |
| 3072 | struct drm_i915_gem_object * | 3129 | struct drm_i915_gem_object * |
| 3073 | i915_gem_object_create_stolen(struct drm_device *dev, u32 size); | 3130 | i915_gem_object_create_stolen(struct drm_device *dev, u32 size); |
| @@ -3222,8 +3279,7 @@ extern void intel_modeset_gem_init(struct drm_device *dev); | |||
| 3222 | extern void intel_modeset_cleanup(struct drm_device *dev); | 3279 | extern void intel_modeset_cleanup(struct drm_device *dev); |
| 3223 | extern void intel_connector_unregister(struct intel_connector *); | 3280 | extern void intel_connector_unregister(struct intel_connector *); |
| 3224 | extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); | 3281 | extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); |
| 3225 | extern void intel_modeset_setup_hw_state(struct drm_device *dev, | 3282 | extern void intel_display_resume(struct drm_device *dev); |
| 3226 | bool force_restore); | ||
| 3227 | extern void i915_redisable_vga(struct drm_device *dev); | 3283 | extern void i915_redisable_vga(struct drm_device *dev); |
| 3228 | extern void i915_redisable_vga_power_on(struct drm_device *dev); | 3284 | extern void i915_redisable_vga_power_on(struct drm_device *dev); |
| 3229 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); | 3285 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 52b446b27b4d..d9f2701b4593 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -149,14 +149,18 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | |||
| 149 | { | 149 | { |
| 150 | struct drm_i915_private *dev_priv = dev->dev_private; | 150 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 151 | struct drm_i915_gem_get_aperture *args = data; | 151 | struct drm_i915_gem_get_aperture *args = data; |
| 152 | struct drm_i915_gem_object *obj; | 152 | struct i915_gtt *ggtt = &dev_priv->gtt; |
| 153 | struct i915_vma *vma; | ||
| 153 | size_t pinned; | 154 | size_t pinned; |
| 154 | 155 | ||
| 155 | pinned = 0; | 156 | pinned = 0; |
| 156 | mutex_lock(&dev->struct_mutex); | 157 | mutex_lock(&dev->struct_mutex); |
| 157 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) | 158 | list_for_each_entry(vma, &ggtt->base.active_list, mm_list) |
| 158 | if (i915_gem_obj_is_pinned(obj)) | 159 | if (vma->pin_count) |
| 159 | pinned += i915_gem_obj_ggtt_size(obj); | 160 | pinned += vma->node.size; |
| 161 | list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list) | ||
| 162 | if (vma->pin_count) | ||
| 163 | pinned += vma->node.size; | ||
| 160 | mutex_unlock(&dev->struct_mutex); | 164 | mutex_unlock(&dev->struct_mutex); |
| 161 | 165 | ||
| 162 | args->aper_size = dev_priv->gtt.base.total; | 166 | args->aper_size = dev_priv->gtt.base.total; |
| @@ -347,7 +351,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, | |||
| 347 | if (ret) | 351 | if (ret) |
| 348 | return ret; | 352 | return ret; |
| 349 | 353 | ||
| 350 | intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU); | 354 | intel_fb_obj_invalidate(obj, ORIGIN_CPU); |
| 351 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { | 355 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { |
| 352 | unsigned long unwritten; | 356 | unsigned long unwritten; |
| 353 | 357 | ||
| @@ -368,7 +372,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, | |||
| 368 | i915_gem_chipset_flush(dev); | 372 | i915_gem_chipset_flush(dev); |
| 369 | 373 | ||
| 370 | out: | 374 | out: |
| 371 | intel_fb_obj_flush(obj, false); | 375 | intel_fb_obj_flush(obj, false, ORIGIN_CPU); |
| 372 | return ret; | 376 | return ret; |
| 373 | } | 377 | } |
| 374 | 378 | ||
| @@ -801,7 +805,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, | |||
| 801 | 805 | ||
| 802 | offset = i915_gem_obj_ggtt_offset(obj) + args->offset; | 806 | offset = i915_gem_obj_ggtt_offset(obj) + args->offset; |
| 803 | 807 | ||
| 804 | intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT); | 808 | intel_fb_obj_invalidate(obj, ORIGIN_GTT); |
| 805 | 809 | ||
| 806 | while (remain > 0) { | 810 | while (remain > 0) { |
| 807 | /* Operation in this page | 811 | /* Operation in this page |
| @@ -832,7 +836,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, | |||
| 832 | } | 836 | } |
| 833 | 837 | ||
| 834 | out_flush: | 838 | out_flush: |
| 835 | intel_fb_obj_flush(obj, false); | 839 | intel_fb_obj_flush(obj, false, ORIGIN_GTT); |
| 836 | out_unpin: | 840 | out_unpin: |
| 837 | i915_gem_object_ggtt_unpin(obj); | 841 | i915_gem_object_ggtt_unpin(obj); |
| 838 | out: | 842 | out: |
| @@ -945,7 +949,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, | |||
| 945 | if (ret) | 949 | if (ret) |
| 946 | return ret; | 950 | return ret; |
| 947 | 951 | ||
| 948 | intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU); | 952 | intel_fb_obj_invalidate(obj, ORIGIN_CPU); |
| 949 | 953 | ||
| 950 | i915_gem_object_pin_pages(obj); | 954 | i915_gem_object_pin_pages(obj); |
| 951 | 955 | ||
| @@ -1025,7 +1029,7 @@ out: | |||
| 1025 | if (needs_clflush_after) | 1029 | if (needs_clflush_after) |
| 1026 | i915_gem_chipset_flush(dev); | 1030 | i915_gem_chipset_flush(dev); |
| 1027 | 1031 | ||
| 1028 | intel_fb_obj_flush(obj, false); | 1032 | intel_fb_obj_flush(obj, false, ORIGIN_CPU); |
| 1029 | return ret; | 1033 | return ret; |
| 1030 | } | 1034 | } |
| 1031 | 1035 | ||
| @@ -1146,23 +1150,6 @@ i915_gem_check_wedge(struct i915_gpu_error *error, | |||
| 1146 | return 0; | 1150 | return 0; |
| 1147 | } | 1151 | } |
| 1148 | 1152 | ||
| 1149 | /* | ||
| 1150 | * Compare arbitrary request against outstanding lazy request. Emit on match. | ||
| 1151 | */ | ||
| 1152 | int | ||
| 1153 | i915_gem_check_olr(struct drm_i915_gem_request *req) | ||
| 1154 | { | ||
| 1155 | int ret; | ||
| 1156 | |||
| 1157 | WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex)); | ||
| 1158 | |||
| 1159 | ret = 0; | ||
| 1160 | if (req == req->ring->outstanding_lazy_request) | ||
| 1161 | ret = i915_add_request(req->ring); | ||
| 1162 | |||
| 1163 | return ret; | ||
| 1164 | } | ||
| 1165 | |||
| 1166 | static void fake_irq(unsigned long data) | 1153 | static void fake_irq(unsigned long data) |
| 1167 | { | 1154 | { |
| 1168 | wake_up_process((struct task_struct *)data); | 1155 | wake_up_process((struct task_struct *)data); |
| @@ -1334,6 +1321,33 @@ out: | |||
| 1334 | return ret; | 1321 | return ret; |
| 1335 | } | 1322 | } |
| 1336 | 1323 | ||
| 1324 | int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, | ||
| 1325 | struct drm_file *file) | ||
| 1326 | { | ||
| 1327 | struct drm_i915_private *dev_private; | ||
| 1328 | struct drm_i915_file_private *file_priv; | ||
| 1329 | |||
| 1330 | WARN_ON(!req || !file || req->file_priv); | ||
| 1331 | |||
| 1332 | if (!req || !file) | ||
| 1333 | return -EINVAL; | ||
| 1334 | |||
| 1335 | if (req->file_priv) | ||
| 1336 | return -EINVAL; | ||
| 1337 | |||
| 1338 | dev_private = req->ring->dev->dev_private; | ||
| 1339 | file_priv = file->driver_priv; | ||
| 1340 | |||
| 1341 | spin_lock(&file_priv->mm.lock); | ||
| 1342 | req->file_priv = file_priv; | ||
| 1343 | list_add_tail(&req->client_list, &file_priv->mm.request_list); | ||
| 1344 | spin_unlock(&file_priv->mm.lock); | ||
| 1345 | |||
| 1346 | req->pid = get_pid(task_pid(current)); | ||
| 1347 | |||
| 1348 | return 0; | ||
| 1349 | } | ||
| 1350 | |||
| 1337 | static inline void | 1351 | static inline void |
| 1338 | i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) | 1352 | i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) |
| 1339 | { | 1353 | { |
| @@ -1346,6 +1360,9 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) | |||
| 1346 | list_del(&request->client_list); | 1360 | list_del(&request->client_list); |
| 1347 | request->file_priv = NULL; | 1361 | request->file_priv = NULL; |
| 1348 | spin_unlock(&file_priv->mm.lock); | 1362 | spin_unlock(&file_priv->mm.lock); |
| 1363 | |||
| 1364 | put_pid(request->pid); | ||
| 1365 | request->pid = NULL; | ||
| 1349 | } | 1366 | } |
| 1350 | 1367 | ||
| 1351 | static void i915_gem_request_retire(struct drm_i915_gem_request *request) | 1368 | static void i915_gem_request_retire(struct drm_i915_gem_request *request) |
| @@ -1365,8 +1382,6 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request) | |||
| 1365 | list_del_init(&request->list); | 1382 | list_del_init(&request->list); |
| 1366 | i915_gem_request_remove_from_client(request); | 1383 | i915_gem_request_remove_from_client(request); |
| 1367 | 1384 | ||
| 1368 | put_pid(request->pid); | ||
| 1369 | |||
| 1370 | i915_gem_request_unreference(request); | 1385 | i915_gem_request_unreference(request); |
| 1371 | } | 1386 | } |
| 1372 | 1387 | ||
| @@ -1415,10 +1430,6 @@ i915_wait_request(struct drm_i915_gem_request *req) | |||
| 1415 | if (ret) | 1430 | if (ret) |
| 1416 | return ret; | 1431 | return ret; |
| 1417 | 1432 | ||
| 1418 | ret = i915_gem_check_olr(req); | ||
| 1419 | if (ret) | ||
| 1420 | return ret; | ||
| 1421 | |||
| 1422 | ret = __i915_wait_request(req, | 1433 | ret = __i915_wait_request(req, |
| 1423 | atomic_read(&dev_priv->gpu_error.reset_counter), | 1434 | atomic_read(&dev_priv->gpu_error.reset_counter), |
| 1424 | interruptible, NULL, NULL); | 1435 | interruptible, NULL, NULL); |
| @@ -1518,10 +1529,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, | |||
| 1518 | if (req == NULL) | 1529 | if (req == NULL) |
| 1519 | return 0; | 1530 | return 0; |
| 1520 | 1531 | ||
| 1521 | ret = i915_gem_check_olr(req); | ||
| 1522 | if (ret) | ||
| 1523 | goto err; | ||
| 1524 | |||
| 1525 | requests[n++] = i915_gem_request_reference(req); | 1532 | requests[n++] = i915_gem_request_reference(req); |
| 1526 | } else { | 1533 | } else { |
| 1527 | for (i = 0; i < I915_NUM_RINGS; i++) { | 1534 | for (i = 0; i < I915_NUM_RINGS; i++) { |
| @@ -1531,10 +1538,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, | |||
| 1531 | if (req == NULL) | 1538 | if (req == NULL) |
| 1532 | continue; | 1539 | continue; |
| 1533 | 1540 | ||
| 1534 | ret = i915_gem_check_olr(req); | ||
| 1535 | if (ret) | ||
| 1536 | goto err; | ||
| 1537 | |||
| 1538 | requests[n++] = i915_gem_request_reference(req); | 1541 | requests[n++] = i915_gem_request_reference(req); |
| 1539 | } | 1542 | } |
| 1540 | } | 1543 | } |
| @@ -1545,7 +1548,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, | |||
| 1545 | NULL, rps); | 1548 | NULL, rps); |
| 1546 | mutex_lock(&dev->struct_mutex); | 1549 | mutex_lock(&dev->struct_mutex); |
| 1547 | 1550 | ||
| 1548 | err: | ||
| 1549 | for (i = 0; i < n; i++) { | 1551 | for (i = 0; i < n; i++) { |
| 1550 | if (ret == 0) | 1552 | if (ret == 0) |
| 1551 | i915_gem_object_retire_request(obj, requests[i]); | 1553 | i915_gem_object_retire_request(obj, requests[i]); |
| @@ -1613,6 +1615,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
| 1613 | else | 1615 | else |
| 1614 | ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); | 1616 | ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); |
| 1615 | 1617 | ||
| 1618 | if (write_domain != 0) | ||
| 1619 | intel_fb_obj_invalidate(obj, | ||
| 1620 | write_domain == I915_GEM_DOMAIN_GTT ? | ||
| 1621 | ORIGIN_GTT : ORIGIN_CPU); | ||
| 1622 | |||
| 1616 | unref: | 1623 | unref: |
| 1617 | drm_gem_object_unreference(&obj->base); | 1624 | drm_gem_object_unreference(&obj->base); |
| 1618 | unlock: | 1625 | unlock: |
| @@ -2349,9 +2356,12 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj) | |||
| 2349 | } | 2356 | } |
| 2350 | 2357 | ||
| 2351 | void i915_vma_move_to_active(struct i915_vma *vma, | 2358 | void i915_vma_move_to_active(struct i915_vma *vma, |
| 2352 | struct intel_engine_cs *ring) | 2359 | struct drm_i915_gem_request *req) |
| 2353 | { | 2360 | { |
| 2354 | struct drm_i915_gem_object *obj = vma->obj; | 2361 | struct drm_i915_gem_object *obj = vma->obj; |
| 2362 | struct intel_engine_cs *ring; | ||
| 2363 | |||
| 2364 | ring = i915_gem_request_get_ring(req); | ||
| 2355 | 2365 | ||
| 2356 | /* Add a reference if we're newly entering the active list. */ | 2366 | /* Add a reference if we're newly entering the active list. */ |
| 2357 | if (obj->active == 0) | 2367 | if (obj->active == 0) |
| @@ -2359,8 +2369,7 @@ void i915_vma_move_to_active(struct i915_vma *vma, | |||
| 2359 | obj->active |= intel_ring_flag(ring); | 2369 | obj->active |= intel_ring_flag(ring); |
| 2360 | 2370 | ||
| 2361 | list_move_tail(&obj->ring_list[ring->id], &ring->active_list); | 2371 | list_move_tail(&obj->ring_list[ring->id], &ring->active_list); |
| 2362 | i915_gem_request_assign(&obj->last_read_req[ring->id], | 2372 | i915_gem_request_assign(&obj->last_read_req[ring->id], req); |
| 2363 | intel_ring_get_request(ring)); | ||
| 2364 | 2373 | ||
| 2365 | list_move_tail(&vma->mm_list, &vma->vm->active_list); | 2374 | list_move_tail(&vma->mm_list, &vma->vm->active_list); |
| 2366 | } | 2375 | } |
| @@ -2372,7 +2381,7 @@ i915_gem_object_retire__write(struct drm_i915_gem_object *obj) | |||
| 2372 | RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring))); | 2381 | RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring))); |
| 2373 | 2382 | ||
| 2374 | i915_gem_request_assign(&obj->last_write_req, NULL); | 2383 | i915_gem_request_assign(&obj->last_write_req, NULL); |
| 2375 | intel_fb_obj_flush(obj, true); | 2384 | intel_fb_obj_flush(obj, true, ORIGIN_CS); |
| 2376 | } | 2385 | } |
| 2377 | 2386 | ||
| 2378 | static void | 2387 | static void |
| @@ -2472,24 +2481,34 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) | |||
| 2472 | return 0; | 2481 | return 0; |
| 2473 | } | 2482 | } |
| 2474 | 2483 | ||
| 2475 | int __i915_add_request(struct intel_engine_cs *ring, | 2484 | /* |
| 2476 | struct drm_file *file, | 2485 | * NB: This function is not allowed to fail. Doing so would mean the |
| 2477 | struct drm_i915_gem_object *obj) | 2486 | * request is not being tracked for completion but the work itself is |
| 2487 | * going to happen on the hardware. This would be a Bad Thing(tm). | ||
| 2488 | */ | ||
| 2489 | void __i915_add_request(struct drm_i915_gem_request *request, | ||
| 2490 | struct drm_i915_gem_object *obj, | ||
| 2491 | bool flush_caches) | ||
| 2478 | { | 2492 | { |
| 2479 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 2493 | struct intel_engine_cs *ring; |
| 2480 | struct drm_i915_gem_request *request; | 2494 | struct drm_i915_private *dev_priv; |
| 2481 | struct intel_ringbuffer *ringbuf; | 2495 | struct intel_ringbuffer *ringbuf; |
| 2482 | u32 request_start; | 2496 | u32 request_start; |
| 2483 | int ret; | 2497 | int ret; |
| 2484 | 2498 | ||
| 2485 | request = ring->outstanding_lazy_request; | ||
| 2486 | if (WARN_ON(request == NULL)) | 2499 | if (WARN_ON(request == NULL)) |
| 2487 | return -ENOMEM; | 2500 | return; |
| 2488 | 2501 | ||
| 2489 | if (i915.enable_execlists) { | 2502 | ring = request->ring; |
| 2490 | ringbuf = request->ctx->engine[ring->id].ringbuf; | 2503 | dev_priv = ring->dev->dev_private; |
| 2491 | } else | 2504 | ringbuf = request->ringbuf; |
| 2492 | ringbuf = ring->buffer; | 2505 | |
| 2506 | /* | ||
| 2507 | * To ensure that this call will not fail, space for its emissions | ||
| 2508 | * should already have been reserved in the ring buffer. Let the ring | ||
| 2509 | * know that it is time to use that space up. | ||
| 2510 | */ | ||
| 2511 | intel_ring_reserved_space_use(ringbuf); | ||
| 2493 | 2512 | ||
| 2494 | request_start = intel_ring_get_tail(ringbuf); | 2513 | request_start = intel_ring_get_tail(ringbuf); |
| 2495 | /* | 2514 | /* |
| @@ -2499,14 +2518,13 @@ int __i915_add_request(struct intel_engine_cs *ring, | |||
| 2499 | * is that the flush _must_ happen before the next request, no matter | 2518 | * is that the flush _must_ happen before the next request, no matter |
| 2500 | * what. | 2519 | * what. |
| 2501 | */ | 2520 | */ |
| 2502 | if (i915.enable_execlists) { | 2521 | if (flush_caches) { |
| 2503 | ret = logical_ring_flush_all_caches(ringbuf, request->ctx); | 2522 | if (i915.enable_execlists) |
| 2504 | if (ret) | 2523 | ret = logical_ring_flush_all_caches(request); |
| 2505 | return ret; | 2524 | else |
| 2506 | } else { | 2525 | ret = intel_ring_flush_all_caches(request); |
| 2507 | ret = intel_ring_flush_all_caches(ring); | 2526 | /* Not allowed to fail! */ |
| 2508 | if (ret) | 2527 | WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret); |
| 2509 | return ret; | ||
| 2510 | } | 2528 | } |
| 2511 | 2529 | ||
| 2512 | /* Record the position of the start of the request so that | 2530 | /* Record the position of the start of the request so that |
| @@ -2516,17 +2534,15 @@ int __i915_add_request(struct intel_engine_cs *ring, | |||
| 2516 | */ | 2534 | */ |
| 2517 | request->postfix = intel_ring_get_tail(ringbuf); | 2535 | request->postfix = intel_ring_get_tail(ringbuf); |
| 2518 | 2536 | ||
| 2519 | if (i915.enable_execlists) { | 2537 | if (i915.enable_execlists) |
| 2520 | ret = ring->emit_request(ringbuf, request); | 2538 | ret = ring->emit_request(request); |
| 2521 | if (ret) | 2539 | else { |
| 2522 | return ret; | 2540 | ret = ring->add_request(request); |
| 2523 | } else { | ||
| 2524 | ret = ring->add_request(ring); | ||
| 2525 | if (ret) | ||
| 2526 | return ret; | ||
| 2527 | 2541 | ||
| 2528 | request->tail = intel_ring_get_tail(ringbuf); | 2542 | request->tail = intel_ring_get_tail(ringbuf); |
| 2529 | } | 2543 | } |
| 2544 | /* Not allowed to fail! */ | ||
| 2545 | WARN(ret, "emit|add_request failed: %d!\n", ret); | ||
| 2530 | 2546 | ||
| 2531 | request->head = request_start; | 2547 | request->head = request_start; |
| 2532 | 2548 | ||
| @@ -2538,34 +2554,11 @@ int __i915_add_request(struct intel_engine_cs *ring, | |||
| 2538 | */ | 2554 | */ |
| 2539 | request->batch_obj = obj; | 2555 | request->batch_obj = obj; |
| 2540 | 2556 | ||
| 2541 | if (!i915.enable_execlists) { | ||
| 2542 | /* Hold a reference to the current context so that we can inspect | ||
| 2543 | * it later in case a hangcheck error event fires. | ||
| 2544 | */ | ||
| 2545 | request->ctx = ring->last_context; | ||
| 2546 | if (request->ctx) | ||
| 2547 | i915_gem_context_reference(request->ctx); | ||
| 2548 | } | ||
| 2549 | |||
| 2550 | request->emitted_jiffies = jiffies; | 2557 | request->emitted_jiffies = jiffies; |
| 2551 | ring->last_submitted_seqno = request->seqno; | 2558 | ring->last_submitted_seqno = request->seqno; |
| 2552 | list_add_tail(&request->list, &ring->request_list); | 2559 | list_add_tail(&request->list, &ring->request_list); |
| 2553 | request->file_priv = NULL; | ||
| 2554 | |||
| 2555 | if (file) { | ||
| 2556 | struct drm_i915_file_private *file_priv = file->driver_priv; | ||
| 2557 | |||
| 2558 | spin_lock(&file_priv->mm.lock); | ||
| 2559 | request->file_priv = file_priv; | ||
| 2560 | list_add_tail(&request->client_list, | ||
| 2561 | &file_priv->mm.request_list); | ||
| 2562 | spin_unlock(&file_priv->mm.lock); | ||
| 2563 | |||
| 2564 | request->pid = get_pid(task_pid(current)); | ||
| 2565 | } | ||
| 2566 | 2560 | ||
| 2567 | trace_i915_gem_request_add(request); | 2561 | trace_i915_gem_request_add(request); |
| 2568 | ring->outstanding_lazy_request = NULL; | ||
| 2569 | 2562 | ||
| 2570 | i915_queue_hangcheck(ring->dev); | 2563 | i915_queue_hangcheck(ring->dev); |
| 2571 | 2564 | ||
| @@ -2574,7 +2567,8 @@ int __i915_add_request(struct intel_engine_cs *ring, | |||
| 2574 | round_jiffies_up_relative(HZ)); | 2567 | round_jiffies_up_relative(HZ)); |
| 2575 | intel_mark_busy(dev_priv->dev); | 2568 | intel_mark_busy(dev_priv->dev); |
| 2576 | 2569 | ||
| 2577 | return 0; | 2570 | /* Sanity check that the reserved size was large enough. */ |
| 2571 | intel_ring_reserved_space_end(ringbuf); | ||
| 2578 | } | 2572 | } |
| 2579 | 2573 | ||
| 2580 | static bool i915_context_is_banned(struct drm_i915_private *dev_priv, | 2574 | static bool i915_context_is_banned(struct drm_i915_private *dev_priv, |
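The hunks above turn __i915_add_request() into a void function: the ring space needed for its closing commands is reserved when the request is created, so by the time the request is added the emission can no longer run out of space and any residual failure is only reported with a WARN. Below is a minimal user-space model of that reserve-then-use contract; the ring_buf type and helper names are invented for illustration and are not part of the driver.

    /*
     * Stand-alone sketch of the reservation contract: reserving can fail
     * (and is easy to unwind), but emission after the reservation has been
     * handed back is guaranteed to fit.
     */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE 64

    struct ring_buf {
        unsigned int tail;      /* next free slot */
        unsigned int space;     /* free dwords, excluding the reservation */
        unsigned int reserved;  /* dwords promised to the closing commands */
    };

    /* Request-allocation time: may fail, and failure is easy to unwind. */
    static bool ring_reserve(struct ring_buf *ring, unsigned int dwords)
    {
        if (ring->space < dwords)
            return false;
        ring->space -= dwords;
        ring->reserved = dwords;
        return true;
    }

    /* Add-request time: hand the reserved dwords back for immediate use. */
    static void ring_use_reserved(struct ring_buf *ring)
    {
        ring->space += ring->reserved;
    }

    /* Emission after the reservation is released cannot run dry. */
    static void ring_emit(struct ring_buf *ring, unsigned int dword)
    {
        assert(ring->space > 0);
        ring->tail = (ring->tail + 1) % RING_SIZE;
        ring->space--;
        (void)dword;
    }

    int main(void)
    {
        struct ring_buf ring = { .tail = 0, .space = RING_SIZE, .reserved = 0 };

        if (!ring_reserve(&ring, 8))      /* request allocation: may fail */
            return 1;

        ring_emit(&ring, 0x1);            /* ordinary batch work */

        ring_use_reserved(&ring);         /* add-request: cannot fail now */
        for (int i = 0; i < 8; i++)
            ring_emit(&ring, 0xd);        /* closing commands fit by construction */

        printf("tail=%u space=%u\n", ring.tail, ring.space);
        return 0;
    }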
| @@ -2628,12 +2622,13 @@ void i915_gem_request_free(struct kref *req_ref) | |||
| 2628 | typeof(*req), ref); | 2622 | typeof(*req), ref); |
| 2629 | struct intel_context *ctx = req->ctx; | 2623 | struct intel_context *ctx = req->ctx; |
| 2630 | 2624 | ||
| 2625 | if (req->file_priv) | ||
| 2626 | i915_gem_request_remove_from_client(req); | ||
| 2627 | |||
| 2631 | if (ctx) { | 2628 | if (ctx) { |
| 2632 | if (i915.enable_execlists) { | 2629 | if (i915.enable_execlists) { |
| 2633 | struct intel_engine_cs *ring = req->ring; | 2630 | if (ctx != req->ring->default_context) |
| 2634 | 2631 | intel_lr_context_unpin(req); | |
| 2635 | if (ctx != ring->default_context) | ||
| 2636 | intel_lr_context_unpin(ring, ctx); | ||
| 2637 | } | 2632 | } |
| 2638 | 2633 | ||
| 2639 | i915_gem_context_unreference(ctx); | 2634 | i915_gem_context_unreference(ctx); |
| @@ -2643,36 +2638,63 @@ void i915_gem_request_free(struct kref *req_ref) | |||
| 2643 | } | 2638 | } |
| 2644 | 2639 | ||
| 2645 | int i915_gem_request_alloc(struct intel_engine_cs *ring, | 2640 | int i915_gem_request_alloc(struct intel_engine_cs *ring, |
| 2646 | struct intel_context *ctx) | 2641 | struct intel_context *ctx, |
| 2642 | struct drm_i915_gem_request **req_out) | ||
| 2647 | { | 2643 | { |
| 2648 | struct drm_i915_private *dev_priv = to_i915(ring->dev); | 2644 | struct drm_i915_private *dev_priv = to_i915(ring->dev); |
| 2649 | struct drm_i915_gem_request *req; | 2645 | struct drm_i915_gem_request *req; |
| 2650 | int ret; | 2646 | int ret; |
| 2651 | 2647 | ||
| 2652 | if (ring->outstanding_lazy_request) | 2648 | if (!req_out) |
| 2653 | return 0; | 2649 | return -EINVAL; |
| 2650 | |||
| 2651 | *req_out = NULL; | ||
| 2654 | 2652 | ||
| 2655 | req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL); | 2653 | req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL); |
| 2656 | if (req == NULL) | 2654 | if (req == NULL) |
| 2657 | return -ENOMEM; | 2655 | return -ENOMEM; |
| 2658 | 2656 | ||
| 2659 | kref_init(&req->ref); | ||
| 2660 | req->i915 = dev_priv; | ||
| 2661 | |||
| 2662 | ret = i915_gem_get_seqno(ring->dev, &req->seqno); | 2657 | ret = i915_gem_get_seqno(ring->dev, &req->seqno); |
| 2663 | if (ret) | 2658 | if (ret) |
| 2664 | goto err; | 2659 | goto err; |
| 2665 | 2660 | ||
| 2661 | kref_init(&req->ref); | ||
| 2662 | req->i915 = dev_priv; | ||
| 2666 | req->ring = ring; | 2663 | req->ring = ring; |
| 2664 | req->ctx = ctx; | ||
| 2665 | i915_gem_context_reference(req->ctx); | ||
| 2667 | 2666 | ||
| 2668 | if (i915.enable_execlists) | 2667 | if (i915.enable_execlists) |
| 2669 | ret = intel_logical_ring_alloc_request_extras(req, ctx); | 2668 | ret = intel_logical_ring_alloc_request_extras(req); |
| 2670 | else | 2669 | else |
| 2671 | ret = intel_ring_alloc_request_extras(req); | 2670 | ret = intel_ring_alloc_request_extras(req); |
| 2672 | if (ret) | 2671 | if (ret) { |
| 2672 | i915_gem_context_unreference(req->ctx); | ||
| 2673 | goto err; | 2673 | goto err; |
| 2674 | } | ||
| 2675 | |||
| 2676 | /* | ||
| 2677 | * Reserve space in the ring buffer for all the commands required to | ||
| 2678 | * eventually emit this request. This is to guarantee that the | ||
| 2679 | * i915_add_request() call can't fail. Note that the reserve may need | ||
| 2680 | * to be redone if the request is not actually submitted straight | ||
| 2681 | * away, e.g. because a GPU scheduler has deferred it. | ||
| 2682 | */ | ||
| 2683 | if (i915.enable_execlists) | ||
| 2684 | ret = intel_logical_ring_reserve_space(req); | ||
| 2685 | else | ||
| 2686 | ret = intel_ring_reserve_space(req); | ||
| 2687 | if (ret) { | ||
| 2688 | /* | ||
| 2689 | * At this point, the request is fully allocated even if not | ||
| 2690 | * fully prepared. Thus it can be cleaned up using the proper | ||
| 2691 | * free code. | ||
| 2692 | */ | ||
| 2693 | i915_gem_request_cancel(req); | ||
| 2694 | return ret; | ||
| 2695 | } | ||
| 2674 | 2696 | ||
| 2675 | ring->outstanding_lazy_request = req; | 2697 | *req_out = req; |
| 2676 | return 0; | 2698 | return 0; |
| 2677 | 2699 | ||
| 2678 | err: | 2700 | err: |
| @@ -2680,6 +2702,13 @@ err: | |||
| 2680 | return ret; | 2702 | return ret; |
| 2681 | } | 2703 | } |
| 2682 | 2704 | ||
| 2705 | void i915_gem_request_cancel(struct drm_i915_gem_request *req) | ||
| 2706 | { | ||
| 2707 | intel_ring_reserved_space_cancel(req->ringbuf); | ||
| 2708 | |||
| 2709 | i915_gem_request_unreference(req); | ||
| 2710 | } | ||
| 2711 | |||
| 2683 | struct drm_i915_gem_request * | 2712 | struct drm_i915_gem_request * |
| 2684 | i915_gem_find_active_request(struct intel_engine_cs *ring) | 2713 | i915_gem_find_active_request(struct intel_engine_cs *ring) |
| 2685 | { | 2714 | { |
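From the caller's side, the same contract shows up as a three-step lifecycle: i915_gem_request_alloc() (which also makes the ring reservation) may fail and is unwound with the new i915_gem_request_cancel(), while the final add-request step is assumed to always succeed. The sketch below mirrors that calling convention with made-up names and no real driver types.

    /* Caller-side view: alloc, then either cancel on error or add on success. */
    #include <stdio.h>

    struct request { int reserved; };

    static int request_alloc(struct request *req)
    {
        req->reserved = 1;          /* reservation made up front; may fail */
        return 0;
    }

    static void request_cancel(struct request *req)
    {
        req->reserved = 0;          /* give the reservation back */
    }

    static void request_add(struct request *req)
    {
        req->reserved = 0;          /* use the reservation; cannot fail */
        printf("request submitted\n");
    }

    static int queue_work(struct request *req)
    {
        (void)req;
        return 0;                   /* pretend the work was queued */
    }

    int main(void)
    {
        struct request req;

        if (request_alloc(&req))
            return 1;

        if (queue_work(&req)) {
            request_cancel(&req);   /* error between alloc and add: cancel */
            return 1;
        }

        request_add(&req);          /* success path: always completes */
        return 0;
    }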
| @@ -2741,7 +2770,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, | |||
| 2741 | list_del(&submit_req->execlist_link); | 2770 | list_del(&submit_req->execlist_link); |
| 2742 | 2771 | ||
| 2743 | if (submit_req->ctx != ring->default_context) | 2772 | if (submit_req->ctx != ring->default_context) |
| 2744 | intel_lr_context_unpin(ring, submit_req->ctx); | 2773 | intel_lr_context_unpin(submit_req); |
| 2745 | 2774 | ||
| 2746 | i915_gem_request_unreference(submit_req); | 2775 | i915_gem_request_unreference(submit_req); |
| 2747 | } | 2776 | } |
| @@ -2762,9 +2791,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, | |||
| 2762 | 2791 | ||
| 2763 | i915_gem_request_retire(request); | 2792 | i915_gem_request_retire(request); |
| 2764 | } | 2793 | } |
| 2765 | |||
| 2766 | /* This may not have been flushed before the reset, so clean it now */ | ||
| 2767 | i915_gem_request_assign(&ring->outstanding_lazy_request, NULL); | ||
| 2768 | } | 2794 | } |
| 2769 | 2795 | ||
| 2770 | void i915_gem_restore_fences(struct drm_device *dev) | 2796 | void i915_gem_restore_fences(struct drm_device *dev) |
| @@ -2947,7 +2973,7 @@ i915_gem_idle_work_handler(struct work_struct *work) | |||
| 2947 | static int | 2973 | static int |
| 2948 | i915_gem_object_flush_active(struct drm_i915_gem_object *obj) | 2974 | i915_gem_object_flush_active(struct drm_i915_gem_object *obj) |
| 2949 | { | 2975 | { |
| 2950 | int ret, i; | 2976 | int i; |
| 2951 | 2977 | ||
| 2952 | if (!obj->active) | 2978 | if (!obj->active) |
| 2953 | return 0; | 2979 | return 0; |
| @@ -2962,10 +2988,6 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj) | |||
| 2962 | if (list_empty(&req->list)) | 2988 | if (list_empty(&req->list)) |
| 2963 | goto retire; | 2989 | goto retire; |
| 2964 | 2990 | ||
| 2965 | ret = i915_gem_check_olr(req); | ||
| 2966 | if (ret) | ||
| 2967 | return ret; | ||
| 2968 | |||
| 2969 | if (i915_gem_request_completed(req, true)) { | 2991 | if (i915_gem_request_completed(req, true)) { |
| 2970 | __i915_gem_request_retire__upto(req); | 2992 | __i915_gem_request_retire__upto(req); |
| 2971 | retire: | 2993 | retire: |
| @@ -3068,25 +3090,22 @@ out: | |||
| 3068 | static int | 3090 | static int |
| 3069 | __i915_gem_object_sync(struct drm_i915_gem_object *obj, | 3091 | __i915_gem_object_sync(struct drm_i915_gem_object *obj, |
| 3070 | struct intel_engine_cs *to, | 3092 | struct intel_engine_cs *to, |
| 3071 | struct drm_i915_gem_request *req) | 3093 | struct drm_i915_gem_request *from_req, |
| 3094 | struct drm_i915_gem_request **to_req) | ||
| 3072 | { | 3095 | { |
| 3073 | struct intel_engine_cs *from; | 3096 | struct intel_engine_cs *from; |
| 3074 | int ret; | 3097 | int ret; |
| 3075 | 3098 | ||
| 3076 | from = i915_gem_request_get_ring(req); | 3099 | from = i915_gem_request_get_ring(from_req); |
| 3077 | if (to == from) | 3100 | if (to == from) |
| 3078 | return 0; | 3101 | return 0; |
| 3079 | 3102 | ||
| 3080 | if (i915_gem_request_completed(req, true)) | 3103 | if (i915_gem_request_completed(from_req, true)) |
| 3081 | return 0; | 3104 | return 0; |
| 3082 | 3105 | ||
| 3083 | ret = i915_gem_check_olr(req); | ||
| 3084 | if (ret) | ||
| 3085 | return ret; | ||
| 3086 | |||
| 3087 | if (!i915_semaphore_is_enabled(obj->base.dev)) { | 3106 | if (!i915_semaphore_is_enabled(obj->base.dev)) { |
| 3088 | struct drm_i915_private *i915 = to_i915(obj->base.dev); | 3107 | struct drm_i915_private *i915 = to_i915(obj->base.dev); |
| 3089 | ret = __i915_wait_request(req, | 3108 | ret = __i915_wait_request(from_req, |
| 3090 | atomic_read(&i915->gpu_error.reset_counter), | 3109 | atomic_read(&i915->gpu_error.reset_counter), |
| 3091 | i915->mm.interruptible, | 3110 | i915->mm.interruptible, |
| 3092 | NULL, | 3111 | NULL, |
| @@ -3094,16 +3113,24 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, | |||
| 3094 | if (ret) | 3113 | if (ret) |
| 3095 | return ret; | 3114 | return ret; |
| 3096 | 3115 | ||
| 3097 | i915_gem_object_retire_request(obj, req); | 3116 | i915_gem_object_retire_request(obj, from_req); |
| 3098 | } else { | 3117 | } else { |
| 3099 | int idx = intel_ring_sync_index(from, to); | 3118 | int idx = intel_ring_sync_index(from, to); |
| 3100 | u32 seqno = i915_gem_request_get_seqno(req); | 3119 | u32 seqno = i915_gem_request_get_seqno(from_req); |
| 3120 | |||
| 3121 | WARN_ON(!to_req); | ||
| 3101 | 3122 | ||
| 3102 | if (seqno <= from->semaphore.sync_seqno[idx]) | 3123 | if (seqno <= from->semaphore.sync_seqno[idx]) |
| 3103 | return 0; | 3124 | return 0; |
| 3104 | 3125 | ||
| 3105 | trace_i915_gem_ring_sync_to(from, to, req); | 3126 | if (*to_req == NULL) { |
| 3106 | ret = to->semaphore.sync_to(to, from, seqno); | 3127 | ret = i915_gem_request_alloc(to, to->default_context, to_req); |
| 3128 | if (ret) | ||
| 3129 | return ret; | ||
| 3130 | } | ||
| 3131 | |||
| 3132 | trace_i915_gem_ring_sync_to(*to_req, from, from_req); | ||
| 3133 | ret = to->semaphore.sync_to(*to_req, from, seqno); | ||
| 3107 | if (ret) | 3134 | if (ret) |
| 3108 | return ret; | 3135 | return ret; |
| 3109 | 3136 | ||
| @@ -3123,11 +3150,14 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, | |||
| 3123 | * | 3150 | * |
| 3124 | * @obj: object which may be in use on another ring. | 3151 | * @obj: object which may be in use on another ring. |
| 3125 | * @to: ring we wish to use the object on. May be NULL. | 3152 | * @to: ring we wish to use the object on. May be NULL. |
| 3153 | * @to_req: request we wish to use the object for. See below. | ||
| 3154 | * This will be allocated and returned if a request is | ||
| 3155 | * required but not passed in. | ||
| 3126 | * | 3156 | * |
| 3127 | * This code is meant to abstract object synchronization with the GPU. | 3157 | * This code is meant to abstract object synchronization with the GPU. |
| 3128 | * Calling with NULL implies synchronizing the object with the CPU | 3158 | * Calling with NULL implies synchronizing the object with the CPU |
| 3129 | * rather than a particular GPU ring. Conceptually we serialise writes | 3159 | * rather than a particular GPU ring. Conceptually we serialise writes |
| 3130 | * between engines inside the GPU. We only allow on engine to write | 3160 | * between engines inside the GPU. We only allow one engine to write |
| 3131 | * into a buffer at any time, but multiple readers. To ensure each has | 3161 | * into a buffer at any time, but multiple readers. To ensure each has |
| 3132 | * a coherent view of memory, we must: | 3162 | * a coherent view of memory, we must: |
| 3133 | * | 3163 | * |
| @@ -3138,11 +3168,22 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, | |||
| 3138 | * - If we are a write request (pending_write_domain is set), the new | 3168 | * - If we are a write request (pending_write_domain is set), the new |
| 3139 | * request must wait for outstanding read requests to complete. | 3169 | * request must wait for outstanding read requests to complete. |
| 3140 | * | 3170 | * |
| 3171 | * For CPU synchronisation (NULL to) no request is required. For syncing with | ||
| 3172 | * rings to_req must be non-NULL. However, a request does not have to be | ||
| 3173 | * pre-allocated. If *to_req is NULL and sync commands will be emitted then a | ||
| 3174 | * request will be allocated automatically and returned through *to_req. Note | ||
| 3175 | * that it is not guaranteed that commands will be emitted (because the system | ||
| 3176 | * might already be idle). Hence there is no need to create a request that | ||
| 3177 | * might never have any work submitted. Note further that if a request is | ||
| 3178 | * returned in *to_req, it is the responsibility of the caller to submit | ||
| 3179 | * that request (after potentially adding more work to it). | ||
| 3180 | * | ||
| 3141 | * Returns 0 if successful, else propagates up the lower layer error. | 3181 | * Returns 0 if successful, else propagates up the lower layer error. |
| 3142 | */ | 3182 | */ |
| 3143 | int | 3183 | int |
| 3144 | i915_gem_object_sync(struct drm_i915_gem_object *obj, | 3184 | i915_gem_object_sync(struct drm_i915_gem_object *obj, |
| 3145 | struct intel_engine_cs *to) | 3185 | struct intel_engine_cs *to, |
| 3186 | struct drm_i915_gem_request **to_req) | ||
| 3146 | { | 3187 | { |
| 3147 | const bool readonly = obj->base.pending_write_domain == 0; | 3188 | const bool readonly = obj->base.pending_write_domain == 0; |
| 3148 | struct drm_i915_gem_request *req[I915_NUM_RINGS]; | 3189 | struct drm_i915_gem_request *req[I915_NUM_RINGS]; |
| @@ -3164,7 +3205,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj, | |||
| 3164 | req[n++] = obj->last_read_req[i]; | 3205 | req[n++] = obj->last_read_req[i]; |
| 3165 | } | 3206 | } |
| 3166 | for (i = 0; i < n; i++) { | 3207 | for (i = 0; i < n; i++) { |
| 3167 | ret = __i915_gem_object_sync(obj, to, req[i]); | 3208 | ret = __i915_gem_object_sync(obj, to, req[i], to_req); |
| 3168 | if (ret) | 3209 | if (ret) |
| 3169 | return ret; | 3210 | return ret; |
| 3170 | } | 3211 | } |
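The new kerneldoc above describes the *to_req convention for i915_gem_object_sync(): a request is only allocated if sync commands are actually emitted, it is allocated at most once into *to_req, and anything returned is the caller's responsibility to submit. The self-contained C sketch below shows the same lazily-allocated output-parameter pattern; the request type and helper names are placeholders, not driver API.

    #include <stdio.h>
    #include <stdlib.h>

    struct request { int id; };

    /* Allocate a request on demand; 0 on success, negative on failure. */
    static int request_alloc(struct request **out)
    {
        *out = malloc(sizeof(**out));
        if (!*out)
            return -1;
        (*out)->id = 42;
        return 0;
    }

    /*
     * Sync helper: if no work is needed, *to_req is left untouched (possibly
     * still NULL); otherwise a request is allocated into *to_req exactly once
     * and reused for any further sync commands.
     */
    static int object_sync(int needs_work, struct request **to_req)
    {
        if (!needs_work)
            return 0;

        if (*to_req == NULL) {
            int ret = request_alloc(to_req);
            if (ret)
                return ret;
        }

        printf("emitting sync commands into request %d\n", (*to_req)->id);
        return 0;
    }

    int main(void)
    {
        struct request *req = NULL;

        if (object_sync(1, &req))
            return 1;

        /* The caller owns whatever came back and must submit (or free) it. */
        if (req) {
            printf("submitting request %d\n", req->id);
            free(req);
        }
        return 0;
    }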
| @@ -3275,9 +3316,19 @@ int i915_gpu_idle(struct drm_device *dev) | |||
| 3275 | /* Flush everything onto the inactive list. */ | 3316 | /* Flush everything onto the inactive list. */ |
| 3276 | for_each_ring(ring, dev_priv, i) { | 3317 | for_each_ring(ring, dev_priv, i) { |
| 3277 | if (!i915.enable_execlists) { | 3318 | if (!i915.enable_execlists) { |
| 3278 | ret = i915_switch_context(ring, ring->default_context); | 3319 | struct drm_i915_gem_request *req; |
| 3320 | |||
| 3321 | ret = i915_gem_request_alloc(ring, ring->default_context, &req); | ||
| 3279 | if (ret) | 3322 | if (ret) |
| 3280 | return ret; | 3323 | return ret; |
| 3324 | |||
| 3325 | ret = i915_switch_context(req); | ||
| 3326 | if (ret) { | ||
| 3327 | i915_gem_request_cancel(req); | ||
| 3328 | return ret; | ||
| 3329 | } | ||
| 3330 | |||
| 3331 | i915_add_request_no_flush(req); | ||
| 3281 | } | 3332 | } |
| 3282 | 3333 | ||
| 3283 | ret = intel_ring_idle(ring); | 3334 | ret = intel_ring_idle(ring); |
| @@ -3673,9 +3724,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, | |||
| 3673 | struct drm_device *dev = obj->base.dev; | 3724 | struct drm_device *dev = obj->base.dev; |
| 3674 | struct drm_i915_private *dev_priv = dev->dev_private; | 3725 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3675 | u32 size, fence_size, fence_alignment, unfenced_alignment; | 3726 | u32 size, fence_size, fence_alignment, unfenced_alignment; |
| 3676 | unsigned long start = | 3727 | u64 start = |
| 3677 | flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; | 3728 | flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; |
| 3678 | unsigned long end = | 3729 | u64 end = |
| 3679 | flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total; | 3730 | flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total; |
| 3680 | struct i915_vma *vma; | 3731 | struct i915_vma *vma; |
| 3681 | int ret; | 3732 | int ret; |
| @@ -3731,7 +3782,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, | |||
| 3731 | * attempt to find space. | 3782 | * attempt to find space. |
| 3732 | */ | 3783 | */ |
| 3733 | if (size > end) { | 3784 | if (size > end) { |
| 3734 | DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%lu\n", | 3785 | DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%llu\n", |
| 3735 | ggtt_view ? ggtt_view->type : 0, | 3786 | ggtt_view ? ggtt_view->type : 0, |
| 3736 | size, | 3787 | size, |
| 3737 | flags & PIN_MAPPABLE ? "mappable" : "total", | 3788 | flags & PIN_MAPPABLE ? "mappable" : "total", |
| @@ -3853,7 +3904,7 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) | |||
| 3853 | old_write_domain = obj->base.write_domain; | 3904 | old_write_domain = obj->base.write_domain; |
| 3854 | obj->base.write_domain = 0; | 3905 | obj->base.write_domain = 0; |
| 3855 | 3906 | ||
| 3856 | intel_fb_obj_flush(obj, false); | 3907 | intel_fb_obj_flush(obj, false, ORIGIN_GTT); |
| 3857 | 3908 | ||
| 3858 | trace_i915_gem_object_change_domain(obj, | 3909 | trace_i915_gem_object_change_domain(obj, |
| 3859 | obj->base.read_domains, | 3910 | obj->base.read_domains, |
| @@ -3875,7 +3926,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) | |||
| 3875 | old_write_domain = obj->base.write_domain; | 3926 | old_write_domain = obj->base.write_domain; |
| 3876 | obj->base.write_domain = 0; | 3927 | obj->base.write_domain = 0; |
| 3877 | 3928 | ||
| 3878 | intel_fb_obj_flush(obj, false); | 3929 | intel_fb_obj_flush(obj, false, ORIGIN_CPU); |
| 3879 | 3930 | ||
| 3880 | trace_i915_gem_object_change_domain(obj, | 3931 | trace_i915_gem_object_change_domain(obj, |
| 3881 | obj->base.read_domains, | 3932 | obj->base.read_domains, |
| @@ -3937,9 +3988,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) | |||
| 3937 | obj->dirty = 1; | 3988 | obj->dirty = 1; |
| 3938 | } | 3989 | } |
| 3939 | 3990 | ||
| 3940 | if (write) | ||
| 3941 | intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT); | ||
| 3942 | |||
| 3943 | trace_i915_gem_object_change_domain(obj, | 3991 | trace_i915_gem_object_change_domain(obj, |
| 3944 | old_read_domains, | 3992 | old_read_domains, |
| 3945 | old_write_domain); | 3993 | old_write_domain); |
| @@ -4094,12 +4142,13 @@ int | |||
| 4094 | i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, | 4142 | i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, |
| 4095 | u32 alignment, | 4143 | u32 alignment, |
| 4096 | struct intel_engine_cs *pipelined, | 4144 | struct intel_engine_cs *pipelined, |
| 4145 | struct drm_i915_gem_request **pipelined_request, | ||
| 4097 | const struct i915_ggtt_view *view) | 4146 | const struct i915_ggtt_view *view) |
| 4098 | { | 4147 | { |
| 4099 | u32 old_read_domains, old_write_domain; | 4148 | u32 old_read_domains, old_write_domain; |
| 4100 | int ret; | 4149 | int ret; |
| 4101 | 4150 | ||
| 4102 | ret = i915_gem_object_sync(obj, pipelined); | 4151 | ret = i915_gem_object_sync(obj, pipelined, pipelined_request); |
| 4103 | if (ret) | 4152 | if (ret) |
| 4104 | return ret; | 4153 | return ret; |
| 4105 | 4154 | ||
| @@ -4210,9 +4259,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) | |||
| 4210 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; | 4259 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
| 4211 | } | 4260 | } |
| 4212 | 4261 | ||
| 4213 | if (write) | ||
| 4214 | intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU); | ||
| 4215 | |||
| 4216 | trace_i915_gem_object_change_domain(obj, | 4262 | trace_i915_gem_object_change_domain(obj, |
| 4217 | old_read_domains, | 4263 | old_read_domains, |
| 4218 | old_write_domain); | 4264 | old_write_domain); |
| @@ -4253,6 +4299,13 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) | |||
| 4253 | if (time_after_eq(request->emitted_jiffies, recent_enough)) | 4299 | if (time_after_eq(request->emitted_jiffies, recent_enough)) |
| 4254 | break; | 4300 | break; |
| 4255 | 4301 | ||
| 4302 | /* | ||
| 4303 | * Note that the request might not have been submitted yet. | ||
| 4304 | * In which case emitted_jiffies will be zero. | ||
| 4305 | */ | ||
| 4306 | if (!request->emitted_jiffies) | ||
| 4307 | continue; | ||
| 4308 | |||
| 4256 | target = request; | 4309 | target = request; |
| 4257 | } | 4310 | } |
| 4258 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); | 4311 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); |
| @@ -4810,8 +4863,9 @@ err: | |||
| 4810 | return ret; | 4863 | return ret; |
| 4811 | } | 4864 | } |
| 4812 | 4865 | ||
| 4813 | int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice) | 4866 | int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice) |
| 4814 | { | 4867 | { |
| 4868 | struct intel_engine_cs *ring = req->ring; | ||
| 4815 | struct drm_device *dev = ring->dev; | 4869 | struct drm_device *dev = ring->dev; |
| 4816 | struct drm_i915_private *dev_priv = dev->dev_private; | 4870 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 4817 | u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200); | 4871 | u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200); |
| @@ -4821,7 +4875,7 @@ int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice) | |||
| 4821 | if (!HAS_L3_DPF(dev) || !remap_info) | 4875 | if (!HAS_L3_DPF(dev) || !remap_info) |
| 4822 | return 0; | 4876 | return 0; |
| 4823 | 4877 | ||
| 4824 | ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3); | 4878 | ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3); |
| 4825 | if (ret) | 4879 | if (ret) |
| 4826 | return ret; | 4880 | return ret; |
| 4827 | 4881 | ||
| @@ -4967,7 +5021,7 @@ i915_gem_init_hw(struct drm_device *dev) | |||
| 4967 | { | 5021 | { |
| 4968 | struct drm_i915_private *dev_priv = dev->dev_private; | 5022 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 4969 | struct intel_engine_cs *ring; | 5023 | struct intel_engine_cs *ring; |
| 4970 | int ret, i; | 5024 | int ret, i, j; |
| 4971 | 5025 | ||
| 4972 | if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) | 5026 | if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) |
| 4973 | return -EIO; | 5027 | return -EIO; |
| @@ -5004,27 +5058,55 @@ i915_gem_init_hw(struct drm_device *dev) | |||
| 5004 | */ | 5058 | */ |
| 5005 | init_unused_rings(dev); | 5059 | init_unused_rings(dev); |
| 5006 | 5060 | ||
| 5061 | BUG_ON(!dev_priv->ring[RCS].default_context); | ||
| 5062 | |||
| 5063 | ret = i915_ppgtt_init_hw(dev); | ||
| 5064 | if (ret) { | ||
| 5065 | DRM_ERROR("PPGTT enable HW failed %d\n", ret); | ||
| 5066 | goto out; | ||
| 5067 | } | ||
| 5068 | |||
| 5069 | /* Need to do basic initialisation of all rings first: */ | ||
| 5007 | for_each_ring(ring, dev_priv, i) { | 5070 | for_each_ring(ring, dev_priv, i) { |
| 5008 | ret = ring->init_hw(ring); | 5071 | ret = ring->init_hw(ring); |
| 5009 | if (ret) | 5072 | if (ret) |
| 5010 | goto out; | 5073 | goto out; |
| 5011 | } | 5074 | } |
| 5012 | 5075 | ||
| 5013 | for (i = 0; i < NUM_L3_SLICES(dev); i++) | 5076 | /* Now it is safe to go back round and do everything else: */ |
| 5014 | i915_gem_l3_remap(&dev_priv->ring[RCS], i); | 5077 | for_each_ring(ring, dev_priv, i) { |
| 5078 | struct drm_i915_gem_request *req; | ||
| 5015 | 5079 | ||
| 5016 | ret = i915_ppgtt_init_hw(dev); | 5080 | WARN_ON(!ring->default_context); |
| 5017 | if (ret && ret != -EIO) { | ||
| 5018 | DRM_ERROR("PPGTT enable failed %d\n", ret); | ||
| 5019 | i915_gem_cleanup_ringbuffer(dev); | ||
| 5020 | } | ||
| 5021 | 5081 | ||
| 5022 | ret = i915_gem_context_enable(dev_priv); | 5082 | ret = i915_gem_request_alloc(ring, ring->default_context, &req); |
| 5023 | if (ret && ret != -EIO) { | 5083 | if (ret) { |
| 5024 | DRM_ERROR("Context enable failed %d\n", ret); | 5084 | i915_gem_cleanup_ringbuffer(dev); |
| 5025 | i915_gem_cleanup_ringbuffer(dev); | 5085 | goto out; |
| 5086 | } | ||
| 5026 | 5087 | ||
| 5027 | goto out; | 5088 | if (ring->id == RCS) { |
| 5089 | for (j = 0; j < NUM_L3_SLICES(dev); j++) | ||
| 5090 | i915_gem_l3_remap(req, j); | ||
| 5091 | } | ||
| 5092 | |||
| 5093 | ret = i915_ppgtt_init_ring(req); | ||
| 5094 | if (ret && ret != -EIO) { | ||
| 5095 | DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret); | ||
| 5096 | i915_gem_request_cancel(req); | ||
| 5097 | i915_gem_cleanup_ringbuffer(dev); | ||
| 5098 | goto out; | ||
| 5099 | } | ||
| 5100 | |||
| 5101 | ret = i915_gem_context_enable(req); | ||
| 5102 | if (ret && ret != -EIO) { | ||
| 5103 | DRM_ERROR("Context enable ring #%d failed %d\n", i, ret); | ||
| 5104 | i915_gem_request_cancel(req); | ||
| 5105 | i915_gem_cleanup_ringbuffer(dev); | ||
| 5106 | goto out; | ||
| 5107 | } | ||
| 5108 | |||
| 5109 | i915_add_request_no_flush(req); | ||
| 5028 | } | 5110 | } |
| 5029 | 5111 | ||
| 5030 | out: | 5112 | out: |
| @@ -5111,6 +5193,14 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) | |||
| 5111 | 5193 | ||
| 5112 | for_each_ring(ring, dev_priv, i) | 5194 | for_each_ring(ring, dev_priv, i) |
| 5113 | dev_priv->gt.cleanup_ring(ring); | 5195 | dev_priv->gt.cleanup_ring(ring); |
| 5196 | |||
| 5197 | if (i915.enable_execlists) | ||
| 5198 | /* | ||
| 5199 | * Neither the BIOS, ourselves nor any other kernel | ||
| 5200 | * expects the system to be in execlists mode on startup, | ||
| 5201 | * so we need to reset the GPU back to legacy mode. | ||
| 5202 | */ | ||
| 5203 | intel_gpu_reset(dev); | ||
| 5114 | } | 5204 | } |
| 5115 | 5205 | ||
| 5116 | static void | 5206 | static void |
| @@ -5387,4 +5477,3 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) | |||
| 5387 | 5477 | ||
| 5388 | return false; | 5478 | return false; |
| 5389 | } | 5479 | } |
| 5390 | |||
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 48afa777e94a..b77a8f78c35a 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
| @@ -407,32 +407,23 @@ void i915_gem_context_fini(struct drm_device *dev) | |||
| 407 | i915_gem_context_unreference(dctx); | 407 | i915_gem_context_unreference(dctx); |
| 408 | } | 408 | } |
| 409 | 409 | ||
| 410 | int i915_gem_context_enable(struct drm_i915_private *dev_priv) | 410 | int i915_gem_context_enable(struct drm_i915_gem_request *req) |
| 411 | { | 411 | { |
| 412 | struct intel_engine_cs *ring; | 412 | struct intel_engine_cs *ring = req->ring; |
| 413 | int ret, i; | 413 | int ret; |
| 414 | |||
| 415 | BUG_ON(!dev_priv->ring[RCS].default_context); | ||
| 416 | 414 | ||
| 417 | if (i915.enable_execlists) { | 415 | if (i915.enable_execlists) { |
| 418 | for_each_ring(ring, dev_priv, i) { | 416 | if (ring->init_context == NULL) |
| 419 | if (ring->init_context) { | 417 | return 0; |
| 420 | ret = ring->init_context(ring, | ||
| 421 | ring->default_context); | ||
| 422 | if (ret) { | ||
| 423 | DRM_ERROR("ring init context: %d\n", | ||
| 424 | ret); | ||
| 425 | return ret; | ||
| 426 | } | ||
| 427 | } | ||
| 428 | } | ||
| 429 | 418 | ||
| 419 | ret = ring->init_context(req); | ||
| 430 | } else | 420 | } else |
| 431 | for_each_ring(ring, dev_priv, i) { | 421 | ret = i915_switch_context(req); |
| 432 | ret = i915_switch_context(ring, ring->default_context); | 422 | |
| 433 | if (ret) | 423 | if (ret) { |
| 434 | return ret; | 424 | DRM_ERROR("ring init context: %d\n", ret); |
| 435 | } | 425 | return ret; |
| 426 | } | ||
| 436 | 427 | ||
| 437 | return 0; | 428 | return 0; |
| 438 | } | 429 | } |
| @@ -485,10 +476,9 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) | |||
| 485 | } | 476 | } |
| 486 | 477 | ||
| 487 | static inline int | 478 | static inline int |
| 488 | mi_set_context(struct intel_engine_cs *ring, | 479 | mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) |
| 489 | struct intel_context *new_context, | ||
| 490 | u32 hw_flags) | ||
| 491 | { | 480 | { |
| 481 | struct intel_engine_cs *ring = req->ring; | ||
| 492 | u32 flags = hw_flags | MI_MM_SPACE_GTT; | 482 | u32 flags = hw_flags | MI_MM_SPACE_GTT; |
| 493 | const int num_rings = | 483 | const int num_rings = |
| 494 | /* Use an extended w/a on ivb+ if signalling from other rings */ | 484 | /* Use an extended w/a on ivb+ if signalling from other rings */ |
| @@ -503,13 +493,15 @@ mi_set_context(struct intel_engine_cs *ring, | |||
| 503 | * itlb_before_ctx_switch. | 493 | * itlb_before_ctx_switch. |
| 504 | */ | 494 | */ |
| 505 | if (IS_GEN6(ring->dev)) { | 495 | if (IS_GEN6(ring->dev)) { |
| 506 | ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0); | 496 | ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0); |
| 507 | if (ret) | 497 | if (ret) |
| 508 | return ret; | 498 | return ret; |
| 509 | } | 499 | } |
| 510 | 500 | ||
| 511 | /* These flags are for resource streamer on HSW+ */ | 501 | /* These flags are for resource streamer on HSW+ */ |
| 512 | if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8) | 502 | if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8) |
| 503 | flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN); | ||
| 504 | else if (INTEL_INFO(ring->dev)->gen < 8) | ||
| 513 | flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN); | 505 | flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN); |
| 514 | 506 | ||
| 515 | 507 | ||
| @@ -517,7 +509,7 @@ mi_set_context(struct intel_engine_cs *ring, | |||
| 517 | if (INTEL_INFO(ring->dev)->gen >= 7) | 509 | if (INTEL_INFO(ring->dev)->gen >= 7) |
| 518 | len += 2 + (num_rings ? 4*num_rings + 2 : 0); | 510 | len += 2 + (num_rings ? 4*num_rings + 2 : 0); |
| 519 | 511 | ||
| 520 | ret = intel_ring_begin(ring, len); | 512 | ret = intel_ring_begin(req, len); |
| 521 | if (ret) | 513 | if (ret) |
| 522 | return ret; | 514 | return ret; |
| 523 | 515 | ||
| @@ -540,7 +532,7 @@ mi_set_context(struct intel_engine_cs *ring, | |||
| 540 | 532 | ||
| 541 | intel_ring_emit(ring, MI_NOOP); | 533 | intel_ring_emit(ring, MI_NOOP); |
| 542 | intel_ring_emit(ring, MI_SET_CONTEXT); | 534 | intel_ring_emit(ring, MI_SET_CONTEXT); |
| 543 | intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) | | 535 | intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) | |
| 544 | flags); | 536 | flags); |
| 545 | /* | 537 | /* |
| 546 | * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP | 538 | * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP |
| @@ -621,9 +613,10 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to, | |||
| 621 | return false; | 613 | return false; |
| 622 | } | 614 | } |
| 623 | 615 | ||
| 624 | static int do_switch(struct intel_engine_cs *ring, | 616 | static int do_switch(struct drm_i915_gem_request *req) |
| 625 | struct intel_context *to) | ||
| 626 | { | 617 | { |
| 618 | struct intel_context *to = req->ctx; | ||
| 619 | struct intel_engine_cs *ring = req->ring; | ||
| 627 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 620 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
| 628 | struct intel_context *from = ring->last_context; | 621 | struct intel_context *from = ring->last_context; |
| 629 | u32 hw_flags = 0; | 622 | u32 hw_flags = 0; |
| @@ -659,7 +652,7 @@ static int do_switch(struct intel_engine_cs *ring, | |||
| 659 | * Register Immediate commands in Ring Buffer before submitting | 652 | * Register Immediate commands in Ring Buffer before submitting |
| 660 | * a context."*/ | 653 | * a context."*/ |
| 661 | trace_switch_mm(ring, to); | 654 | trace_switch_mm(ring, to); |
| 662 | ret = to->ppgtt->switch_mm(to->ppgtt, ring); | 655 | ret = to->ppgtt->switch_mm(to->ppgtt, req); |
| 663 | if (ret) | 656 | if (ret) |
| 664 | goto unpin_out; | 657 | goto unpin_out; |
| 665 | 658 | ||
| @@ -701,7 +694,7 @@ static int do_switch(struct intel_engine_cs *ring, | |||
| 701 | WARN_ON(needs_pd_load_pre(ring, to) && | 694 | WARN_ON(needs_pd_load_pre(ring, to) && |
| 702 | needs_pd_load_post(ring, to, hw_flags)); | 695 | needs_pd_load_post(ring, to, hw_flags)); |
| 703 | 696 | ||
| 704 | ret = mi_set_context(ring, to, hw_flags); | 697 | ret = mi_set_context(req, hw_flags); |
| 705 | if (ret) | 698 | if (ret) |
| 706 | goto unpin_out; | 699 | goto unpin_out; |
| 707 | 700 | ||
| @@ -710,7 +703,7 @@ static int do_switch(struct intel_engine_cs *ring, | |||
| 710 | */ | 703 | */ |
| 711 | if (needs_pd_load_post(ring, to, hw_flags)) { | 704 | if (needs_pd_load_post(ring, to, hw_flags)) { |
| 712 | trace_switch_mm(ring, to); | 705 | trace_switch_mm(ring, to); |
| 713 | ret = to->ppgtt->switch_mm(to->ppgtt, ring); | 706 | ret = to->ppgtt->switch_mm(to->ppgtt, req); |
| 714 | /* The hardware context switch is emitted, but we haven't | 707 | /* The hardware context switch is emitted, but we haven't |
| 715 | * actually changed the state - so it's probably safe to bail | 708 | * actually changed the state - so it's probably safe to bail |
| 716 | * here. Still, let the user know something dangerous has | 709 | * here. Still, let the user know something dangerous has |
| @@ -726,7 +719,7 @@ static int do_switch(struct intel_engine_cs *ring, | |||
| 726 | if (!(to->remap_slice & (1<<i))) | 719 | if (!(to->remap_slice & (1<<i))) |
| 727 | continue; | 720 | continue; |
| 728 | 721 | ||
| 729 | ret = i915_gem_l3_remap(ring, i); | 722 | ret = i915_gem_l3_remap(req, i); |
| 730 | /* If it failed, try again next round */ | 723 | /* If it failed, try again next round */ |
| 731 | if (ret) | 724 | if (ret) |
| 732 | DRM_DEBUG_DRIVER("L3 remapping failed\n"); | 725 | DRM_DEBUG_DRIVER("L3 remapping failed\n"); |
| @@ -742,7 +735,7 @@ static int do_switch(struct intel_engine_cs *ring, | |||
| 742 | */ | 735 | */ |
| 743 | if (from != NULL) { | 736 | if (from != NULL) { |
| 744 | from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; | 737 | from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; |
| 745 | i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring); | 738 | i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req); |
| 746 | /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the | 739 | /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the |
| 747 | * whole damn pipeline, we don't need to explicitly mark the | 740 | * whole damn pipeline, we don't need to explicitly mark the |
| 748 | * object dirty. The only exception is that the context must be | 741 | * object dirty. The only exception is that the context must be |
| @@ -766,7 +759,7 @@ done: | |||
| 766 | 759 | ||
| 767 | if (uninitialized) { | 760 | if (uninitialized) { |
| 768 | if (ring->init_context) { | 761 | if (ring->init_context) { |
| 769 | ret = ring->init_context(ring, to); | 762 | ret = ring->init_context(req); |
| 770 | if (ret) | 763 | if (ret) |
| 771 | DRM_ERROR("ring init context: %d\n", ret); | 764 | DRM_ERROR("ring init context: %d\n", ret); |
| 772 | } | 765 | } |
| @@ -782,8 +775,7 @@ unpin_out: | |||
| 782 | 775 | ||
| 783 | /** | 776 | /** |
| 784 | * i915_switch_context() - perform a GPU context switch. | 777 | * i915_switch_context() - perform a GPU context switch. |
| 785 | * @ring: ring for which we'll execute the context switch | 778 | * @req: request for which we'll execute the context switch |
| 786 | * @to: the context to switch to | ||
| 787 | * | 779 | * |
| 788 | * The context life cycle is simple. The context refcount is incremented and | 780 | * The context life cycle is simple. The context refcount is incremented and |
| 789 | * decremented by 1 and create and destroy. If the context is in use by the GPU, | 781 | * decremented by 1 and create and destroy. If the context is in use by the GPU, |
| @@ -794,25 +786,25 @@ unpin_out: | |||
| 794 | * switched by writing to the ELSP and requests keep a reference to their | 786 | * switched by writing to the ELSP and requests keep a reference to their |
| 795 | * context. | 787 | * context. |
| 796 | */ | 788 | */ |
| 797 | int i915_switch_context(struct intel_engine_cs *ring, | 789 | int i915_switch_context(struct drm_i915_gem_request *req) |
| 798 | struct intel_context *to) | ||
| 799 | { | 790 | { |
| 791 | struct intel_engine_cs *ring = req->ring; | ||
| 800 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 792 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
| 801 | 793 | ||
| 802 | WARN_ON(i915.enable_execlists); | 794 | WARN_ON(i915.enable_execlists); |
| 803 | WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); | 795 | WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); |
| 804 | 796 | ||
| 805 | if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */ | 797 | if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */ |
| 806 | if (to != ring->last_context) { | 798 | if (req->ctx != ring->last_context) { |
| 807 | i915_gem_context_reference(to); | 799 | i915_gem_context_reference(req->ctx); |
| 808 | if (ring->last_context) | 800 | if (ring->last_context) |
| 809 | i915_gem_context_unreference(ring->last_context); | 801 | i915_gem_context_unreference(ring->last_context); |
| 810 | ring->last_context = to; | 802 | ring->last_context = req->ctx; |
| 811 | } | 803 | } |
| 812 | return 0; | 804 | return 0; |
| 813 | } | 805 | } |
| 814 | 806 | ||
| 815 | return do_switch(ring, to); | 807 | return do_switch(req); |
| 816 | } | 808 | } |
| 817 | 809 | ||
| 818 | static bool contexts_enabled(struct drm_device *dev) | 810 | static bool contexts_enabled(struct drm_device *dev) |
| @@ -898,6 +890,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, | |||
| 898 | case I915_CONTEXT_PARAM_BAN_PERIOD: | 890 | case I915_CONTEXT_PARAM_BAN_PERIOD: |
| 899 | args->value = ctx->hang_stats.ban_period_seconds; | 891 | args->value = ctx->hang_stats.ban_period_seconds; |
| 900 | break; | 892 | break; |
| 893 | case I915_CONTEXT_PARAM_NO_ZEROMAP: | ||
| 894 | args->value = ctx->flags & CONTEXT_NO_ZEROMAP; | ||
| 895 | break; | ||
| 901 | default: | 896 | default: |
| 902 | ret = -EINVAL; | 897 | ret = -EINVAL; |
| 903 | break; | 898 | break; |
| @@ -935,6 +930,14 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, | |||
| 935 | else | 930 | else |
| 936 | ctx->hang_stats.ban_period_seconds = args->value; | 931 | ctx->hang_stats.ban_period_seconds = args->value; |
| 937 | break; | 932 | break; |
| 933 | case I915_CONTEXT_PARAM_NO_ZEROMAP: | ||
| 934 | if (args->size) { | ||
| 935 | ret = -EINVAL; | ||
| 936 | } else { | ||
| 937 | ctx->flags &= ~CONTEXT_NO_ZEROMAP; | ||
| 938 | ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0; | ||
| 939 | } | ||
| 940 | break; | ||
| 938 | default: | 941 | default: |
| 939 | ret = -EINVAL; | 942 | ret = -EINVAL; |
| 940 | break; | 943 | break; |
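The context-param hunks above add an I915_CONTEXT_PARAM_NO_ZEROMAP get/set pair: setparam rejects a non-zero size and then sets or clears the CONTEXT_NO_ZEROMAP flag, which the execbuffer reservation path later turns into __EXEC_OBJECT_NEEDS_BIAS. A small user-space model of that flag handling follows; the bit value chosen for CONTEXT_NO_ZEROMAP is a placeholder, since the diff does not show its definition.

    #include <stdint.h>
    #include <stdio.h>

    #define CONTEXT_NO_ZEROMAP (1u << 0)    /* placeholder bit for illustration */

    struct ctx { uint32_t flags; };

    /* setparam: a non-zero size is invalid; otherwise set or clear the bit. */
    static int setparam_no_zeromap(struct ctx *ctx, uint32_t size, uint64_t value)
    {
        if (size)
            return -1;                      /* -EINVAL in the kernel */

        ctx->flags &= ~CONTEXT_NO_ZEROMAP;
        ctx->flags |= value ? CONTEXT_NO_ZEROMAP : 0;
        return 0;
    }

    /* getparam: report the raw flag bit back, as the diff does. */
    static uint64_t getparam_no_zeromap(const struct ctx *ctx)
    {
        return ctx->flags & CONTEXT_NO_ZEROMAP;
    }

    int main(void)
    {
        struct ctx ctx = { .flags = 0 };

        setparam_no_zeromap(&ctx, 0, 1);
        printf("no_zeromap=%llu\n",
               (unsigned long long)getparam_no_zeromap(&ctx));

        /* Objects in such a context would then be reserved with a bias,
         * i.e. __EXEC_OBJECT_NEEDS_BIAS in the execbuffer path. */
        return 0;
    }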
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index a7fa14516cda..923a3c4bf0b7 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
| @@ -677,6 +677,7 @@ eb_vma_misplaced(struct i915_vma *vma) | |||
| 677 | static int | 677 | static int |
| 678 | i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, | 678 | i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, |
| 679 | struct list_head *vmas, | 679 | struct list_head *vmas, |
| 680 | struct intel_context *ctx, | ||
| 680 | bool *need_relocs) | 681 | bool *need_relocs) |
| 681 | { | 682 | { |
| 682 | struct drm_i915_gem_object *obj; | 683 | struct drm_i915_gem_object *obj; |
| @@ -699,6 +700,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, | |||
| 699 | obj = vma->obj; | 700 | obj = vma->obj; |
| 700 | entry = vma->exec_entry; | 701 | entry = vma->exec_entry; |
| 701 | 702 | ||
| 703 | if (ctx->flags & CONTEXT_NO_ZEROMAP) | ||
| 704 | entry->flags |= __EXEC_OBJECT_NEEDS_BIAS; | ||
| 705 | |||
| 702 | if (!has_fenced_gpu_access) | 706 | if (!has_fenced_gpu_access) |
| 703 | entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE; | 707 | entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE; |
| 704 | need_fence = | 708 | need_fence = |
| @@ -776,7 +780,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | |||
| 776 | struct drm_file *file, | 780 | struct drm_file *file, |
| 777 | struct intel_engine_cs *ring, | 781 | struct intel_engine_cs *ring, |
| 778 | struct eb_vmas *eb, | 782 | struct eb_vmas *eb, |
| 779 | struct drm_i915_gem_exec_object2 *exec) | 783 | struct drm_i915_gem_exec_object2 *exec, |
| 784 | struct intel_context *ctx) | ||
| 780 | { | 785 | { |
| 781 | struct drm_i915_gem_relocation_entry *reloc; | 786 | struct drm_i915_gem_relocation_entry *reloc; |
| 782 | struct i915_address_space *vm; | 787 | struct i915_address_space *vm; |
| @@ -862,7 +867,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | |||
| 862 | goto err; | 867 | goto err; |
| 863 | 868 | ||
| 864 | need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; | 869 | need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; |
| 865 | ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs); | 870 | ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs); |
| 866 | if (ret) | 871 | if (ret) |
| 867 | goto err; | 872 | goto err; |
| 868 | 873 | ||
| @@ -887,10 +892,10 @@ err: | |||
| 887 | } | 892 | } |
| 888 | 893 | ||
| 889 | static int | 894 | static int |
| 890 | i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring, | 895 | i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req, |
| 891 | struct list_head *vmas) | 896 | struct list_head *vmas) |
| 892 | { | 897 | { |
| 893 | const unsigned other_rings = ~intel_ring_flag(ring); | 898 | const unsigned other_rings = ~intel_ring_flag(req->ring); |
| 894 | struct i915_vma *vma; | 899 | struct i915_vma *vma; |
| 895 | uint32_t flush_domains = 0; | 900 | uint32_t flush_domains = 0; |
| 896 | bool flush_chipset = false; | 901 | bool flush_chipset = false; |
| @@ -900,7 +905,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring, | |||
| 900 | struct drm_i915_gem_object *obj = vma->obj; | 905 | struct drm_i915_gem_object *obj = vma->obj; |
| 901 | 906 | ||
| 902 | if (obj->active & other_rings) { | 907 | if (obj->active & other_rings) { |
| 903 | ret = i915_gem_object_sync(obj, ring); | 908 | ret = i915_gem_object_sync(obj, req->ring, &req); |
| 904 | if (ret) | 909 | if (ret) |
| 905 | return ret; | 910 | return ret; |
| 906 | } | 911 | } |
| @@ -912,7 +917,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring, | |||
| 912 | } | 917 | } |
| 913 | 918 | ||
| 914 | if (flush_chipset) | 919 | if (flush_chipset) |
| 915 | i915_gem_chipset_flush(ring->dev); | 920 | i915_gem_chipset_flush(req->ring->dev); |
| 916 | 921 | ||
| 917 | if (flush_domains & I915_GEM_DOMAIN_GTT) | 922 | if (flush_domains & I915_GEM_DOMAIN_GTT) |
| 918 | wmb(); | 923 | wmb(); |
| @@ -920,7 +925,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring, | |||
| 920 | /* Unconditionally invalidate gpu caches and ensure that we do flush | 925 | /* Unconditionally invalidate gpu caches and ensure that we do flush |
| 921 | * any residual writes from the previous batch. | 926 | * any residual writes from the previous batch. |
| 922 | */ | 927 | */ |
| 923 | return intel_ring_invalidate_all_caches(ring); | 928 | return intel_ring_invalidate_all_caches(req); |
| 924 | } | 929 | } |
| 925 | 930 | ||
| 926 | static bool | 931 | static bool |
| @@ -953,6 +958,9 @@ validate_exec_list(struct drm_device *dev, | |||
| 953 | if (exec[i].flags & invalid_flags) | 958 | if (exec[i].flags & invalid_flags) |
| 954 | return -EINVAL; | 959 | return -EINVAL; |
| 955 | 960 | ||
| 961 | if (exec[i].alignment && !is_power_of_2(exec[i].alignment)) | ||
| 962 | return -EINVAL; | ||
| 963 | |||
| 956 | /* First check for malicious input causing overflow in | 964 | /* First check for malicious input causing overflow in |
| 957 | * the worst case where we need to allocate the entire | 965 | * the worst case where we need to allocate the entire |
| 958 | * relocation tree as a single array. | 966 | * relocation tree as a single array. |
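validate_exec_list() now also rejects a non-zero alignment that is not a power of two. The test is the usual single-set-bit check; the stand-alone example below mirrors the kernel's is_power_of_2() helper used in the hunk above.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool is_pow2(uint64_t n)
    {
        /* exactly one bit set: n != 0 and clearing the lowest set bit gives 0 */
        return n != 0 && (n & (n - 1)) == 0;
    }

    int main(void)
    {
        uint64_t alignments[] = { 0, 1, 4096, 4097, 1u << 20 };

        for (unsigned i = 0; i < sizeof(alignments) / sizeof(alignments[0]); i++)
            printf("%llu -> %s\n",
                   (unsigned long long)alignments[i],
                   alignments[i] && !is_pow2(alignments[i]) ? "rejected" : "ok");
        return 0;
    }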
| @@ -1013,9 +1021,9 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, | |||
| 1013 | 1021 | ||
| 1014 | void | 1022 | void |
| 1015 | i915_gem_execbuffer_move_to_active(struct list_head *vmas, | 1023 | i915_gem_execbuffer_move_to_active(struct list_head *vmas, |
| 1016 | struct intel_engine_cs *ring) | 1024 | struct drm_i915_gem_request *req) |
| 1017 | { | 1025 | { |
| 1018 | struct drm_i915_gem_request *req = intel_ring_get_request(ring); | 1026 | struct intel_engine_cs *ring = i915_gem_request_get_ring(req); |
| 1019 | struct i915_vma *vma; | 1027 | struct i915_vma *vma; |
| 1020 | 1028 | ||
| 1021 | list_for_each_entry(vma, vmas, exec_list) { | 1029 | list_for_each_entry(vma, vmas, exec_list) { |
| @@ -1029,12 +1037,12 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, | |||
| 1029 | obj->base.pending_read_domains |= obj->base.read_domains; | 1037 | obj->base.pending_read_domains |= obj->base.read_domains; |
| 1030 | obj->base.read_domains = obj->base.pending_read_domains; | 1038 | obj->base.read_domains = obj->base.pending_read_domains; |
| 1031 | 1039 | ||
| 1032 | i915_vma_move_to_active(vma, ring); | 1040 | i915_vma_move_to_active(vma, req); |
| 1033 | if (obj->base.write_domain) { | 1041 | if (obj->base.write_domain) { |
| 1034 | obj->dirty = 1; | 1042 | obj->dirty = 1; |
| 1035 | i915_gem_request_assign(&obj->last_write_req, req); | 1043 | i915_gem_request_assign(&obj->last_write_req, req); |
| 1036 | 1044 | ||
| 1037 | intel_fb_obj_invalidate(obj, ring, ORIGIN_CS); | 1045 | intel_fb_obj_invalidate(obj, ORIGIN_CS); |
| 1038 | 1046 | ||
| 1039 | /* update for the implicit flush after a batch */ | 1047 | /* update for the implicit flush after a batch */ |
| 1040 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; | 1048 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; |
| @@ -1053,22 +1061,20 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, | |||
| 1053 | } | 1061 | } |
| 1054 | 1062 | ||
| 1055 | void | 1063 | void |
| 1056 | i915_gem_execbuffer_retire_commands(struct drm_device *dev, | 1064 | i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params) |
| 1057 | struct drm_file *file, | ||
| 1058 | struct intel_engine_cs *ring, | ||
| 1059 | struct drm_i915_gem_object *obj) | ||
| 1060 | { | 1065 | { |
| 1061 | /* Unconditionally force add_request to emit a full flush. */ | 1066 | /* Unconditionally force add_request to emit a full flush. */ |
| 1062 | ring->gpu_caches_dirty = true; | 1067 | params->ring->gpu_caches_dirty = true; |
| 1063 | 1068 | ||
| 1064 | /* Add a breadcrumb for the completion of the batch buffer */ | 1069 | /* Add a breadcrumb for the completion of the batch buffer */ |
| 1065 | (void)__i915_add_request(ring, file, obj); | 1070 | __i915_add_request(params->request, params->batch_obj, true); |
| 1066 | } | 1071 | } |
| 1067 | 1072 | ||
| 1068 | static int | 1073 | static int |
| 1069 | i915_reset_gen7_sol_offsets(struct drm_device *dev, | 1074 | i915_reset_gen7_sol_offsets(struct drm_device *dev, |
| 1070 | struct intel_engine_cs *ring) | 1075 | struct drm_i915_gem_request *req) |
| 1071 | { | 1076 | { |
| 1077 | struct intel_engine_cs *ring = req->ring; | ||
| 1072 | struct drm_i915_private *dev_priv = dev->dev_private; | 1078 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1073 | int ret, i; | 1079 | int ret, i; |
| 1074 | 1080 | ||
| @@ -1077,7 +1083,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev, | |||
| 1077 | return -EINVAL; | 1083 | return -EINVAL; |
| 1078 | } | 1084 | } |
| 1079 | 1085 | ||
| 1080 | ret = intel_ring_begin(ring, 4 * 3); | 1086 | ret = intel_ring_begin(req, 4 * 3); |
| 1081 | if (ret) | 1087 | if (ret) |
| 1082 | return ret; | 1088 | return ret; |
| 1083 | 1089 | ||
| @@ -1093,10 +1099,11 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev, | |||
| 1093 | } | 1099 | } |
| 1094 | 1100 | ||
| 1095 | static int | 1101 | static int |
| 1096 | i915_emit_box(struct intel_engine_cs *ring, | 1102 | i915_emit_box(struct drm_i915_gem_request *req, |
| 1097 | struct drm_clip_rect *box, | 1103 | struct drm_clip_rect *box, |
| 1098 | int DR1, int DR4) | 1104 | int DR1, int DR4) |
| 1099 | { | 1105 | { |
| 1106 | struct intel_engine_cs *ring = req->ring; | ||
| 1100 | int ret; | 1107 | int ret; |
| 1101 | 1108 | ||
| 1102 | if (box->y2 <= box->y1 || box->x2 <= box->x1 || | 1109 | if (box->y2 <= box->y1 || box->x2 <= box->x1 || |
| @@ -1107,7 +1114,7 @@ i915_emit_box(struct intel_engine_cs *ring, | |||
| 1107 | } | 1114 | } |
| 1108 | 1115 | ||
| 1109 | if (INTEL_INFO(ring->dev)->gen >= 4) { | 1116 | if (INTEL_INFO(ring->dev)->gen >= 4) { |
| 1110 | ret = intel_ring_begin(ring, 4); | 1117 | ret = intel_ring_begin(req, 4); |
| 1111 | if (ret) | 1118 | if (ret) |
| 1112 | return ret; | 1119 | return ret; |
| 1113 | 1120 | ||
| @@ -1116,7 +1123,7 @@ i915_emit_box(struct intel_engine_cs *ring, | |||
| 1116 | intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16); | 1123 | intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16); |
| 1117 | intel_ring_emit(ring, DR4); | 1124 | intel_ring_emit(ring, DR4); |
| 1118 | } else { | 1125 | } else { |
| 1119 | ret = intel_ring_begin(ring, 6); | 1126 | ret = intel_ring_begin(req, 6); |
| 1120 | if (ret) | 1127 | if (ret) |
| 1121 | return ret; | 1128 | return ret; |
| 1122 | 1129 | ||
| @@ -1186,17 +1193,15 @@ err: | |||
| 1186 | } | 1193 | } |
| 1187 | 1194 | ||
| 1188 | int | 1195 | int |
| 1189 | i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, | 1196 | i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, |
| 1190 | struct intel_engine_cs *ring, | ||
| 1191 | struct intel_context *ctx, | ||
| 1192 | struct drm_i915_gem_execbuffer2 *args, | 1197 | struct drm_i915_gem_execbuffer2 *args, |
| 1193 | struct list_head *vmas, | 1198 | struct list_head *vmas) |
| 1194 | struct drm_i915_gem_object *batch_obj, | ||
| 1195 | u64 exec_start, u32 dispatch_flags) | ||
| 1196 | { | 1199 | { |
| 1197 | struct drm_clip_rect *cliprects = NULL; | 1200 | struct drm_clip_rect *cliprects = NULL; |
| 1201 | struct drm_device *dev = params->dev; | ||
| 1202 | struct intel_engine_cs *ring = params->ring; | ||
| 1198 | struct drm_i915_private *dev_priv = dev->dev_private; | 1203 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1199 | u64 exec_len; | 1204 | u64 exec_start, exec_len; |
| 1200 | int instp_mode; | 1205 | int instp_mode; |
| 1201 | u32 instp_mask; | 1206 | u32 instp_mask; |
| 1202 | int i, ret = 0; | 1207 | int i, ret = 0; |
| @@ -1244,15 +1249,15 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, | |||
| 1244 | } | 1249 | } |
| 1245 | } | 1250 | } |
| 1246 | 1251 | ||
| 1247 | ret = i915_gem_execbuffer_move_to_gpu(ring, vmas); | 1252 | ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas); |
| 1248 | if (ret) | 1253 | if (ret) |
| 1249 | goto error; | 1254 | goto error; |
| 1250 | 1255 | ||
| 1251 | ret = i915_switch_context(ring, ctx); | 1256 | ret = i915_switch_context(params->request); |
| 1252 | if (ret) | 1257 | if (ret) |
| 1253 | goto error; | 1258 | goto error; |
| 1254 | 1259 | ||
| 1255 | WARN(ctx->ppgtt && ctx->ppgtt->pd_dirty_rings & (1<<ring->id), | 1260 | WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id), |
| 1256 | "%s didn't clear reload\n", ring->name); | 1261 | "%s didn't clear reload\n", ring->name); |
| 1257 | 1262 | ||
| 1258 | instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; | 1263 | instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; |
| @@ -1294,7 +1299,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, | |||
| 1294 | 1299 | ||
| 1295 | if (ring == &dev_priv->ring[RCS] && | 1300 | if (ring == &dev_priv->ring[RCS] && |
| 1296 | instp_mode != dev_priv->relative_constants_mode) { | 1301 | instp_mode != dev_priv->relative_constants_mode) { |
| 1297 | ret = intel_ring_begin(ring, 4); | 1302 | ret = intel_ring_begin(params->request, 4); |
| 1298 | if (ret) | 1303 | if (ret) |
| 1299 | goto error; | 1304 | goto error; |
| 1300 | 1305 | ||
| @@ -1308,37 +1313,40 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, | |||
| 1308 | } | 1313 | } |
| 1309 | 1314 | ||
| 1310 | if (args->flags & I915_EXEC_GEN7_SOL_RESET) { | 1315 | if (args->flags & I915_EXEC_GEN7_SOL_RESET) { |
| 1311 | ret = i915_reset_gen7_sol_offsets(dev, ring); | 1316 | ret = i915_reset_gen7_sol_offsets(dev, params->request); |
| 1312 | if (ret) | 1317 | if (ret) |
| 1313 | goto error; | 1318 | goto error; |
| 1314 | } | 1319 | } |
| 1315 | 1320 | ||
| 1316 | exec_len = args->batch_len; | 1321 | exec_len = args->batch_len; |
| 1322 | exec_start = params->batch_obj_vm_offset + | ||
| 1323 | params->args_batch_start_offset; | ||
| 1324 | |||
| 1317 | if (cliprects) { | 1325 | if (cliprects) { |
| 1318 | for (i = 0; i < args->num_cliprects; i++) { | 1326 | for (i = 0; i < args->num_cliprects; i++) { |
| 1319 | ret = i915_emit_box(ring, &cliprects[i], | 1327 | ret = i915_emit_box(params->request, &cliprects[i], |
| 1320 | args->DR1, args->DR4); | 1328 | args->DR1, args->DR4); |
| 1321 | if (ret) | 1329 | if (ret) |
| 1322 | goto error; | 1330 | goto error; |
| 1323 | 1331 | ||
| 1324 | ret = ring->dispatch_execbuffer(ring, | 1332 | ret = ring->dispatch_execbuffer(params->request, |
| 1325 | exec_start, exec_len, | 1333 | exec_start, exec_len, |
| 1326 | dispatch_flags); | 1334 | params->dispatch_flags); |
| 1327 | if (ret) | 1335 | if (ret) |
| 1328 | goto error; | 1336 | goto error; |
| 1329 | } | 1337 | } |
| 1330 | } else { | 1338 | } else { |
| 1331 | ret = ring->dispatch_execbuffer(ring, | 1339 | ret = ring->dispatch_execbuffer(params->request, |
| 1332 | exec_start, exec_len, | 1340 | exec_start, exec_len, |
| 1333 | dispatch_flags); | 1341 | params->dispatch_flags); |
| 1334 | if (ret) | 1342 | if (ret) |
| 1335 | return ret; | 1343 | return ret; |
| 1336 | } | 1344 | } |
| 1337 | 1345 | ||
| 1338 | trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags); | 1346 | trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags); |
| 1339 | 1347 | ||
| 1340 | i915_gem_execbuffer_move_to_active(vmas, ring); | 1348 | i915_gem_execbuffer_move_to_active(vmas, params->request); |
| 1341 | i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); | 1349 | i915_gem_execbuffer_retire_commands(params); |
| 1342 | 1350 | ||
| 1343 | error: | 1351 | error: |
| 1344 | kfree(cliprects); | 1352 | kfree(cliprects); |
| @@ -1408,8 +1416,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 1408 | struct intel_engine_cs *ring; | 1416 | struct intel_engine_cs *ring; |
| 1409 | struct intel_context *ctx; | 1417 | struct intel_context *ctx; |
| 1410 | struct i915_address_space *vm; | 1418 | struct i915_address_space *vm; |
| 1419 | struct i915_execbuffer_params params_master; /* XXX: will be removed later */ | ||
| 1420 | struct i915_execbuffer_params *params = ¶ms_master; | ||
| 1411 | const u32 ctx_id = i915_execbuffer2_get_context_id(*args); | 1421 | const u32 ctx_id = i915_execbuffer2_get_context_id(*args); |
| 1412 | u64 exec_start = args->batch_start_offset; | ||
| 1413 | u32 dispatch_flags; | 1422 | u32 dispatch_flags; |
| 1414 | int ret; | 1423 | int ret; |
| 1415 | bool need_relocs; | 1424 | bool need_relocs; |
| @@ -1482,6 +1491,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 1482 | return -EINVAL; | 1491 | return -EINVAL; |
| 1483 | } | 1492 | } |
| 1484 | 1493 | ||
| 1494 | if (args->flags & I915_EXEC_RESOURCE_STREAMER) { | ||
| 1495 | if (!HAS_RESOURCE_STREAMER(dev)) { | ||
| 1496 | DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n"); | ||
| 1497 | return -EINVAL; | ||
| 1498 | } | ||
| 1499 | if (ring->id != RCS) { | ||
| 1500 | DRM_DEBUG("RS is not available on %s\n", | ||
| 1501 | ring->name); | ||
| 1502 | return -EINVAL; | ||
| 1503 | } | ||
| 1504 | |||
| 1505 | dispatch_flags |= I915_DISPATCH_RS; | ||
| 1506 | } | ||
| 1507 | |||
| 1485 | intel_runtime_pm_get(dev_priv); | 1508 | intel_runtime_pm_get(dev_priv); |
| 1486 | 1509 | ||
| 1487 | ret = i915_mutex_lock_interruptible(dev); | 1510 | ret = i915_mutex_lock_interruptible(dev); |
| @@ -1502,6 +1525,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 1502 | else | 1525 | else |
| 1503 | vm = &dev_priv->gtt.base; | 1526 | vm = &dev_priv->gtt.base; |
| 1504 | 1527 | ||
| 1528 | memset(¶ms_master, 0x00, sizeof(params_master)); | ||
| 1529 | |||
| 1505 | eb = eb_create(args); | 1530 | eb = eb_create(args); |
| 1506 | if (eb == NULL) { | 1531 | if (eb == NULL) { |
| 1507 | i915_gem_context_unreference(ctx); | 1532 | i915_gem_context_unreference(ctx); |
| @@ -1520,7 +1545,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 1520 | 1545 | ||
| 1521 | /* Move the objects en-masse into the GTT, evicting if necessary. */ | 1546 | /* Move the objects en-masse into the GTT, evicting if necessary. */ |
| 1522 | need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; | 1547 | need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; |
| 1523 | ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs); | 1548 | ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs); |
| 1524 | if (ret) | 1549 | if (ret) |
| 1525 | goto err; | 1550 | goto err; |
| 1526 | 1551 | ||
| @@ -1530,7 +1555,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 1530 | if (ret) { | 1555 | if (ret) { |
| 1531 | if (ret == -EFAULT) { | 1556 | if (ret == -EFAULT) { |
| 1532 | ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, | 1557 | ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, |
| 1533 | eb, exec); | 1558 | eb, exec, ctx); |
| 1534 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 1559 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
| 1535 | } | 1560 | } |
| 1536 | if (ret) | 1561 | if (ret) |
| @@ -1544,6 +1569,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 1544 | goto err; | 1569 | goto err; |
| 1545 | } | 1570 | } |
| 1546 | 1571 | ||
| 1572 | params->args_batch_start_offset = args->batch_start_offset; | ||
| 1547 | if (i915_needs_cmd_parser(ring) && args->batch_len) { | 1573 | if (i915_needs_cmd_parser(ring) && args->batch_len) { |
| 1548 | struct drm_i915_gem_object *parsed_batch_obj; | 1574 | struct drm_i915_gem_object *parsed_batch_obj; |
| 1549 | 1575 | ||
| @@ -1575,7 +1601,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 1575 | * command parser has accepted. | 1601 | * command parser has accepted. |
| 1576 | */ | 1602 | */ |
| 1577 | dispatch_flags |= I915_DISPATCH_SECURE; | 1603 | dispatch_flags |= I915_DISPATCH_SECURE; |
| 1578 | exec_start = 0; | 1604 | params->args_batch_start_offset = 0; |
| 1579 | batch_obj = parsed_batch_obj; | 1605 | batch_obj = parsed_batch_obj; |
| 1580 | } | 1606 | } |
| 1581 | } | 1607 | } |
| @@ -1600,15 +1626,36 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 1600 | if (ret) | 1626 | if (ret) |
| 1601 | goto err; | 1627 | goto err; |
| 1602 | 1628 | ||
| 1603 | exec_start += i915_gem_obj_ggtt_offset(batch_obj); | 1629 | params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj); |
| 1604 | } else | 1630 | } else |
| 1605 | exec_start += i915_gem_obj_offset(batch_obj, vm); | 1631 | params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm); |
| 1606 | 1632 | ||
| 1607 | ret = dev_priv->gt.execbuf_submit(dev, file, ring, ctx, args, | 1633 | /* Allocate a request for this batch buffer nice and early. */ |
| 1608 | &eb->vmas, batch_obj, exec_start, | 1634 | ret = i915_gem_request_alloc(ring, ctx, ¶ms->request); |
| 1609 | dispatch_flags); | 1635 | if (ret) |
| 1636 | goto err_batch_unpin; | ||
| 1637 | |||
| 1638 | ret = i915_gem_request_add_to_client(params->request, file); | ||
| 1639 | if (ret) | ||
| 1640 | goto err_batch_unpin; | ||
| 1610 | 1641 | ||
| 1611 | /* | 1642 | /* |
| 1643 | * Save assorted stuff away to pass through to *_submission(). | ||
| 1644 | * NB: This data should be 'persistent' and not local as it will | ||
| 1645 | * kept around beyond the duration of the IOCTL once the GPU | ||
| 1646 | * scheduler arrives. | ||
| 1647 | */ | ||
| 1648 | params->dev = dev; | ||
| 1649 | params->file = file; | ||
| 1650 | params->ring = ring; | ||
| 1651 | params->dispatch_flags = dispatch_flags; | ||
| 1652 | params->batch_obj = batch_obj; | ||
| 1653 | params->ctx = ctx; | ||
| 1654 | |||
| 1655 | ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas); | ||
| 1656 | |||
| 1657 | err_batch_unpin: | ||
| 1658 | /* | ||
| 1612 | * FIXME: We crucially rely upon the active tracking for the (ppgtt) | 1659 | * FIXME: We crucially rely upon the active tracking for the (ppgtt) |
| 1613 | * batch vma for correctness. For less ugly and less fragility this | 1660 | * batch vma for correctness. For less ugly and less fragility this |
| 1614 | * needs to be adjusted to also track the ggtt batch vma properly as | 1661 | * needs to be adjusted to also track the ggtt batch vma properly as |
| @@ -1616,11 +1663,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 1616 | */ | 1663 | */ |
| 1617 | if (dispatch_flags & I915_DISPATCH_SECURE) | 1664 | if (dispatch_flags & I915_DISPATCH_SECURE) |
| 1618 | i915_gem_object_ggtt_unpin(batch_obj); | 1665 | i915_gem_object_ggtt_unpin(batch_obj); |
| 1666 | |||
| 1619 | err: | 1667 | err: |
| 1620 | /* the request owns the ref now */ | 1668 | /* the request owns the ref now */ |
| 1621 | i915_gem_context_unreference(ctx); | 1669 | i915_gem_context_unreference(ctx); |
| 1622 | eb_destroy(eb); | 1670 | eb_destroy(eb); |
| 1623 | 1671 | ||
| 1672 | /* | ||
| 1673 | * If the request was created but not successfully submitted then it | ||
| 1674 | * must be freed again. If it was submitted then it is being tracked | ||
| 1675 | * on the active request list and no clean up is required here. | ||
| 1676 | */ | ||
| 1677 | if (ret && params->request) | ||
| 1678 | i915_gem_request_cancel(params->request); | ||
| 1679 | |||
| 1624 | mutex_unlock(&dev->struct_mutex); | 1680 | mutex_unlock(&dev->struct_mutex); |
| 1625 | 1681 | ||
| 1626 | pre_mutex_err: | 1682 | pre_mutex_err: |
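
A minimal standalone sketch of the pattern the execbuffer hunks above introduce: the former do_execbuffer() argument list is packed into one params block which is zeroed, filled once, and handed to a single submit call, with the request cancelled if submission fails. The struct and helper below are illustrative stand-ins written for this note (only the field names are taken from the patch), not the driver's real i915 types.

/* Illustrative only: models the params-block pattern from the hunks above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct exec_params {                        /* stand-in for i915_execbuffer_params */
	void *dev, *file, *ring, *ctx;      /* opaque here; real driver types in i915 */
	void *batch_obj;
	void *request;                      /* allocated early, cancelled if submission fails */
	uint32_t dispatch_flags;
	uint32_t args_batch_start_offset;
	uint64_t batch_obj_vm_offset;
};

static int execbuf_submit(const struct exec_params *p)
{
	/* exec_start is now derived from the params block, as in the new code */
	uint64_t exec_start = p->batch_obj_vm_offset + p->args_batch_start_offset;

	printf("dispatch at 0x%llx, flags 0x%x\n",
	       (unsigned long long)exec_start, (unsigned)p->dispatch_flags);
	return 0;
}

int main(void)
{
	struct exec_params params;

	memset(&params, 0, sizeof(params));  /* mirrors memset(&params_master, 0x00, ...) */
	params.dispatch_flags = 0x1;         /* e.g. a secure-dispatch style flag */
	params.args_batch_start_offset = 64;
	params.batch_obj_vm_offset = 0x100000;

	return execbuf_submit(&params);
}

The point of keeping the data in one block rather than on the IOCTL stack is that, per the comment in the patch, the same state must survive until a GPU scheduler eventually consumes it.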
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 56b52a4767d4..c2a291e09bd9 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
| @@ -192,9 +192,8 @@ static gen8_pte_t gen8_pte_encode(dma_addr_t addr, | |||
| 192 | return pte; | 192 | return pte; |
| 193 | } | 193 | } |
| 194 | 194 | ||
| 195 | static gen8_pde_t gen8_pde_encode(struct drm_device *dev, | 195 | static gen8_pde_t gen8_pde_encode(const dma_addr_t addr, |
| 196 | dma_addr_t addr, | 196 | const enum i915_cache_level level) |
| 197 | enum i915_cache_level level) | ||
| 198 | { | 197 | { |
| 199 | gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW; | 198 | gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW; |
| 200 | pde |= addr; | 199 | pde |= addr; |
| @@ -301,75 +300,120 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr, | |||
| 301 | return pte; | 300 | return pte; |
| 302 | } | 301 | } |
| 303 | 302 | ||
| 304 | #define i915_dma_unmap_single(px, dev) \ | 303 | static int __setup_page_dma(struct drm_device *dev, |
| 305 | __i915_dma_unmap_single((px)->daddr, dev) | 304 | struct i915_page_dma *p, gfp_t flags) |
| 306 | |||
| 307 | static void __i915_dma_unmap_single(dma_addr_t daddr, | ||
| 308 | struct drm_device *dev) | ||
| 309 | { | 305 | { |
| 310 | struct device *device = &dev->pdev->dev; | 306 | struct device *device = &dev->pdev->dev; |
| 311 | 307 | ||
| 312 | dma_unmap_page(device, daddr, 4096, PCI_DMA_BIDIRECTIONAL); | 308 | p->page = alloc_page(flags); |
| 309 | if (!p->page) | ||
| 310 | return -ENOMEM; | ||
| 311 | |||
| 312 | p->daddr = dma_map_page(device, | ||
| 313 | p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL); | ||
| 314 | |||
| 315 | if (dma_mapping_error(device, p->daddr)) { | ||
| 316 | __free_page(p->page); | ||
| 317 | return -EINVAL; | ||
| 318 | } | ||
| 319 | |||
| 320 | return 0; | ||
| 313 | } | 321 | } |
| 314 | 322 | ||
| 315 | /** | 323 | static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p) |
| 316 | * i915_dma_map_single() - Create a dma mapping for a page table/dir/etc. | 324 | { |
| 317 | * @px: Page table/dir/etc to get a DMA map for | 325 | return __setup_page_dma(dev, p, GFP_KERNEL); |
| 318 | * @dev: drm device | 326 | } |
| 319 | * | ||
| 320 | * Page table allocations are unified across all gens. They always require a | ||
| 321 | * single 4k allocation, as well as a DMA mapping. If we keep the structs | ||
| 322 | * symmetric here, the simple macro covers us for every page table type. | ||
| 323 | * | ||
| 324 | * Return: 0 if success. | ||
| 325 | */ | ||
| 326 | #define i915_dma_map_single(px, dev) \ | ||
| 327 | i915_dma_map_page_single((px)->page, (dev), &(px)->daddr) | ||
| 328 | 327 | ||
| 329 | static int i915_dma_map_page_single(struct page *page, | 328 | static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p) |
| 330 | struct drm_device *dev, | ||
| 331 | dma_addr_t *daddr) | ||
| 332 | { | 329 | { |
| 333 | struct device *device = &dev->pdev->dev; | 330 | if (WARN_ON(!p->page)) |
| 331 | return; | ||
| 334 | 332 | ||
| 335 | *daddr = dma_map_page(device, page, 0, 4096, PCI_DMA_BIDIRECTIONAL); | 333 | dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL); |
| 336 | if (dma_mapping_error(device, *daddr)) | 334 | __free_page(p->page); |
| 337 | return -ENOMEM; | 335 | memset(p, 0, sizeof(*p)); |
| 336 | } | ||
| 338 | 337 | ||
| 339 | return 0; | 338 | static void *kmap_page_dma(struct i915_page_dma *p) |
| 339 | { | ||
| 340 | return kmap_atomic(p->page); | ||
| 340 | } | 341 | } |
| 341 | 342 | ||
| 342 | static void unmap_and_free_pt(struct i915_page_table *pt, | 343 | /* We use the flushing unmap only with ppgtt structures: |
| 343 | struct drm_device *dev) | 344 | * page directories, page tables and scratch pages. |
| 345 | */ | ||
| 346 | static void kunmap_page_dma(struct drm_device *dev, void *vaddr) | ||
| 344 | { | 347 | { |
| 345 | if (WARN_ON(!pt->page)) | 348 | /* There are only a few exceptions for gen >=6. chv and bxt. |
| 346 | return; | 349 | * And we are not sure about the latter so play safe for now. |
| 350 | */ | ||
| 351 | if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) | ||
| 352 | drm_clflush_virt_range(vaddr, PAGE_SIZE); | ||
| 347 | 353 | ||
| 348 | i915_dma_unmap_single(pt, dev); | 354 | kunmap_atomic(vaddr); |
| 349 | __free_page(pt->page); | ||
| 350 | kfree(pt->used_ptes); | ||
| 351 | kfree(pt); | ||
| 352 | } | 355 | } |
| 353 | 356 | ||
| 354 | static void gen8_initialize_pt(struct i915_address_space *vm, | 357 | #define kmap_px(px) kmap_page_dma(px_base(px)) |
| 355 | struct i915_page_table *pt) | 358 | #define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr)) |
| 359 | |||
| 360 | #define setup_px(dev, px) setup_page_dma((dev), px_base(px)) | ||
| 361 | #define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px)) | ||
| 362 | #define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v)) | ||
| 363 | #define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v)) | ||
| 364 | |||
| 365 | static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p, | ||
| 366 | const uint64_t val) | ||
| 356 | { | 367 | { |
| 357 | gen8_pte_t *pt_vaddr, scratch_pte; | ||
| 358 | int i; | 368 | int i; |
| 369 | uint64_t * const vaddr = kmap_page_dma(p); | ||
| 359 | 370 | ||
| 360 | pt_vaddr = kmap_atomic(pt->page); | 371 | for (i = 0; i < 512; i++) |
| 361 | scratch_pte = gen8_pte_encode(vm->scratch.addr, | 372 | vaddr[i] = val; |
| 362 | I915_CACHE_LLC, true); | 373 | |
| 374 | kunmap_page_dma(dev, vaddr); | ||
| 375 | } | ||
| 376 | |||
| 377 | static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p, | ||
| 378 | const uint32_t val32) | ||
| 379 | { | ||
| 380 | uint64_t v = val32; | ||
| 363 | 381 | ||
| 364 | for (i = 0; i < GEN8_PTES; i++) | 382 | v = v << 32 | val32; |
| 365 | pt_vaddr[i] = scratch_pte; | ||
| 366 | 383 | ||
| 367 | if (!HAS_LLC(vm->dev)) | 384 | fill_page_dma(dev, p, v); |
| 368 | drm_clflush_virt_range(pt_vaddr, PAGE_SIZE); | ||
| 369 | kunmap_atomic(pt_vaddr); | ||
| 370 | } | 385 | } |
| 371 | 386 | ||
| 372 | static struct i915_page_table *alloc_pt_single(struct drm_device *dev) | 387 | static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev) |
| 388 | { | ||
| 389 | struct i915_page_scratch *sp; | ||
| 390 | int ret; | ||
| 391 | |||
| 392 | sp = kzalloc(sizeof(*sp), GFP_KERNEL); | ||
| 393 | if (sp == NULL) | ||
| 394 | return ERR_PTR(-ENOMEM); | ||
| 395 | |||
| 396 | ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO); | ||
| 397 | if (ret) { | ||
| 398 | kfree(sp); | ||
| 399 | return ERR_PTR(ret); | ||
| 400 | } | ||
| 401 | |||
| 402 | set_pages_uc(px_page(sp), 1); | ||
| 403 | |||
| 404 | return sp; | ||
| 405 | } | ||
| 406 | |||
| 407 | static void free_scratch_page(struct drm_device *dev, | ||
| 408 | struct i915_page_scratch *sp) | ||
| 409 | { | ||
| 410 | set_pages_wb(px_page(sp), 1); | ||
| 411 | |||
| 412 | cleanup_px(dev, sp); | ||
| 413 | kfree(sp); | ||
| 414 | } | ||
| 415 | |||
| 416 | static struct i915_page_table *alloc_pt(struct drm_device *dev) | ||
| 373 | { | 417 | { |
| 374 | struct i915_page_table *pt; | 418 | struct i915_page_table *pt; |
| 375 | const size_t count = INTEL_INFO(dev)->gen >= 8 ? | 419 | const size_t count = INTEL_INFO(dev)->gen >= 8 ? |
| @@ -386,19 +430,13 @@ static struct i915_page_table *alloc_pt_single(struct drm_device *dev) | |||
| 386 | if (!pt->used_ptes) | 430 | if (!pt->used_ptes) |
| 387 | goto fail_bitmap; | 431 | goto fail_bitmap; |
| 388 | 432 | ||
| 389 | pt->page = alloc_page(GFP_KERNEL); | 433 | ret = setup_px(dev, pt); |
| 390 | if (!pt->page) | ||
| 391 | goto fail_page; | ||
| 392 | |||
| 393 | ret = i915_dma_map_single(pt, dev); | ||
| 394 | if (ret) | 434 | if (ret) |
| 395 | goto fail_dma; | 435 | goto fail_page_m; |
| 396 | 436 | ||
| 397 | return pt; | 437 | return pt; |
| 398 | 438 | ||
| 399 | fail_dma: | 439 | fail_page_m: |
| 400 | __free_page(pt->page); | ||
| 401 | fail_page: | ||
| 402 | kfree(pt->used_ptes); | 440 | kfree(pt->used_ptes); |
| 403 | fail_bitmap: | 441 | fail_bitmap: |
| 404 | kfree(pt); | 442 | kfree(pt); |
| @@ -406,18 +444,38 @@ fail_bitmap: | |||
| 406 | return ERR_PTR(ret); | 444 | return ERR_PTR(ret); |
| 407 | } | 445 | } |
| 408 | 446 | ||
| 409 | static void unmap_and_free_pd(struct i915_page_directory *pd, | 447 | static void free_pt(struct drm_device *dev, struct i915_page_table *pt) |
| 410 | struct drm_device *dev) | ||
| 411 | { | 448 | { |
| 412 | if (pd->page) { | 449 | cleanup_px(dev, pt); |
| 413 | i915_dma_unmap_single(pd, dev); | 450 | kfree(pt->used_ptes); |
| 414 | __free_page(pd->page); | 451 | kfree(pt); |
| 415 | kfree(pd->used_pdes); | ||
| 416 | kfree(pd); | ||
| 417 | } | ||
| 418 | } | 452 | } |
| 419 | 453 | ||
| 420 | static struct i915_page_directory *alloc_pd_single(struct drm_device *dev) | 454 | static void gen8_initialize_pt(struct i915_address_space *vm, |
| 455 | struct i915_page_table *pt) | ||
| 456 | { | ||
| 457 | gen8_pte_t scratch_pte; | ||
| 458 | |||
| 459 | scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page), | ||
| 460 | I915_CACHE_LLC, true); | ||
| 461 | |||
| 462 | fill_px(vm->dev, pt, scratch_pte); | ||
| 463 | } | ||
| 464 | |||
| 465 | static void gen6_initialize_pt(struct i915_address_space *vm, | ||
| 466 | struct i915_page_table *pt) | ||
| 467 | { | ||
| 468 | gen6_pte_t scratch_pte; | ||
| 469 | |||
| 470 | WARN_ON(px_dma(vm->scratch_page) == 0); | ||
| 471 | |||
| 472 | scratch_pte = vm->pte_encode(px_dma(vm->scratch_page), | ||
| 473 | I915_CACHE_LLC, true, 0); | ||
| 474 | |||
| 475 | fill32_px(vm->dev, pt, scratch_pte); | ||
| 476 | } | ||
| 477 | |||
| 478 | static struct i915_page_directory *alloc_pd(struct drm_device *dev) | ||
| 421 | { | 479 | { |
| 422 | struct i915_page_directory *pd; | 480 | struct i915_page_directory *pd; |
| 423 | int ret = -ENOMEM; | 481 | int ret = -ENOMEM; |
| @@ -429,38 +487,52 @@ static struct i915_page_directory *alloc_pd_single(struct drm_device *dev) | |||
| 429 | pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES), | 487 | pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES), |
| 430 | sizeof(*pd->used_pdes), GFP_KERNEL); | 488 | sizeof(*pd->used_pdes), GFP_KERNEL); |
| 431 | if (!pd->used_pdes) | 489 | if (!pd->used_pdes) |
| 432 | goto free_pd; | 490 | goto fail_bitmap; |
| 433 | |||
| 434 | pd->page = alloc_page(GFP_KERNEL); | ||
| 435 | if (!pd->page) | ||
| 436 | goto free_bitmap; | ||
| 437 | 491 | ||
| 438 | ret = i915_dma_map_single(pd, dev); | 492 | ret = setup_px(dev, pd); |
| 439 | if (ret) | 493 | if (ret) |
| 440 | goto free_page; | 494 | goto fail_page_m; |
| 441 | 495 | ||
| 442 | return pd; | 496 | return pd; |
| 443 | 497 | ||
| 444 | free_page: | 498 | fail_page_m: |
| 445 | __free_page(pd->page); | ||
| 446 | free_bitmap: | ||
| 447 | kfree(pd->used_pdes); | 499 | kfree(pd->used_pdes); |
| 448 | free_pd: | 500 | fail_bitmap: |
| 449 | kfree(pd); | 501 | kfree(pd); |
| 450 | 502 | ||
| 451 | return ERR_PTR(ret); | 503 | return ERR_PTR(ret); |
| 452 | } | 504 | } |
| 453 | 505 | ||
| 506 | static void free_pd(struct drm_device *dev, struct i915_page_directory *pd) | ||
| 507 | { | ||
| 508 | if (px_page(pd)) { | ||
| 509 | cleanup_px(dev, pd); | ||
| 510 | kfree(pd->used_pdes); | ||
| 511 | kfree(pd); | ||
| 512 | } | ||
| 513 | } | ||
| 514 | |||
| 515 | static void gen8_initialize_pd(struct i915_address_space *vm, | ||
| 516 | struct i915_page_directory *pd) | ||
| 517 | { | ||
| 518 | gen8_pde_t scratch_pde; | ||
| 519 | |||
| 520 | scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC); | ||
| 521 | |||
| 522 | fill_px(vm->dev, pd, scratch_pde); | ||
| 523 | } | ||
| 524 | |||
| 454 | /* Broadwell Page Directory Pointer Descriptors */ | 525 | /* Broadwell Page Directory Pointer Descriptors */ |
| 455 | static int gen8_write_pdp(struct intel_engine_cs *ring, | 526 | static int gen8_write_pdp(struct drm_i915_gem_request *req, |
| 456 | unsigned entry, | 527 | unsigned entry, |
| 457 | dma_addr_t addr) | 528 | dma_addr_t addr) |
| 458 | { | 529 | { |
| 530 | struct intel_engine_cs *ring = req->ring; | ||
| 459 | int ret; | 531 | int ret; |
| 460 | 532 | ||
| 461 | BUG_ON(entry >= 4); | 533 | BUG_ON(entry >= 4); |
| 462 | 534 | ||
| 463 | ret = intel_ring_begin(ring, 6); | 535 | ret = intel_ring_begin(req, 6); |
| 464 | if (ret) | 536 | if (ret) |
| 465 | return ret; | 537 | return ret; |
| 466 | 538 | ||
| @@ -476,16 +548,14 @@ static int gen8_write_pdp(struct intel_engine_cs *ring, | |||
| 476 | } | 548 | } |
| 477 | 549 | ||
| 478 | static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt, | 550 | static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt, |
| 479 | struct intel_engine_cs *ring) | 551 | struct drm_i915_gem_request *req) |
| 480 | { | 552 | { |
| 481 | int i, ret; | 553 | int i, ret; |
| 482 | 554 | ||
| 483 | for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) { | 555 | for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) { |
| 484 | struct i915_page_directory *pd = ppgtt->pdp.page_directory[i]; | 556 | const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); |
| 485 | dma_addr_t pd_daddr = pd ? pd->daddr : ppgtt->scratch_pd->daddr; | 557 | |
| 486 | /* The page directory might be NULL, but we need to clear out | 558 | ret = gen8_write_pdp(req, i, pd_daddr); |
| 487 | * whatever the previous context might have used. */ | ||
| 488 | ret = gen8_write_pdp(ring, i, pd_daddr); | ||
| 489 | if (ret) | 559 | if (ret) |
| 490 | return ret; | 560 | return ret; |
| 491 | } | 561 | } |
| @@ -507,13 +577,12 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, | |||
| 507 | unsigned num_entries = length >> PAGE_SHIFT; | 577 | unsigned num_entries = length >> PAGE_SHIFT; |
| 508 | unsigned last_pte, i; | 578 | unsigned last_pte, i; |
| 509 | 579 | ||
| 510 | scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr, | 580 | scratch_pte = gen8_pte_encode(px_dma(ppgtt->base.scratch_page), |
| 511 | I915_CACHE_LLC, use_scratch); | 581 | I915_CACHE_LLC, use_scratch); |
| 512 | 582 | ||
| 513 | while (num_entries) { | 583 | while (num_entries) { |
| 514 | struct i915_page_directory *pd; | 584 | struct i915_page_directory *pd; |
| 515 | struct i915_page_table *pt; | 585 | struct i915_page_table *pt; |
| 516 | struct page *page_table; | ||
| 517 | 586 | ||
| 518 | if (WARN_ON(!ppgtt->pdp.page_directory[pdpe])) | 587 | if (WARN_ON(!ppgtt->pdp.page_directory[pdpe])) |
| 519 | break; | 588 | break; |
| @@ -525,25 +594,21 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, | |||
| 525 | 594 | ||
| 526 | pt = pd->page_table[pde]; | 595 | pt = pd->page_table[pde]; |
| 527 | 596 | ||
| 528 | if (WARN_ON(!pt->page)) | 597 | if (WARN_ON(!px_page(pt))) |
| 529 | break; | 598 | break; |
| 530 | 599 | ||
| 531 | page_table = pt->page; | ||
| 532 | |||
| 533 | last_pte = pte + num_entries; | 600 | last_pte = pte + num_entries; |
| 534 | if (last_pte > GEN8_PTES) | 601 | if (last_pte > GEN8_PTES) |
| 535 | last_pte = GEN8_PTES; | 602 | last_pte = GEN8_PTES; |
| 536 | 603 | ||
| 537 | pt_vaddr = kmap_atomic(page_table); | 604 | pt_vaddr = kmap_px(pt); |
| 538 | 605 | ||
| 539 | for (i = pte; i < last_pte; i++) { | 606 | for (i = pte; i < last_pte; i++) { |
| 540 | pt_vaddr[i] = scratch_pte; | 607 | pt_vaddr[i] = scratch_pte; |
| 541 | num_entries--; | 608 | num_entries--; |
| 542 | } | 609 | } |
| 543 | 610 | ||
| 544 | if (!HAS_LLC(ppgtt->base.dev)) | 611 | kunmap_px(ppgtt, pt); |
| 545 | drm_clflush_virt_range(pt_vaddr, PAGE_SIZE); | ||
| 546 | kunmap_atomic(pt_vaddr); | ||
| 547 | 612 | ||
| 548 | pte = 0; | 613 | pte = 0; |
| 549 | if (++pde == I915_PDES) { | 614 | if (++pde == I915_PDES) { |
| @@ -575,18 +640,14 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, | |||
| 575 | if (pt_vaddr == NULL) { | 640 | if (pt_vaddr == NULL) { |
| 576 | struct i915_page_directory *pd = ppgtt->pdp.page_directory[pdpe]; | 641 | struct i915_page_directory *pd = ppgtt->pdp.page_directory[pdpe]; |
| 577 | struct i915_page_table *pt = pd->page_table[pde]; | 642 | struct i915_page_table *pt = pd->page_table[pde]; |
| 578 | struct page *page_table = pt->page; | 643 | pt_vaddr = kmap_px(pt); |
| 579 | |||
| 580 | pt_vaddr = kmap_atomic(page_table); | ||
| 581 | } | 644 | } |
| 582 | 645 | ||
| 583 | pt_vaddr[pte] = | 646 | pt_vaddr[pte] = |
| 584 | gen8_pte_encode(sg_page_iter_dma_address(&sg_iter), | 647 | gen8_pte_encode(sg_page_iter_dma_address(&sg_iter), |
| 585 | cache_level, true); | 648 | cache_level, true); |
| 586 | if (++pte == GEN8_PTES) { | 649 | if (++pte == GEN8_PTES) { |
| 587 | if (!HAS_LLC(ppgtt->base.dev)) | 650 | kunmap_px(ppgtt, pt_vaddr); |
| 588 | drm_clflush_virt_range(pt_vaddr, PAGE_SIZE); | ||
| 589 | kunmap_atomic(pt_vaddr); | ||
| 590 | pt_vaddr = NULL; | 651 | pt_vaddr = NULL; |
| 591 | if (++pde == I915_PDES) { | 652 | if (++pde == I915_PDES) { |
| 592 | pdpe++; | 653 | pdpe++; |
| @@ -595,58 +656,64 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, | |||
| 595 | pte = 0; | 656 | pte = 0; |
| 596 | } | 657 | } |
| 597 | } | 658 | } |
| 598 | if (pt_vaddr) { | ||
| 599 | if (!HAS_LLC(ppgtt->base.dev)) | ||
| 600 | drm_clflush_virt_range(pt_vaddr, PAGE_SIZE); | ||
| 601 | kunmap_atomic(pt_vaddr); | ||
| 602 | } | ||
| 603 | } | ||
| 604 | |||
| 605 | static void __gen8_do_map_pt(gen8_pde_t * const pde, | ||
| 606 | struct i915_page_table *pt, | ||
| 607 | struct drm_device *dev) | ||
| 608 | { | ||
| 609 | gen8_pde_t entry = | ||
| 610 | gen8_pde_encode(dev, pt->daddr, I915_CACHE_LLC); | ||
| 611 | *pde = entry; | ||
| 612 | } | ||
| 613 | 659 | ||
| 614 | static void gen8_initialize_pd(struct i915_address_space *vm, | 660 | if (pt_vaddr) |
| 615 | struct i915_page_directory *pd) | 661 | kunmap_px(ppgtt, pt_vaddr); |
| 616 | { | ||
| 617 | struct i915_hw_ppgtt *ppgtt = | ||
| 618 | container_of(vm, struct i915_hw_ppgtt, base); | ||
| 619 | gen8_pde_t *page_directory; | ||
| 620 | struct i915_page_table *pt; | ||
| 621 | int i; | ||
| 622 | |||
| 623 | page_directory = kmap_atomic(pd->page); | ||
| 624 | pt = ppgtt->scratch_pt; | ||
| 625 | for (i = 0; i < I915_PDES; i++) | ||
| 626 | /* Map the PDE to the page table */ | ||
| 627 | __gen8_do_map_pt(page_directory + i, pt, vm->dev); | ||
| 628 | |||
| 629 | if (!HAS_LLC(vm->dev)) | ||
| 630 | drm_clflush_virt_range(page_directory, PAGE_SIZE); | ||
| 631 | kunmap_atomic(page_directory); | ||
| 632 | } | 662 | } |
| 633 | 663 | ||
| 634 | static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_device *dev) | 664 | static void gen8_free_page_tables(struct drm_device *dev, |
| 665 | struct i915_page_directory *pd) | ||
| 635 | { | 666 | { |
| 636 | int i; | 667 | int i; |
| 637 | 668 | ||
| 638 | if (!pd->page) | 669 | if (!px_page(pd)) |
| 639 | return; | 670 | return; |
| 640 | 671 | ||
| 641 | for_each_set_bit(i, pd->used_pdes, I915_PDES) { | 672 | for_each_set_bit(i, pd->used_pdes, I915_PDES) { |
| 642 | if (WARN_ON(!pd->page_table[i])) | 673 | if (WARN_ON(!pd->page_table[i])) |
| 643 | continue; | 674 | continue; |
| 644 | 675 | ||
| 645 | unmap_and_free_pt(pd->page_table[i], dev); | 676 | free_pt(dev, pd->page_table[i]); |
| 646 | pd->page_table[i] = NULL; | 677 | pd->page_table[i] = NULL; |
| 647 | } | 678 | } |
| 648 | } | 679 | } |
| 649 | 680 | ||
| 681 | static int gen8_init_scratch(struct i915_address_space *vm) | ||
| 682 | { | ||
| 683 | struct drm_device *dev = vm->dev; | ||
| 684 | |||
| 685 | vm->scratch_page = alloc_scratch_page(dev); | ||
| 686 | if (IS_ERR(vm->scratch_page)) | ||
| 687 | return PTR_ERR(vm->scratch_page); | ||
| 688 | |||
| 689 | vm->scratch_pt = alloc_pt(dev); | ||
| 690 | if (IS_ERR(vm->scratch_pt)) { | ||
| 691 | free_scratch_page(dev, vm->scratch_page); | ||
| 692 | return PTR_ERR(vm->scratch_pt); | ||
| 693 | } | ||
| 694 | |||
| 695 | vm->scratch_pd = alloc_pd(dev); | ||
| 696 | if (IS_ERR(vm->scratch_pd)) { | ||
| 697 | free_pt(dev, vm->scratch_pt); | ||
| 698 | free_scratch_page(dev, vm->scratch_page); | ||
| 699 | return PTR_ERR(vm->scratch_pd); | ||
| 700 | } | ||
| 701 | |||
| 702 | gen8_initialize_pt(vm, vm->scratch_pt); | ||
| 703 | gen8_initialize_pd(vm, vm->scratch_pd); | ||
| 704 | |||
| 705 | return 0; | ||
| 706 | } | ||
| 707 | |||
| 708 | static void gen8_free_scratch(struct i915_address_space *vm) | ||
| 709 | { | ||
| 710 | struct drm_device *dev = vm->dev; | ||
| 711 | |||
| 712 | free_pd(dev, vm->scratch_pd); | ||
| 713 | free_pt(dev, vm->scratch_pt); | ||
| 714 | free_scratch_page(dev, vm->scratch_page); | ||
| 715 | } | ||
| 716 | |||
| 650 | static void gen8_ppgtt_cleanup(struct i915_address_space *vm) | 717 | static void gen8_ppgtt_cleanup(struct i915_address_space *vm) |
| 651 | { | 718 | { |
| 652 | struct i915_hw_ppgtt *ppgtt = | 719 | struct i915_hw_ppgtt *ppgtt = |
| @@ -657,12 +724,12 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) | |||
| 657 | if (WARN_ON(!ppgtt->pdp.page_directory[i])) | 724 | if (WARN_ON(!ppgtt->pdp.page_directory[i])) |
| 658 | continue; | 725 | continue; |
| 659 | 726 | ||
| 660 | gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev); | 727 | gen8_free_page_tables(ppgtt->base.dev, |
| 661 | unmap_and_free_pd(ppgtt->pdp.page_directory[i], ppgtt->base.dev); | 728 | ppgtt->pdp.page_directory[i]); |
| 729 | free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]); | ||
| 662 | } | 730 | } |
| 663 | 731 | ||
| 664 | unmap_and_free_pd(ppgtt->scratch_pd, ppgtt->base.dev); | 732 | gen8_free_scratch(vm); |
| 665 | unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev); | ||
| 666 | } | 733 | } |
| 667 | 734 | ||
| 668 | /** | 735 | /** |
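
gen8_init_scratch() and gen8_free_scratch() above allocate the shared scratch page, scratch PT and scratch PD in that order and unwind in reverse on any failure. A self-contained sketch of that ordering follows, with plain malloc/free standing in for alloc_scratch_page()/alloc_pt()/alloc_pd() and their cleanup counterparts; it is not driver code.

/* Standalone illustration of the allocate-forward, unwind-backward ordering. */
#include <stdio.h>
#include <stdlib.h>

struct scratch {
	void *page;
	void *pt;
	void *pd;
};

static int init_scratch(struct scratch *s)
{
	s->page = malloc(4096);
	if (!s->page)
		return -1;

	s->pt = malloc(4096);
	if (!s->pt) {
		free(s->page);          /* unwind: page only */
		return -1;
	}

	s->pd = malloc(4096);
	if (!s->pd) {
		free(s->pt);            /* unwind: pt, then page */
		free(s->page);
		return -1;
	}
	return 0;
}

static void free_scratch(struct scratch *s)
{
	/* release in the reverse order of allocation */
	free(s->pd);
	free(s->pt);
	free(s->page);
}

int main(void)
{
	struct scratch s;

	if (init_scratch(&s))
		return 1;
	printf("scratch structures ready\n");
	free_scratch(&s);
	return 0;
}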
| @@ -698,24 +765,24 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt, | |||
| 698 | /* Don't reallocate page tables */ | 765 | /* Don't reallocate page tables */ |
| 699 | if (pt) { | 766 | if (pt) { |
| 700 | /* Scratch is never allocated this way */ | 767 | /* Scratch is never allocated this way */ |
| 701 | WARN_ON(pt == ppgtt->scratch_pt); | 768 | WARN_ON(pt == ppgtt->base.scratch_pt); |
| 702 | continue; | 769 | continue; |
| 703 | } | 770 | } |
| 704 | 771 | ||
| 705 | pt = alloc_pt_single(dev); | 772 | pt = alloc_pt(dev); |
| 706 | if (IS_ERR(pt)) | 773 | if (IS_ERR(pt)) |
| 707 | goto unwind_out; | 774 | goto unwind_out; |
| 708 | 775 | ||
| 709 | gen8_initialize_pt(&ppgtt->base, pt); | 776 | gen8_initialize_pt(&ppgtt->base, pt); |
| 710 | pd->page_table[pde] = pt; | 777 | pd->page_table[pde] = pt; |
| 711 | set_bit(pde, new_pts); | 778 | __set_bit(pde, new_pts); |
| 712 | } | 779 | } |
| 713 | 780 | ||
| 714 | return 0; | 781 | return 0; |
| 715 | 782 | ||
| 716 | unwind_out: | 783 | unwind_out: |
| 717 | for_each_set_bit(pde, new_pts, I915_PDES) | 784 | for_each_set_bit(pde, new_pts, I915_PDES) |
| 718 | unmap_and_free_pt(pd->page_table[pde], dev); | 785 | free_pt(dev, pd->page_table[pde]); |
| 719 | 786 | ||
| 720 | return -ENOMEM; | 787 | return -ENOMEM; |
| 721 | } | 788 | } |
| @@ -756,27 +823,24 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt, | |||
| 756 | 823 | ||
| 757 | WARN_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES)); | 824 | WARN_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES)); |
| 758 | 825 | ||
| 759 | /* FIXME: upper bound must not overflow 32 bits */ | ||
| 760 | WARN_ON((start + length) > (1ULL << 32)); | ||
| 761 | |||
| 762 | gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { | 826 | gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { |
| 763 | if (pd) | 827 | if (pd) |
| 764 | continue; | 828 | continue; |
| 765 | 829 | ||
| 766 | pd = alloc_pd_single(dev); | 830 | pd = alloc_pd(dev); |
| 767 | if (IS_ERR(pd)) | 831 | if (IS_ERR(pd)) |
| 768 | goto unwind_out; | 832 | goto unwind_out; |
| 769 | 833 | ||
| 770 | gen8_initialize_pd(&ppgtt->base, pd); | 834 | gen8_initialize_pd(&ppgtt->base, pd); |
| 771 | pdp->page_directory[pdpe] = pd; | 835 | pdp->page_directory[pdpe] = pd; |
| 772 | set_bit(pdpe, new_pds); | 836 | __set_bit(pdpe, new_pds); |
| 773 | } | 837 | } |
| 774 | 838 | ||
| 775 | return 0; | 839 | return 0; |
| 776 | 840 | ||
| 777 | unwind_out: | 841 | unwind_out: |
| 778 | for_each_set_bit(pdpe, new_pds, GEN8_LEGACY_PDPES) | 842 | for_each_set_bit(pdpe, new_pds, GEN8_LEGACY_PDPES) |
| 779 | unmap_and_free_pd(pdp->page_directory[pdpe], dev); | 843 | free_pd(dev, pdp->page_directory[pdpe]); |
| 780 | 844 | ||
| 781 | return -ENOMEM; | 845 | return -ENOMEM; |
| 782 | } | 846 | } |
| @@ -830,6 +894,16 @@ err_out: | |||
| 830 | return -ENOMEM; | 894 | return -ENOMEM; |
| 831 | } | 895 | } |
| 832 | 896 | ||
| 897 | /* PDE TLBs are a pain to invalidate on GEN8+. When we modify | ||
| 898 | * the page table structures, we mark them dirty so that | ||
| 899 | * context switching/execlist queuing code takes extra steps | ||
| 900 | * to ensure that tlbs are flushed. | ||
| 901 | */ | ||
| 902 | static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) | ||
| 903 | { | ||
| 904 | ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask; | ||
| 905 | } | ||
| 906 | |||
| 833 | static int gen8_alloc_va_range(struct i915_address_space *vm, | 907 | static int gen8_alloc_va_range(struct i915_address_space *vm, |
| 834 | uint64_t start, | 908 | uint64_t start, |
| 835 | uint64_t length) | 909 | uint64_t length) |
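
mark_tlbs_dirty(), added above, records per PPGTT which rings still have stale PDE TLBs after the page-table layout changed; each ring's mm switch is expected to clear its own bit, and the execbuffer path warns if that did not happen. A toy model of that handshake follows; the mask value and function bodies are invented for illustration and only mirror the pd_dirty_rings idea from the patch.

/* Toy model of the pd_dirty_rings handshake; not driver code. */
#include <stdio.h>

#define RING_MASK 0x1fu                 /* pretend the device has five rings */

struct ppgtt { unsigned int pd_dirty_rings; };

static void mark_tlbs_dirty(struct ppgtt *ppgtt)
{
	ppgtt->pd_dirty_rings = RING_MASK;      /* every ring must reload its PDs */
}

static void context_switch(struct ppgtt *ppgtt, int ring_id)
{
	if (ppgtt->pd_dirty_rings & (1u << ring_id)) {
		/* here the real code reloads page directories / flushes TLBs */
		ppgtt->pd_dirty_rings &= ~(1u << ring_id);
	}
}

int main(void)
{
	struct ppgtt ppgtt = { 0 };

	mark_tlbs_dirty(&ppgtt);                /* page tables were just modified */
	context_switch(&ppgtt, 0);              /* ring 0 clears its bit */

	/* the execbuffer path effectively asserts this bit is now clear */
	printf("ring 0 dirty? %s\n", (ppgtt.pd_dirty_rings & 1u) ? "yes" : "no");
	return 0;
}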
| @@ -848,7 +922,10 @@ static int gen8_alloc_va_range(struct i915_address_space *vm, | |||
| 848 | * actually use the other side of the canonical address space. | 922 | * actually use the other side of the canonical address space. |
| 849 | */ | 923 | */ |
| 850 | if (WARN_ON(start + length < start)) | 924 | if (WARN_ON(start + length < start)) |
| 851 | return -ERANGE; | 925 | return -ENODEV; |
| 926 | |||
| 927 | if (WARN_ON(start + length > ppgtt->base.total)) | ||
| 928 | return -ENODEV; | ||
| 852 | 929 | ||
| 853 | ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables); | 930 | ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables); |
| 854 | if (ret) | 931 | if (ret) |
| @@ -876,7 +953,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm, | |||
| 876 | /* Allocations have completed successfully, so set the bitmaps, and do | 953 | /* Allocations have completed successfully, so set the bitmaps, and do |
| 877 | * the mappings. */ | 954 | * the mappings. */ |
| 878 | gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) { | 955 | gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) { |
| 879 | gen8_pde_t *const page_directory = kmap_atomic(pd->page); | 956 | gen8_pde_t *const page_directory = kmap_px(pd); |
| 880 | struct i915_page_table *pt; | 957 | struct i915_page_table *pt; |
| 881 | uint64_t pd_len = gen8_clamp_pd(start, length); | 958 | uint64_t pd_len = gen8_clamp_pd(start, length); |
| 882 | uint64_t pd_start = start; | 959 | uint64_t pd_start = start; |
| @@ -897,36 +974,36 @@ static int gen8_alloc_va_range(struct i915_address_space *vm, | |||
| 897 | gen8_pte_count(pd_start, pd_len)); | 974 | gen8_pte_count(pd_start, pd_len)); |
| 898 | 975 | ||
| 899 | /* Our pde is now pointing to the pagetable, pt */ | 976 | /* Our pde is now pointing to the pagetable, pt */ |
| 900 | set_bit(pde, pd->used_pdes); | 977 | __set_bit(pde, pd->used_pdes); |
| 901 | 978 | ||
| 902 | /* Map the PDE to the page table */ | 979 | /* Map the PDE to the page table */ |
| 903 | __gen8_do_map_pt(page_directory + pde, pt, vm->dev); | 980 | page_directory[pde] = gen8_pde_encode(px_dma(pt), |
| 981 | I915_CACHE_LLC); | ||
| 904 | 982 | ||
| 905 | /* NB: We haven't yet mapped ptes to pages. At this | 983 | /* NB: We haven't yet mapped ptes to pages. At this |
| 906 | * point we're still relying on insert_entries() */ | 984 | * point we're still relying on insert_entries() */ |
| 907 | } | 985 | } |
| 908 | 986 | ||
| 909 | if (!HAS_LLC(vm->dev)) | 987 | kunmap_px(ppgtt, page_directory); |
| 910 | drm_clflush_virt_range(page_directory, PAGE_SIZE); | ||
| 911 | 988 | ||
| 912 | kunmap_atomic(page_directory); | 989 | __set_bit(pdpe, ppgtt->pdp.used_pdpes); |
| 913 | |||
| 914 | set_bit(pdpe, ppgtt->pdp.used_pdpes); | ||
| 915 | } | 990 | } |
| 916 | 991 | ||
| 917 | free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); | 992 | free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); |
| 993 | mark_tlbs_dirty(ppgtt); | ||
| 918 | return 0; | 994 | return 0; |
| 919 | 995 | ||
| 920 | err_out: | 996 | err_out: |
| 921 | while (pdpe--) { | 997 | while (pdpe--) { |
| 922 | for_each_set_bit(temp, new_page_tables[pdpe], I915_PDES) | 998 | for_each_set_bit(temp, new_page_tables[pdpe], I915_PDES) |
| 923 | unmap_and_free_pt(ppgtt->pdp.page_directory[pdpe]->page_table[temp], vm->dev); | 999 | free_pt(vm->dev, ppgtt->pdp.page_directory[pdpe]->page_table[temp]); |
| 924 | } | 1000 | } |
| 925 | 1001 | ||
| 926 | for_each_set_bit(pdpe, new_page_dirs, GEN8_LEGACY_PDPES) | 1002 | for_each_set_bit(pdpe, new_page_dirs, GEN8_LEGACY_PDPES) |
| 927 | unmap_and_free_pd(ppgtt->pdp.page_directory[pdpe], vm->dev); | 1003 | free_pd(vm->dev, ppgtt->pdp.page_directory[pdpe]); |
| 928 | 1004 | ||
| 929 | free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); | 1005 | free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); |
| 1006 | mark_tlbs_dirty(ppgtt); | ||
| 930 | return ret; | 1007 | return ret; |
| 931 | } | 1008 | } |
| 932 | 1009 | ||
| @@ -939,16 +1016,11 @@ err_out: | |||
| 939 | */ | 1016 | */ |
| 940 | static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) | 1017 | static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) |
| 941 | { | 1018 | { |
| 942 | ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev); | 1019 | int ret; |
| 943 | if (IS_ERR(ppgtt->scratch_pt)) | ||
| 944 | return PTR_ERR(ppgtt->scratch_pt); | ||
| 945 | |||
| 946 | ppgtt->scratch_pd = alloc_pd_single(ppgtt->base.dev); | ||
| 947 | if (IS_ERR(ppgtt->scratch_pd)) | ||
| 948 | return PTR_ERR(ppgtt->scratch_pd); | ||
| 949 | 1020 | ||
| 950 | gen8_initialize_pt(&ppgtt->base, ppgtt->scratch_pt); | 1021 | ret = gen8_init_scratch(&ppgtt->base); |
| 951 | gen8_initialize_pd(&ppgtt->base, ppgtt->scratch_pd); | 1022 | if (ret) |
| 1023 | return ret; | ||
| 952 | 1024 | ||
| 953 | ppgtt->base.start = 0; | 1025 | ppgtt->base.start = 0; |
| 954 | ppgtt->base.total = 1ULL << 32; | 1026 | ppgtt->base.total = 1ULL << 32; |
| @@ -980,12 +1052,13 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) | |||
| 980 | uint32_t pte, pde, temp; | 1052 | uint32_t pte, pde, temp; |
| 981 | uint32_t start = ppgtt->base.start, length = ppgtt->base.total; | 1053 | uint32_t start = ppgtt->base.start, length = ppgtt->base.total; |
| 982 | 1054 | ||
| 983 | scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0); | 1055 | scratch_pte = vm->pte_encode(px_dma(vm->scratch_page), |
| 1056 | I915_CACHE_LLC, true, 0); | ||
| 984 | 1057 | ||
| 985 | gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) { | 1058 | gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) { |
| 986 | u32 expected; | 1059 | u32 expected; |
| 987 | gen6_pte_t *pt_vaddr; | 1060 | gen6_pte_t *pt_vaddr; |
| 988 | dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr; | 1061 | const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]); |
| 989 | pd_entry = readl(ppgtt->pd_addr + pde); | 1062 | pd_entry = readl(ppgtt->pd_addr + pde); |
| 990 | expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID); | 1063 | expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID); |
| 991 | 1064 | ||
| @@ -996,7 +1069,8 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) | |||
| 996 | expected); | 1069 | expected); |
| 997 | seq_printf(m, "\tPDE: %x\n", pd_entry); | 1070 | seq_printf(m, "\tPDE: %x\n", pd_entry); |
| 998 | 1071 | ||
| 999 | pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page); | 1072 | pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]); |
| 1073 | |||
| 1000 | for (pte = 0; pte < GEN6_PTES; pte+=4) { | 1074 | for (pte = 0; pte < GEN6_PTES; pte+=4) { |
| 1001 | unsigned long va = | 1075 | unsigned long va = |
| 1002 | (pde * PAGE_SIZE * GEN6_PTES) + | 1076 | (pde * PAGE_SIZE * GEN6_PTES) + |
| @@ -1018,7 +1092,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) | |||
| 1018 | } | 1092 | } |
| 1019 | seq_puts(m, "\n"); | 1093 | seq_puts(m, "\n"); |
| 1020 | } | 1094 | } |
| 1021 | kunmap_atomic(pt_vaddr); | 1095 | kunmap_px(ppgtt, pt_vaddr); |
| 1022 | } | 1096 | } |
| 1023 | } | 1097 | } |
| 1024 | 1098 | ||
| @@ -1031,7 +1105,7 @@ static void gen6_write_pde(struct i915_page_directory *pd, | |||
| 1031 | container_of(pd, struct i915_hw_ppgtt, pd); | 1105 | container_of(pd, struct i915_hw_ppgtt, pd); |
| 1032 | u32 pd_entry; | 1106 | u32 pd_entry; |
| 1033 | 1107 | ||
| 1034 | pd_entry = GEN6_PDE_ADDR_ENCODE(pt->daddr); | 1108 | pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt)); |
| 1035 | pd_entry |= GEN6_PDE_VALID; | 1109 | pd_entry |= GEN6_PDE_VALID; |
| 1036 | 1110 | ||
| 1037 | writel(pd_entry, ppgtt->pd_addr + pde); | 1111 | writel(pd_entry, ppgtt->pd_addr + pde); |
| @@ -1056,22 +1130,23 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv, | |||
| 1056 | 1130 | ||
| 1057 | static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt) | 1131 | static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt) |
| 1058 | { | 1132 | { |
| 1059 | BUG_ON(ppgtt->pd.pd_offset & 0x3f); | 1133 | BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f); |
| 1060 | 1134 | ||
| 1061 | return (ppgtt->pd.pd_offset / 64) << 16; | 1135 | return (ppgtt->pd.base.ggtt_offset / 64) << 16; |
| 1062 | } | 1136 | } |
| 1063 | 1137 | ||
| 1064 | static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, | 1138 | static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, |
| 1065 | struct intel_engine_cs *ring) | 1139 | struct drm_i915_gem_request *req) |
| 1066 | { | 1140 | { |
| 1141 | struct intel_engine_cs *ring = req->ring; | ||
| 1067 | int ret; | 1142 | int ret; |
| 1068 | 1143 | ||
| 1069 | /* NB: TLBs must be flushed and invalidated before a switch */ | 1144 | /* NB: TLBs must be flushed and invalidated before a switch */ |
| 1070 | ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | 1145 | ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); |
| 1071 | if (ret) | 1146 | if (ret) |
| 1072 | return ret; | 1147 | return ret; |
| 1073 | 1148 | ||
| 1074 | ret = intel_ring_begin(ring, 6); | 1149 | ret = intel_ring_begin(req, 6); |
| 1075 | if (ret) | 1150 | if (ret) |
| 1076 | return ret; | 1151 | return ret; |
| 1077 | 1152 | ||
| @@ -1087,8 +1162,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, | |||
| 1087 | } | 1162 | } |
| 1088 | 1163 | ||
| 1089 | static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt, | 1164 | static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt, |
| 1090 | struct intel_engine_cs *ring) | 1165 | struct drm_i915_gem_request *req) |
| 1091 | { | 1166 | { |
| 1167 | struct intel_engine_cs *ring = req->ring; | ||
| 1092 | struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); | 1168 | struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); |
| 1093 | 1169 | ||
| 1094 | I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); | 1170 | I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); |
| @@ -1097,16 +1173,17 @@ static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt, | |||
| 1097 | } | 1173 | } |
| 1098 | 1174 | ||
| 1099 | static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, | 1175 | static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, |
| 1100 | struct intel_engine_cs *ring) | 1176 | struct drm_i915_gem_request *req) |
| 1101 | { | 1177 | { |
| 1178 | struct intel_engine_cs *ring = req->ring; | ||
| 1102 | int ret; | 1179 | int ret; |
| 1103 | 1180 | ||
| 1104 | /* NB: TLBs must be flushed and invalidated before a switch */ | 1181 | /* NB: TLBs must be flushed and invalidated before a switch */ |
| 1105 | ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | 1182 | ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); |
| 1106 | if (ret) | 1183 | if (ret) |
| 1107 | return ret; | 1184 | return ret; |
| 1108 | 1185 | ||
| 1109 | ret = intel_ring_begin(ring, 6); | 1186 | ret = intel_ring_begin(req, 6); |
| 1110 | if (ret) | 1187 | if (ret) |
| 1111 | return ret; | 1188 | return ret; |
| 1112 | 1189 | ||
| @@ -1120,7 +1197,7 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, | |||
| 1120 | 1197 | ||
| 1121 | /* XXX: RCS is the only one to auto invalidate the TLBs? */ | 1198 | /* XXX: RCS is the only one to auto invalidate the TLBs? */ |
| 1122 | if (ring->id != RCS) { | 1199 | if (ring->id != RCS) { |
| 1123 | ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | 1200 | ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); |
| 1124 | if (ret) | 1201 | if (ret) |
| 1125 | return ret; | 1202 | return ret; |
| 1126 | } | 1203 | } |
| @@ -1129,8 +1206,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, | |||
| 1129 | } | 1206 | } |
| 1130 | 1207 | ||
| 1131 | static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, | 1208 | static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, |
| 1132 | struct intel_engine_cs *ring) | 1209 | struct drm_i915_gem_request *req) |
| 1133 | { | 1210 | { |
| 1211 | struct intel_engine_cs *ring = req->ring; | ||
| 1134 | struct drm_device *dev = ppgtt->base.dev; | 1212 | struct drm_device *dev = ppgtt->base.dev; |
| 1135 | struct drm_i915_private *dev_priv = dev->dev_private; | 1213 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1136 | 1214 | ||
| @@ -1214,19 +1292,20 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, | |||
| 1214 | unsigned first_pte = first_entry % GEN6_PTES; | 1292 | unsigned first_pte = first_entry % GEN6_PTES; |
| 1215 | unsigned last_pte, i; | 1293 | unsigned last_pte, i; |
| 1216 | 1294 | ||
| 1217 | scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0); | 1295 | scratch_pte = vm->pte_encode(px_dma(vm->scratch_page), |
| 1296 | I915_CACHE_LLC, true, 0); | ||
| 1218 | 1297 | ||
| 1219 | while (num_entries) { | 1298 | while (num_entries) { |
| 1220 | last_pte = first_pte + num_entries; | 1299 | last_pte = first_pte + num_entries; |
| 1221 | if (last_pte > GEN6_PTES) | 1300 | if (last_pte > GEN6_PTES) |
| 1222 | last_pte = GEN6_PTES; | 1301 | last_pte = GEN6_PTES; |
| 1223 | 1302 | ||
| 1224 | pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page); | 1303 | pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]); |
| 1225 | 1304 | ||
| 1226 | for (i = first_pte; i < last_pte; i++) | 1305 | for (i = first_pte; i < last_pte; i++) |
| 1227 | pt_vaddr[i] = scratch_pte; | 1306 | pt_vaddr[i] = scratch_pte; |
| 1228 | 1307 | ||
| 1229 | kunmap_atomic(pt_vaddr); | 1308 | kunmap_px(ppgtt, pt_vaddr); |
| 1230 | 1309 | ||
| 1231 | num_entries -= last_pte - first_pte; | 1310 | num_entries -= last_pte - first_pte; |
| 1232 | first_pte = 0; | 1311 | first_pte = 0; |
| @@ -1250,54 +1329,25 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, | |||
| 1250 | pt_vaddr = NULL; | 1329 | pt_vaddr = NULL; |
| 1251 | for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { | 1330 | for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { |
| 1252 | if (pt_vaddr == NULL) | 1331 | if (pt_vaddr == NULL) |
| 1253 | pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page); | 1332 | pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]); |
| 1254 | 1333 | ||
| 1255 | pt_vaddr[act_pte] = | 1334 | pt_vaddr[act_pte] = |
| 1256 | vm->pte_encode(sg_page_iter_dma_address(&sg_iter), | 1335 | vm->pte_encode(sg_page_iter_dma_address(&sg_iter), |
| 1257 | cache_level, true, flags); | 1336 | cache_level, true, flags); |
| 1258 | 1337 | ||
| 1259 | if (++act_pte == GEN6_PTES) { | 1338 | if (++act_pte == GEN6_PTES) { |
| 1260 | kunmap_atomic(pt_vaddr); | 1339 | kunmap_px(ppgtt, pt_vaddr); |
| 1261 | pt_vaddr = NULL; | 1340 | pt_vaddr = NULL; |
| 1262 | act_pt++; | 1341 | act_pt++; |
| 1263 | act_pte = 0; | 1342 | act_pte = 0; |
| 1264 | } | 1343 | } |
| 1265 | } | 1344 | } |
| 1266 | if (pt_vaddr) | 1345 | if (pt_vaddr) |
| 1267 | kunmap_atomic(pt_vaddr); | 1346 | kunmap_px(ppgtt, pt_vaddr); |
| 1268 | } | ||
| 1269 | |||
| 1270 | /* PDE TLBs are a pain invalidate pre GEN8. It requires a context reload. If we | ||
| 1271 | * are switching between contexts with the same LRCA, we also must do a force | ||
| 1272 | * restore. | ||
| 1273 | */ | ||
| 1274 | static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) | ||
| 1275 | { | ||
| 1276 | /* If current vm != vm, */ | ||
| 1277 | ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask; | ||
| 1278 | } | ||
| 1279 | |||
| 1280 | static void gen6_initialize_pt(struct i915_address_space *vm, | ||
| 1281 | struct i915_page_table *pt) | ||
| 1282 | { | ||
| 1283 | gen6_pte_t *pt_vaddr, scratch_pte; | ||
| 1284 | int i; | ||
| 1285 | |||
| 1286 | WARN_ON(vm->scratch.addr == 0); | ||
| 1287 | |||
| 1288 | scratch_pte = vm->pte_encode(vm->scratch.addr, | ||
| 1289 | I915_CACHE_LLC, true, 0); | ||
| 1290 | |||
| 1291 | pt_vaddr = kmap_atomic(pt->page); | ||
| 1292 | |||
| 1293 | for (i = 0; i < GEN6_PTES; i++) | ||
| 1294 | pt_vaddr[i] = scratch_pte; | ||
| 1295 | |||
| 1296 | kunmap_atomic(pt_vaddr); | ||
| 1297 | } | 1347 | } |
| 1298 | 1348 | ||
| 1299 | static int gen6_alloc_va_range(struct i915_address_space *vm, | 1349 | static int gen6_alloc_va_range(struct i915_address_space *vm, |
| 1300 | uint64_t start, uint64_t length) | 1350 | uint64_t start_in, uint64_t length_in) |
| 1301 | { | 1351 | { |
| 1302 | DECLARE_BITMAP(new_page_tables, I915_PDES); | 1352 | DECLARE_BITMAP(new_page_tables, I915_PDES); |
| 1303 | struct drm_device *dev = vm->dev; | 1353 | struct drm_device *dev = vm->dev; |
| @@ -1305,11 +1355,15 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, | |||
| 1305 | struct i915_hw_ppgtt *ppgtt = | 1355 | struct i915_hw_ppgtt *ppgtt = |
| 1306 | container_of(vm, struct i915_hw_ppgtt, base); | 1356 | container_of(vm, struct i915_hw_ppgtt, base); |
| 1307 | struct i915_page_table *pt; | 1357 | struct i915_page_table *pt; |
| 1308 | const uint32_t start_save = start, length_save = length; | 1358 | uint32_t start, length, start_save, length_save; |
| 1309 | uint32_t pde, temp; | 1359 | uint32_t pde, temp; |
| 1310 | int ret; | 1360 | int ret; |
| 1311 | 1361 | ||
| 1312 | WARN_ON(upper_32_bits(start)); | 1362 | if (WARN_ON(start_in + length_in > ppgtt->base.total)) |
| 1363 | return -ENODEV; | ||
| 1364 | |||
| 1365 | start = start_save = start_in; | ||
| 1366 | length = length_save = length_in; | ||
| 1313 | 1367 | ||
| 1314 | bitmap_zero(new_page_tables, I915_PDES); | 1368 | bitmap_zero(new_page_tables, I915_PDES); |
| 1315 | 1369 | ||
| @@ -1319,7 +1373,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, | |||
| 1319 | * tables. | 1373 | * tables. |
| 1320 | */ | 1374 | */ |
| 1321 | gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) { | 1375 | gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) { |
| 1322 | if (pt != ppgtt->scratch_pt) { | 1376 | if (pt != vm->scratch_pt) { |
| 1323 | WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES)); | 1377 | WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES)); |
| 1324 | continue; | 1378 | continue; |
| 1325 | } | 1379 | } |
| @@ -1327,7 +1381,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, | |||
| 1327 | /* We've already allocated a page table */ | 1381 | /* We've already allocated a page table */ |
| 1328 | WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES)); | 1382 | WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES)); |
| 1329 | 1383 | ||
| 1330 | pt = alloc_pt_single(dev); | 1384 | pt = alloc_pt(dev); |
| 1331 | if (IS_ERR(pt)) { | 1385 | if (IS_ERR(pt)) { |
| 1332 | ret = PTR_ERR(pt); | 1386 | ret = PTR_ERR(pt); |
| 1333 | goto unwind_out; | 1387 | goto unwind_out; |
| @@ -1336,7 +1390,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, | |||
| 1336 | gen6_initialize_pt(vm, pt); | 1390 | gen6_initialize_pt(vm, pt); |
| 1337 | 1391 | ||
| 1338 | ppgtt->pd.page_table[pde] = pt; | 1392 | ppgtt->pd.page_table[pde] = pt; |
| 1339 | set_bit(pde, new_page_tables); | 1393 | __set_bit(pde, new_page_tables); |
| 1340 | trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT); | 1394 | trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT); |
| 1341 | } | 1395 | } |
| 1342 | 1396 | ||
| @@ -1350,7 +1404,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, | |||
| 1350 | bitmap_set(tmp_bitmap, gen6_pte_index(start), | 1404 | bitmap_set(tmp_bitmap, gen6_pte_index(start), |
| 1351 | gen6_pte_count(start, length)); | 1405 | gen6_pte_count(start, length)); |
| 1352 | 1406 | ||
| 1353 | if (test_and_clear_bit(pde, new_page_tables)) | 1407 | if (__test_and_clear_bit(pde, new_page_tables)) |
| 1354 | gen6_write_pde(&ppgtt->pd, pde, pt); | 1408 | gen6_write_pde(&ppgtt->pd, pde, pt); |
| 1355 | 1409 | ||
| 1356 | trace_i915_page_table_entry_map(vm, pde, pt, | 1410 | trace_i915_page_table_entry_map(vm, pde, pt, |
| @@ -1374,14 +1428,41 @@ unwind_out: | |||
| 1374 | for_each_set_bit(pde, new_page_tables, I915_PDES) { | 1428 | for_each_set_bit(pde, new_page_tables, I915_PDES) { |
| 1375 | struct i915_page_table *pt = ppgtt->pd.page_table[pde]; | 1429 | struct i915_page_table *pt = ppgtt->pd.page_table[pde]; |
| 1376 | 1430 | ||
| 1377 | ppgtt->pd.page_table[pde] = ppgtt->scratch_pt; | 1431 | ppgtt->pd.page_table[pde] = vm->scratch_pt; |
| 1378 | unmap_and_free_pt(pt, vm->dev); | 1432 | free_pt(vm->dev, pt); |
| 1379 | } | 1433 | } |
| 1380 | 1434 | ||
| 1381 | mark_tlbs_dirty(ppgtt); | 1435 | mark_tlbs_dirty(ppgtt); |
| 1382 | return ret; | 1436 | return ret; |
| 1383 | } | 1437 | } |
| 1384 | 1438 | ||
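Besides the new start_in/length_in bounds check, the hunk above tightens the allocate-then-unwind pattern: newly allocated page tables are recorded in a local bitmap with __set_bit(), marked as written back with __test_and_clear_bit(), and anything still set on failure is freed in unwind_out. A self-contained sketch of the same pattern, detached from the GTT structures (example_alloc_range() and EXAMPLE_SLOTS are illustrative only):

	#include <linux/bitmap.h>
	#include <linux/bitops.h>
	#include <linux/errno.h>

	#define EXAMPLE_SLOTS 32

	/* Allocate missing entries, remembering which ones are new so a
	 * failure can release exactly those and nothing else. */
	static int example_alloc_range(void *table[], unsigned int first,
				       unsigned int count,
				       void *(*alloc)(void),
				       void (*release)(void *))
	{
		DECLARE_BITMAP(new_slots, EXAMPLE_SLOTS);
		unsigned int i;

		bitmap_zero(new_slots, EXAMPLE_SLOTS);

		for (i = first; i < first + count; i++) {
			if (table[i])
				continue;	/* already populated */

			table[i] = alloc();
			if (!table[i])
				goto unwind;

			__set_bit(i, new_slots);
		}

		/* A commit step would use __test_and_clear_bit(i, new_slots)
		 * here, just as gen6_write_pde() is gated above. */
		return 0;

	unwind:
		for_each_set_bit(i, new_slots, EXAMPLE_SLOTS) {
			release(table[i]);
			table[i] = NULL;
		}
		return -ENOMEM;
	}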
| 1439 | static int gen6_init_scratch(struct i915_address_space *vm) | ||
| 1440 | { | ||
| 1441 | struct drm_device *dev = vm->dev; | ||
| 1442 | |||
| 1443 | vm->scratch_page = alloc_scratch_page(dev); | ||
| 1444 | if (IS_ERR(vm->scratch_page)) | ||
| 1445 | return PTR_ERR(vm->scratch_page); | ||
| 1446 | |||
| 1447 | vm->scratch_pt = alloc_pt(dev); | ||
| 1448 | if (IS_ERR(vm->scratch_pt)) { | ||
| 1449 | free_scratch_page(dev, vm->scratch_page); | ||
| 1450 | return PTR_ERR(vm->scratch_pt); | ||
| 1451 | } | ||
| 1452 | |||
| 1453 | gen6_initialize_pt(vm, vm->scratch_pt); | ||
| 1454 | |||
| 1455 | return 0; | ||
| 1456 | } | ||
| 1457 | |||
| 1458 | static void gen6_free_scratch(struct i915_address_space *vm) | ||
| 1459 | { | ||
| 1460 | struct drm_device *dev = vm->dev; | ||
| 1461 | |||
| 1462 | free_pt(dev, vm->scratch_pt); | ||
| 1463 | free_scratch_page(dev, vm->scratch_page); | ||
| 1464 | } | ||
| 1465 | |||
| 1385 | static void gen6_ppgtt_cleanup(struct i915_address_space *vm) | 1466 | static void gen6_ppgtt_cleanup(struct i915_address_space *vm) |
| 1386 | { | 1467 | { |
| 1387 | struct i915_hw_ppgtt *ppgtt = | 1468 | struct i915_hw_ppgtt *ppgtt = |
| @@ -1389,20 +1470,19 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) | |||
| 1389 | struct i915_page_table *pt; | 1470 | struct i915_page_table *pt; |
| 1390 | uint32_t pde; | 1471 | uint32_t pde; |
| 1391 | 1472 | ||
| 1392 | |||
| 1393 | drm_mm_remove_node(&ppgtt->node); | 1473 | drm_mm_remove_node(&ppgtt->node); |
| 1394 | 1474 | ||
| 1395 | gen6_for_all_pdes(pt, ppgtt, pde) { | 1475 | gen6_for_all_pdes(pt, ppgtt, pde) { |
| 1396 | if (pt != ppgtt->scratch_pt) | 1476 | if (pt != vm->scratch_pt) |
| 1397 | unmap_and_free_pt(pt, ppgtt->base.dev); | 1477 | free_pt(ppgtt->base.dev, pt); |
| 1398 | } | 1478 | } |
| 1399 | 1479 | ||
| 1400 | unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev); | 1480 | gen6_free_scratch(vm); |
| 1401 | unmap_and_free_pd(&ppgtt->pd, ppgtt->base.dev); | ||
| 1402 | } | 1481 | } |
| 1403 | 1482 | ||
| 1404 | static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) | 1483 | static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) |
| 1405 | { | 1484 | { |
| 1485 | struct i915_address_space *vm = &ppgtt->base; | ||
| 1406 | struct drm_device *dev = ppgtt->base.dev; | 1486 | struct drm_device *dev = ppgtt->base.dev; |
| 1407 | struct drm_i915_private *dev_priv = dev->dev_private; | 1487 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1408 | bool retried = false; | 1488 | bool retried = false; |
| @@ -1413,11 +1493,10 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) | |||
| 1413 | * size. We allocate at the top of the GTT to avoid fragmentation. | 1493 | * size. We allocate at the top of the GTT to avoid fragmentation. |
| 1414 | */ | 1494 | */ |
| 1415 | BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm)); | 1495 | BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm)); |
| 1416 | ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev); | ||
| 1417 | if (IS_ERR(ppgtt->scratch_pt)) | ||
| 1418 | return PTR_ERR(ppgtt->scratch_pt); | ||
| 1419 | 1496 | ||
| 1420 | gen6_initialize_pt(&ppgtt->base, ppgtt->scratch_pt); | 1497 | ret = gen6_init_scratch(vm); |
| 1498 | if (ret) | ||
| 1499 | return ret; | ||
| 1421 | 1500 | ||
| 1422 | alloc: | 1501 | alloc: |
| 1423 | ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm, | 1502 | ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm, |
| @@ -1448,7 +1527,7 @@ alloc: | |||
| 1448 | return 0; | 1527 | return 0; |
| 1449 | 1528 | ||
| 1450 | err_out: | 1529 | err_out: |
| 1451 | unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev); | 1530 | gen6_free_scratch(vm); |
| 1452 | return ret; | 1531 | return ret; |
| 1453 | } | 1532 | } |
| 1454 | 1533 | ||
| @@ -1464,7 +1543,7 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt, | |||
| 1464 | uint32_t pde, temp; | 1543 | uint32_t pde, temp; |
| 1465 | 1544 | ||
| 1466 | gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) | 1545 | gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) |
| 1467 | ppgtt->pd.page_table[pde] = ppgtt->scratch_pt; | 1546 | ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt; |
| 1468 | } | 1547 | } |
| 1469 | 1548 | ||
| 1470 | static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) | 1549 | static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) |
| @@ -1500,11 +1579,11 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) | |||
| 1500 | ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE; | 1579 | ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE; |
| 1501 | ppgtt->debug_dump = gen6_dump_ppgtt; | 1580 | ppgtt->debug_dump = gen6_dump_ppgtt; |
| 1502 | 1581 | ||
| 1503 | ppgtt->pd.pd_offset = | 1582 | ppgtt->pd.base.ggtt_offset = |
| 1504 | ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t); | 1583 | ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t); |
| 1505 | 1584 | ||
| 1506 | ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm + | 1585 | ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm + |
| 1507 | ppgtt->pd.pd_offset / sizeof(gen6_pte_t); | 1586 | ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t); |
| 1508 | 1587 | ||
| 1509 | gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total); | 1588 | gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total); |
| 1510 | 1589 | ||
| @@ -1515,23 +1594,21 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) | |||
| 1515 | ppgtt->node.start / PAGE_SIZE); | 1594 | ppgtt->node.start / PAGE_SIZE); |
| 1516 | 1595 | ||
| 1517 | DRM_DEBUG("Adding PPGTT at offset %x\n", | 1596 | DRM_DEBUG("Adding PPGTT at offset %x\n", |
| 1518 | ppgtt->pd.pd_offset << 10); | 1597 | ppgtt->pd.base.ggtt_offset << 10); |
| 1519 | 1598 | ||
| 1520 | return 0; | 1599 | return 0; |
| 1521 | } | 1600 | } |
| 1522 | 1601 | ||
| 1523 | static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) | 1602 | static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) |
| 1524 | { | 1603 | { |
| 1525 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 1526 | |||
| 1527 | ppgtt->base.dev = dev; | 1604 | ppgtt->base.dev = dev; |
| 1528 | ppgtt->base.scratch = dev_priv->gtt.base.scratch; | ||
| 1529 | 1605 | ||
| 1530 | if (INTEL_INFO(dev)->gen < 8) | 1606 | if (INTEL_INFO(dev)->gen < 8) |
| 1531 | return gen6_ppgtt_init(ppgtt); | 1607 | return gen6_ppgtt_init(ppgtt); |
| 1532 | else | 1608 | else |
| 1533 | return gen8_ppgtt_init(ppgtt); | 1609 | return gen8_ppgtt_init(ppgtt); |
| 1534 | } | 1610 | } |
| 1611 | |||
| 1535 | int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) | 1612 | int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) |
| 1536 | { | 1613 | { |
| 1537 | struct drm_i915_private *dev_priv = dev->dev_private; | 1614 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -1550,11 +1627,6 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) | |||
| 1550 | 1627 | ||
| 1551 | int i915_ppgtt_init_hw(struct drm_device *dev) | 1628 | int i915_ppgtt_init_hw(struct drm_device *dev) |
| 1552 | { | 1629 | { |
| 1553 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 1554 | struct intel_engine_cs *ring; | ||
| 1555 | struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; | ||
| 1556 | int i, ret = 0; | ||
| 1557 | |||
| 1558 | /* In the case of execlists, PPGTT is enabled by the context descriptor | 1630 | /* In the case of execlists, PPGTT is enabled by the context descriptor |
| 1559 | * and the PDPs are contained within the context itself. We don't | 1631 | * and the PDPs are contained within the context itself. We don't |
| 1560 | * need to do anything here. */ | 1632 | * need to do anything here. */ |
| @@ -1573,16 +1645,23 @@ int i915_ppgtt_init_hw(struct drm_device *dev) | |||
| 1573 | else | 1645 | else |
| 1574 | MISSING_CASE(INTEL_INFO(dev)->gen); | 1646 | MISSING_CASE(INTEL_INFO(dev)->gen); |
| 1575 | 1647 | ||
| 1576 | if (ppgtt) { | 1648 | return 0; |
| 1577 | for_each_ring(ring, dev_priv, i) { | 1649 | } |
| 1578 | ret = ppgtt->switch_mm(ppgtt, ring); | ||
| 1579 | if (ret != 0) | ||
| 1580 | return ret; | ||
| 1581 | } | ||
| 1582 | } | ||
| 1583 | 1650 | ||
| 1584 | return ret; | 1651 | int i915_ppgtt_init_ring(struct drm_i915_gem_request *req) |
| 1652 | { | ||
| 1653 | struct drm_i915_private *dev_priv = req->ring->dev->dev_private; | ||
| 1654 | struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; | ||
| 1655 | |||
| 1656 | if (i915.enable_execlists) | ||
| 1657 | return 0; | ||
| 1658 | |||
| 1659 | if (!ppgtt) | ||
| 1660 | return 0; | ||
| 1661 | |||
| 1662 | return ppgtt->switch_mm(ppgtt, req); | ||
| 1585 | } | 1663 | } |
| 1664 | |||
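The new i915_ppgtt_init_ring() takes a request rather than a ring, in line with the request conversion elsewhere in this series. A hedged sketch of a caller, assuming a request has already been allocated for the ring that should pick up the aliasing PPGTT (example_enable_ppgtt_on_ring() is an illustrative name):

	/* Sketch only: switch the aliasing PPGTT in via an existing request. */
	static int example_enable_ppgtt_on_ring(struct drm_i915_gem_request *req)
	{
		int ret;

		ret = i915_ppgtt_init_ring(req);
		if (ret)
			DRM_ERROR("PPGTT enable ring failed %d\n", ret);

		return ret;
	}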
| 1586 | struct i915_hw_ppgtt * | 1665 | struct i915_hw_ppgtt * |
| 1587 | i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv) | 1666 | i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv) |
| 1588 | { | 1667 | { |
| @@ -1843,7 +1922,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, | |||
| 1843 | first_entry, num_entries, max_entries)) | 1922 | first_entry, num_entries, max_entries)) |
| 1844 | num_entries = max_entries; | 1923 | num_entries = max_entries; |
| 1845 | 1924 | ||
| 1846 | scratch_pte = gen8_pte_encode(vm->scratch.addr, | 1925 | scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page), |
| 1847 | I915_CACHE_LLC, | 1926 | I915_CACHE_LLC, |
| 1848 | use_scratch); | 1927 | use_scratch); |
| 1849 | for (i = 0; i < num_entries; i++) | 1928 | for (i = 0; i < num_entries; i++) |
| @@ -1869,7 +1948,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, | |||
| 1869 | first_entry, num_entries, max_entries)) | 1948 | first_entry, num_entries, max_entries)) |
| 1870 | num_entries = max_entries; | 1949 | num_entries = max_entries; |
| 1871 | 1950 | ||
| 1872 | scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0); | 1951 | scratch_pte = vm->pte_encode(px_dma(vm->scratch_page), |
| 1952 | I915_CACHE_LLC, use_scratch, 0); | ||
| 1873 | 1953 | ||
| 1874 | for (i = 0; i < num_entries; i++) | 1954 | for (i = 0; i < num_entries; i++) |
| 1875 | iowrite32(scratch_pte, &gtt_base[i]); | 1955 | iowrite32(scratch_pte, &gtt_base[i]); |
| @@ -2094,7 +2174,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, | |||
| 2094 | void i915_gem_init_global_gtt(struct drm_device *dev) | 2174 | void i915_gem_init_global_gtt(struct drm_device *dev) |
| 2095 | { | 2175 | { |
| 2096 | struct drm_i915_private *dev_priv = dev->dev_private; | 2176 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2097 | unsigned long gtt_size, mappable_size; | 2177 | u64 gtt_size, mappable_size; |
| 2098 | 2178 | ||
| 2099 | gtt_size = dev_priv->gtt.base.total; | 2179 | gtt_size = dev_priv->gtt.base.total; |
| 2100 | mappable_size = dev_priv->gtt.mappable_end; | 2180 | mappable_size = dev_priv->gtt.mappable_end; |
| @@ -2124,42 +2204,6 @@ void i915_global_gtt_cleanup(struct drm_device *dev) | |||
| 2124 | vm->cleanup(vm); | 2204 | vm->cleanup(vm); |
| 2125 | } | 2205 | } |
| 2126 | 2206 | ||
| 2127 | static int setup_scratch_page(struct drm_device *dev) | ||
| 2128 | { | ||
| 2129 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 2130 | struct page *page; | ||
| 2131 | dma_addr_t dma_addr; | ||
| 2132 | |||
| 2133 | page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); | ||
| 2134 | if (page == NULL) | ||
| 2135 | return -ENOMEM; | ||
| 2136 | set_pages_uc(page, 1); | ||
| 2137 | |||
| 2138 | #ifdef CONFIG_INTEL_IOMMU | ||
| 2139 | dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE, | ||
| 2140 | PCI_DMA_BIDIRECTIONAL); | ||
| 2141 | if (pci_dma_mapping_error(dev->pdev, dma_addr)) | ||
| 2142 | return -EINVAL; | ||
| 2143 | #else | ||
| 2144 | dma_addr = page_to_phys(page); | ||
| 2145 | #endif | ||
| 2146 | dev_priv->gtt.base.scratch.page = page; | ||
| 2147 | dev_priv->gtt.base.scratch.addr = dma_addr; | ||
| 2148 | |||
| 2149 | return 0; | ||
| 2150 | } | ||
| 2151 | |||
| 2152 | static void teardown_scratch_page(struct drm_device *dev) | ||
| 2153 | { | ||
| 2154 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 2155 | struct page *page = dev_priv->gtt.base.scratch.page; | ||
| 2156 | |||
| 2157 | set_pages_wb(page, 1); | ||
| 2158 | pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr, | ||
| 2159 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
| 2160 | __free_page(page); | ||
| 2161 | } | ||
| 2162 | |||
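The per-device setup_scratch_page()/teardown_scratch_page() pair removed here is replaced by the alloc_scratch_page()/free_scratch_page() helpers used earlier in this patch; those helpers are not part of this hunk. A plausible sketch only, assuming they wrap the same steps (zeroed DMA32 page, set uncached, optionally IOMMU-mapped) around the new struct i915_page_scratch, and not the patch's actual implementation:

	static struct i915_page_scratch *example_alloc_scratch_page(struct drm_device *dev)
	{
		struct i915_page_scratch *sp;

		sp = kzalloc(sizeof(*sp), GFP_KERNEL);
		if (!sp)
			return ERR_PTR(-ENOMEM);

		sp->base.page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
		if (!sp->base.page) {
			kfree(sp);
			return ERR_PTR(-ENOMEM);
		}
		set_pages_uc(sp->base.page, 1);

	#ifdef CONFIG_INTEL_IOMMU
		sp->base.daddr = pci_map_page(dev->pdev, sp->base.page, 0,
					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, sp->base.daddr)) {
			__free_page(sp->base.page);
			kfree(sp);
			return ERR_PTR(-EINVAL);
		}
	#else
		sp->base.daddr = page_to_phys(sp->base.page);
	#endif

		return sp;
	}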
| 2163 | static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) | 2207 | static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) |
| 2164 | { | 2208 | { |
| 2165 | snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; | 2209 | snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; |
| @@ -2242,8 +2286,8 @@ static int ggtt_probe_common(struct drm_device *dev, | |||
| 2242 | size_t gtt_size) | 2286 | size_t gtt_size) |
| 2243 | { | 2287 | { |
| 2244 | struct drm_i915_private *dev_priv = dev->dev_private; | 2288 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2289 | struct i915_page_scratch *scratch_page; | ||
| 2245 | phys_addr_t gtt_phys_addr; | 2290 | phys_addr_t gtt_phys_addr; |
| 2246 | int ret; | ||
| 2247 | 2291 | ||
| 2248 | /* For Modern GENs the PTEs and register space are split in the BAR */ | 2292 | /* For Modern GENs the PTEs and register space are split in the BAR */ |
| 2249 | gtt_phys_addr = pci_resource_start(dev->pdev, 0) + | 2293 | gtt_phys_addr = pci_resource_start(dev->pdev, 0) + |
| @@ -2265,14 +2309,17 @@ static int ggtt_probe_common(struct drm_device *dev, | |||
| 2265 | return -ENOMEM; | 2309 | return -ENOMEM; |
| 2266 | } | 2310 | } |
| 2267 | 2311 | ||
| 2268 | ret = setup_scratch_page(dev); | 2312 | scratch_page = alloc_scratch_page(dev); |
| 2269 | if (ret) { | 2313 | if (IS_ERR(scratch_page)) { |
| 2270 | DRM_ERROR("Scratch setup failed\n"); | 2314 | DRM_ERROR("Scratch setup failed\n"); |
| 2271 | /* iounmap will also get called at remove, but meh */ | 2315 | /* iounmap will also get called at remove, but meh */ |
| 2272 | iounmap(dev_priv->gtt.gsm); | 2316 | iounmap(dev_priv->gtt.gsm); |
| 2317 | return PTR_ERR(scratch_page); | ||
| 2273 | } | 2318 | } |
| 2274 | 2319 | ||
| 2275 | return ret; | 2320 | dev_priv->gtt.base.scratch_page = scratch_page; |
| 2321 | |||
| 2322 | return 0; | ||
| 2276 | } | 2323 | } |
| 2277 | 2324 | ||
| 2278 | /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability | 2325 | /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability |
| @@ -2349,13 +2396,13 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) | |||
| 2349 | } | 2396 | } |
| 2350 | 2397 | ||
| 2351 | static int gen8_gmch_probe(struct drm_device *dev, | 2398 | static int gen8_gmch_probe(struct drm_device *dev, |
| 2352 | size_t *gtt_total, | 2399 | u64 *gtt_total, |
| 2353 | size_t *stolen, | 2400 | size_t *stolen, |
| 2354 | phys_addr_t *mappable_base, | 2401 | phys_addr_t *mappable_base, |
| 2355 | unsigned long *mappable_end) | 2402 | u64 *mappable_end) |
| 2356 | { | 2403 | { |
| 2357 | struct drm_i915_private *dev_priv = dev->dev_private; | 2404 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2358 | unsigned int gtt_size; | 2405 | u64 gtt_size; |
| 2359 | u16 snb_gmch_ctl; | 2406 | u16 snb_gmch_ctl; |
| 2360 | int ret; | 2407 | int ret; |
| 2361 | 2408 | ||
| @@ -2397,10 +2444,10 @@ static int gen8_gmch_probe(struct drm_device *dev, | |||
| 2397 | } | 2444 | } |
| 2398 | 2445 | ||
| 2399 | static int gen6_gmch_probe(struct drm_device *dev, | 2446 | static int gen6_gmch_probe(struct drm_device *dev, |
| 2400 | size_t *gtt_total, | 2447 | u64 *gtt_total, |
| 2401 | size_t *stolen, | 2448 | size_t *stolen, |
| 2402 | phys_addr_t *mappable_base, | 2449 | phys_addr_t *mappable_base, |
| 2403 | unsigned long *mappable_end) | 2450 | u64 *mappable_end) |
| 2404 | { | 2451 | { |
| 2405 | struct drm_i915_private *dev_priv = dev->dev_private; | 2452 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2406 | unsigned int gtt_size; | 2453 | unsigned int gtt_size; |
| @@ -2414,7 +2461,7 @@ static int gen6_gmch_probe(struct drm_device *dev, | |||
| 2414 | * a coarse sanity check. | 2461 | * a coarse sanity check. |
| 2415 | */ | 2462 | */ |
| 2416 | if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) { | 2463 | if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) { |
| 2417 | DRM_ERROR("Unknown GMADR size (%lx)\n", | 2464 | DRM_ERROR("Unknown GMADR size (%llx)\n", |
| 2418 | dev_priv->gtt.mappable_end); | 2465 | dev_priv->gtt.mappable_end); |
| 2419 | return -ENXIO; | 2466 | return -ENXIO; |
| 2420 | } | 2467 | } |
| @@ -2444,14 +2491,14 @@ static void gen6_gmch_remove(struct i915_address_space *vm) | |||
| 2444 | struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base); | 2491 | struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base); |
| 2445 | 2492 | ||
| 2446 | iounmap(gtt->gsm); | 2493 | iounmap(gtt->gsm); |
| 2447 | teardown_scratch_page(vm->dev); | 2494 | free_scratch_page(vm->dev, vm->scratch_page); |
| 2448 | } | 2495 | } |
| 2449 | 2496 | ||
| 2450 | static int i915_gmch_probe(struct drm_device *dev, | 2497 | static int i915_gmch_probe(struct drm_device *dev, |
| 2451 | size_t *gtt_total, | 2498 | u64 *gtt_total, |
| 2452 | size_t *stolen, | 2499 | size_t *stolen, |
| 2453 | phys_addr_t *mappable_base, | 2500 | phys_addr_t *mappable_base, |
| 2454 | unsigned long *mappable_end) | 2501 | u64 *mappable_end) |
| 2455 | { | 2502 | { |
| 2456 | struct drm_i915_private *dev_priv = dev->dev_private; | 2503 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2457 | int ret; | 2504 | int ret; |
| @@ -2508,17 +2555,17 @@ int i915_gem_gtt_init(struct drm_device *dev) | |||
| 2508 | dev_priv->gtt.base.cleanup = gen6_gmch_remove; | 2555 | dev_priv->gtt.base.cleanup = gen6_gmch_remove; |
| 2509 | } | 2556 | } |
| 2510 | 2557 | ||
| 2558 | gtt->base.dev = dev; | ||
| 2559 | |||
| 2511 | ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size, | 2560 | ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size, |
| 2512 | &gtt->mappable_base, &gtt->mappable_end); | 2561 | &gtt->mappable_base, &gtt->mappable_end); |
| 2513 | if (ret) | 2562 | if (ret) |
| 2514 | return ret; | 2563 | return ret; |
| 2515 | 2564 | ||
| 2516 | gtt->base.dev = dev; | ||
| 2517 | |||
| 2518 | /* GMADR is the PCI mmio aperture into the global GTT. */ | 2565 | /* GMADR is the PCI mmio aperture into the global GTT. */ |
| 2519 | DRM_INFO("Memory usable by graphics device = %zdM\n", | 2566 | DRM_INFO("Memory usable by graphics device = %lluM\n", |
| 2520 | gtt->base.total >> 20); | 2567 | gtt->base.total >> 20); |
| 2521 | DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20); | 2568 | DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20); |
| 2522 | DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20); | 2569 | DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20); |
| 2523 | #ifdef CONFIG_INTEL_IOMMU | 2570 | #ifdef CONFIG_INTEL_IOMMU |
| 2524 | if (intel_iommu_gfx_mapped) | 2571 | if (intel_iommu_gfx_mapped) |
| @@ -2695,30 +2742,17 @@ static struct sg_table * | |||
| 2695 | intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view, | 2742 | intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view, |
| 2696 | struct drm_i915_gem_object *obj) | 2743 | struct drm_i915_gem_object *obj) |
| 2697 | { | 2744 | { |
| 2698 | struct drm_device *dev = obj->base.dev; | ||
| 2699 | struct intel_rotation_info *rot_info = &ggtt_view->rotation_info; | 2745 | struct intel_rotation_info *rot_info = &ggtt_view->rotation_info; |
| 2700 | unsigned long size, pages, rot_pages; | 2746 | unsigned int size_pages = rot_info->size >> PAGE_SHIFT; |
| 2701 | struct sg_page_iter sg_iter; | 2747 | struct sg_page_iter sg_iter; |
| 2702 | unsigned long i; | 2748 | unsigned long i; |
| 2703 | dma_addr_t *page_addr_list; | 2749 | dma_addr_t *page_addr_list; |
| 2704 | struct sg_table *st; | 2750 | struct sg_table *st; |
| 2705 | unsigned int tile_pitch, tile_height; | ||
| 2706 | unsigned int width_pages, height_pages; | ||
| 2707 | int ret = -ENOMEM; | 2751 | int ret = -ENOMEM; |
| 2708 | 2752 | ||
| 2709 | pages = obj->base.size / PAGE_SIZE; | ||
| 2710 | |||
| 2711 | /* Calculate tiling geometry. */ | ||
| 2712 | tile_height = intel_tile_height(dev, rot_info->pixel_format, | ||
| 2713 | rot_info->fb_modifier); | ||
| 2714 | tile_pitch = PAGE_SIZE / tile_height; | ||
| 2715 | width_pages = DIV_ROUND_UP(rot_info->pitch, tile_pitch); | ||
| 2716 | height_pages = DIV_ROUND_UP(rot_info->height, tile_height); | ||
| 2717 | rot_pages = width_pages * height_pages; | ||
| 2718 | size = rot_pages * PAGE_SIZE; | ||
| 2719 | |||
| 2720 | /* Allocate a temporary list of source pages for random access. */ | 2753 | /* Allocate a temporary list of source pages for random access. */ |
| 2721 | page_addr_list = drm_malloc_ab(pages, sizeof(dma_addr_t)); | 2754 | page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE, |
| 2755 | sizeof(dma_addr_t)); | ||
| 2722 | if (!page_addr_list) | 2756 | if (!page_addr_list) |
| 2723 | return ERR_PTR(ret); | 2757 | return ERR_PTR(ret); |
| 2724 | 2758 | ||
| @@ -2727,7 +2761,7 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view, | |||
| 2727 | if (!st) | 2761 | if (!st) |
| 2728 | goto err_st_alloc; | 2762 | goto err_st_alloc; |
| 2729 | 2763 | ||
| 2730 | ret = sg_alloc_table(st, rot_pages, GFP_KERNEL); | 2764 | ret = sg_alloc_table(st, size_pages, GFP_KERNEL); |
| 2731 | if (ret) | 2765 | if (ret) |
| 2732 | goto err_sg_alloc; | 2766 | goto err_sg_alloc; |
| 2733 | 2767 | ||
| @@ -2739,13 +2773,15 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view, | |||
| 2739 | } | 2773 | } |
| 2740 | 2774 | ||
| 2741 | /* Rotate the pages. */ | 2775 | /* Rotate the pages. */ |
| 2742 | rotate_pages(page_addr_list, width_pages, height_pages, st); | 2776 | rotate_pages(page_addr_list, |
| 2777 | rot_info->width_pages, rot_info->height_pages, | ||
| 2778 | st); | ||
| 2743 | 2779 | ||
| 2744 | DRM_DEBUG_KMS( | 2780 | DRM_DEBUG_KMS( |
| 2745 | "Created rotated page mapping for object size %lu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages).\n", | 2781 | "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages).\n", |
| 2746 | size, rot_info->pitch, rot_info->height, | 2782 | obj->base.size, rot_info->pitch, rot_info->height, |
| 2747 | rot_info->pixel_format, width_pages, height_pages, | 2783 | rot_info->pixel_format, rot_info->width_pages, |
| 2748 | rot_pages); | 2784 | rot_info->height_pages, size_pages); |
| 2749 | 2785 | ||
| 2750 | drm_free_large(page_addr_list); | 2786 | drm_free_large(page_addr_list); |
| 2751 | 2787 | ||
| @@ -2757,10 +2793,10 @@ err_st_alloc: | |||
| 2757 | drm_free_large(page_addr_list); | 2793 | drm_free_large(page_addr_list); |
| 2758 | 2794 | ||
| 2759 | DRM_DEBUG_KMS( | 2795 | DRM_DEBUG_KMS( |
| 2760 | "Failed to create rotated mapping for object size %lu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages)\n", | 2796 | "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages)\n", |
| 2761 | size, ret, rot_info->pitch, rot_info->height, | 2797 | obj->base.size, ret, rot_info->pitch, rot_info->height, |
| 2762 | rot_info->pixel_format, width_pages, height_pages, | 2798 | rot_info->pixel_format, rot_info->width_pages, |
| 2763 | rot_pages); | 2799 | rot_info->height_pages, size_pages); |
| 2764 | return ERR_PTR(ret); | 2800 | return ERR_PTR(ret); |
| 2765 | } | 2801 | } |
| 2766 | 2802 | ||
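intel_rotate_fb_obj_pages() no longer derives the tile geometry itself; it trusts width_pages, height_pages and size precomputed in struct intel_rotation_info. A sketch of how a caller could fill those fields, mirroring the arithmetic removed above (intel_tile_height() is the existing helper the old code used; example_fill_rotation_info() is an illustrative name, not where the patch actually does this):

	/* Sketch: precompute rotation geometry the way the removed code did. */
	static void example_fill_rotation_info(struct drm_device *dev,
					       struct intel_rotation_info *info)
	{
		unsigned int tile_height, tile_pitch;

		tile_height = intel_tile_height(dev, info->pixel_format,
						info->fb_modifier);
		tile_pitch = PAGE_SIZE / tile_height;

		info->width_pages = DIV_ROUND_UP(info->pitch, tile_pitch);
		info->height_pages = DIV_ROUND_UP(info->height, tile_height);
		info->size = (uint64_t)info->width_pages *
			     info->height_pages * PAGE_SIZE;
	}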
| @@ -2878,9 +2914,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, | |||
| 2878 | vma->node.size, | 2914 | vma->node.size, |
| 2879 | VM_TO_TRACE_NAME(vma->vm)); | 2915 | VM_TO_TRACE_NAME(vma->vm)); |
| 2880 | 2916 | ||
| 2917 | /* XXX: i915_vma_pin() will fix this +- hack */ | ||
| 2918 | vma->pin_count++; | ||
| 2881 | ret = vma->vm->allocate_va_range(vma->vm, | 2919 | ret = vma->vm->allocate_va_range(vma->vm, |
| 2882 | vma->node.start, | 2920 | vma->node.start, |
| 2883 | vma->node.size); | 2921 | vma->node.size); |
| 2922 | vma->pin_count--; | ||
| 2884 | if (ret) | 2923 | if (ret) |
| 2885 | return ret; | 2924 | return ret; |
| 2886 | } | 2925 | } |
| @@ -2905,9 +2944,10 @@ size_t | |||
| 2905 | i915_ggtt_view_size(struct drm_i915_gem_object *obj, | 2944 | i915_ggtt_view_size(struct drm_i915_gem_object *obj, |
| 2906 | const struct i915_ggtt_view *view) | 2945 | const struct i915_ggtt_view *view) |
| 2907 | { | 2946 | { |
| 2908 | if (view->type == I915_GGTT_VIEW_NORMAL || | 2947 | if (view->type == I915_GGTT_VIEW_NORMAL) { |
| 2909 | view->type == I915_GGTT_VIEW_ROTATED) { | ||
| 2910 | return obj->base.size; | 2948 | return obj->base.size; |
| 2949 | } else if (view->type == I915_GGTT_VIEW_ROTATED) { | ||
| 2950 | return view->rotation_info.size; | ||
| 2911 | } else if (view->type == I915_GGTT_VIEW_PARTIAL) { | 2951 | } else if (view->type == I915_GGTT_VIEW_PARTIAL) { |
| 2912 | return view->params.partial.size << PAGE_SHIFT; | 2952 | return view->params.partial.size << PAGE_SHIFT; |
| 2913 | } else { | 2953 | } else { |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 0d46dd20bf71..e1cfa292f9ad 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h | |||
| @@ -126,6 +126,8 @@ struct intel_rotation_info { | |||
| 126 | unsigned int pitch; | 126 | unsigned int pitch; |
| 127 | uint32_t pixel_format; | 127 | uint32_t pixel_format; |
| 128 | uint64_t fb_modifier; | 128 | uint64_t fb_modifier; |
| 129 | unsigned int width_pages, height_pages; | ||
| 130 | uint64_t size; | ||
| 129 | }; | 131 | }; |
| 130 | 132 | ||
| 131 | struct i915_ggtt_view { | 133 | struct i915_ggtt_view { |
| @@ -205,19 +207,34 @@ struct i915_vma { | |||
| 205 | #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf | 207 | #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf |
| 206 | }; | 208 | }; |
| 207 | 209 | ||
| 208 | struct i915_page_table { | 210 | struct i915_page_dma { |
| 209 | struct page *page; | 211 | struct page *page; |
| 210 | dma_addr_t daddr; | 212 | union { |
| 213 | dma_addr_t daddr; | ||
| 214 | |||
| 215 | /* For gen6/gen7 only. This is the offset in the GGTT | ||
| 216 | * where the page directory entries for PPGTT begin | ||
| 217 | */ | ||
| 218 | uint32_t ggtt_offset; | ||
| 219 | }; | ||
| 220 | }; | ||
| 221 | |||
| 222 | #define px_base(px) (&(px)->base) | ||
| 223 | #define px_page(px) (px_base(px)->page) | ||
| 224 | #define px_dma(px) (px_base(px)->daddr) | ||
| 225 | |||
| 226 | struct i915_page_scratch { | ||
| 227 | struct i915_page_dma base; | ||
| 228 | }; | ||
| 229 | |||
| 230 | struct i915_page_table { | ||
| 231 | struct i915_page_dma base; | ||
| 211 | 232 | ||
| 212 | unsigned long *used_ptes; | 233 | unsigned long *used_ptes; |
| 213 | }; | 234 | }; |
| 214 | 235 | ||
| 215 | struct i915_page_directory { | 236 | struct i915_page_directory { |
| 216 | struct page *page; /* NULL for GEN6-GEN7 */ | 237 | struct i915_page_dma base; |
| 217 | union { | ||
| 218 | uint32_t pd_offset; | ||
| 219 | dma_addr_t daddr; | ||
| 220 | }; | ||
| 221 | 238 | ||
| 222 | unsigned long *used_pdes; | 239 | unsigned long *used_pdes; |
| 223 | struct i915_page_table *page_table[I915_PDES]; /* PDEs */ | 240 | struct i915_page_table *page_table[I915_PDES]; /* PDEs */ |
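The new struct i915_page_dma consolidates the page/daddr bookkeeping, and the px_base()/px_page()/px_dma() macros work on any structure that embeds it as a member named base. A minimal illustration (example_px_accessors() is illustrative only):

	/* px_*() resolve through the embedded i915_page_dma 'base' member. */
	static void example_px_accessors(struct i915_page_table *pt,
					 struct i915_page_scratch *sp)
	{
		struct page *pt_page = px_page(pt);	/* pt->base.page  */
		dma_addr_t sp_daddr  = px_dma(sp);	/* sp->base.daddr */

		(void)pt_page;
		(void)sp_daddr;
	}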
| @@ -233,13 +250,12 @@ struct i915_address_space { | |||
| 233 | struct drm_mm mm; | 250 | struct drm_mm mm; |
| 234 | struct drm_device *dev; | 251 | struct drm_device *dev; |
| 235 | struct list_head global_link; | 252 | struct list_head global_link; |
| 236 | unsigned long start; /* Start offset always 0 for dri2 */ | 253 | u64 start; /* Start offset always 0 for dri2 */ |
| 237 | size_t total; /* size addr space maps (ex. 2GB for ggtt) */ | 254 | u64 total; /* size addr space maps (ex. 2GB for ggtt) */ |
| 238 | 255 | ||
| 239 | struct { | 256 | struct i915_page_scratch *scratch_page; |
| 240 | dma_addr_t addr; | 257 | struct i915_page_table *scratch_pt; |
| 241 | struct page *page; | 258 | struct i915_page_directory *scratch_pd; |
| 242 | } scratch; | ||
| 243 | 259 | ||
| 244 | /** | 260 | /** |
| 245 | * List of objects currently involved in rendering. | 261 | * List of objects currently involved in rendering. |
| @@ -300,9 +316,9 @@ struct i915_address_space { | |||
| 300 | */ | 316 | */ |
| 301 | struct i915_gtt { | 317 | struct i915_gtt { |
| 302 | struct i915_address_space base; | 318 | struct i915_address_space base; |
| 303 | size_t stolen_size; /* Total size of stolen memory */ | ||
| 304 | 319 | ||
| 305 | unsigned long mappable_end; /* End offset that we can CPU map */ | 320 | size_t stolen_size; /* Total size of stolen memory */ |
| 321 | u64 mappable_end; /* End offset that we can CPU map */ | ||
| 306 | struct io_mapping *mappable; /* Mapping to our CPU mappable region */ | 322 | struct io_mapping *mappable; /* Mapping to our CPU mappable region */ |
| 307 | phys_addr_t mappable_base; /* PA of our GMADR */ | 323 | phys_addr_t mappable_base; /* PA of our GMADR */ |
| 308 | 324 | ||
| @@ -314,9 +330,9 @@ struct i915_gtt { | |||
| 314 | int mtrr; | 330 | int mtrr; |
| 315 | 331 | ||
| 316 | /* global gtt ops */ | 332 | /* global gtt ops */ |
| 317 | int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total, | 333 | int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total, |
| 318 | size_t *stolen, phys_addr_t *mappable_base, | 334 | size_t *stolen, phys_addr_t *mappable_base, |
| 319 | unsigned long *mappable_end); | 335 | u64 *mappable_end); |
| 320 | }; | 336 | }; |
| 321 | 337 | ||
| 322 | struct i915_hw_ppgtt { | 338 | struct i915_hw_ppgtt { |
| @@ -329,16 +345,13 @@ struct i915_hw_ppgtt { | |||
| 329 | struct i915_page_directory pd; | 345 | struct i915_page_directory pd; |
| 330 | }; | 346 | }; |
| 331 | 347 | ||
| 332 | struct i915_page_table *scratch_pt; | ||
| 333 | struct i915_page_directory *scratch_pd; | ||
| 334 | |||
| 335 | struct drm_i915_file_private *file_priv; | 348 | struct drm_i915_file_private *file_priv; |
| 336 | 349 | ||
| 337 | gen6_pte_t __iomem *pd_addr; | 350 | gen6_pte_t __iomem *pd_addr; |
| 338 | 351 | ||
| 339 | int (*enable)(struct i915_hw_ppgtt *ppgtt); | 352 | int (*enable)(struct i915_hw_ppgtt *ppgtt); |
| 340 | int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, | 353 | int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, |
| 341 | struct intel_engine_cs *ring); | 354 | struct drm_i915_gem_request *req); |
| 342 | void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); | 355 | void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); |
| 343 | }; | 356 | }; |
| 344 | 357 | ||
| @@ -468,6 +481,14 @@ static inline size_t gen8_pte_count(uint64_t address, uint64_t length) | |||
| 468 | return i915_pte_count(address, length, GEN8_PDE_SHIFT); | 481 | return i915_pte_count(address, length, GEN8_PDE_SHIFT); |
| 469 | } | 482 | } |
| 470 | 483 | ||
| 484 | static inline dma_addr_t | ||
| 485 | i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n) | ||
| 486 | { | ||
| 487 | return test_bit(n, ppgtt->pdp.used_pdpes) ? | ||
| 488 | px_dma(ppgtt->pdp.page_directory[n]) : | ||
| 489 | px_dma(ppgtt->base.scratch_pd); | ||
| 490 | } | ||
| 491 | |||
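i915_page_dir_dma_addr() lets gen8 callers program page-directory pointers without special-casing holes: a PDP slot that was never allocated falls back to the scratch page directory. A hedged sketch of a consumer loop; the register write is elided and the count of four legacy PDPs is an assumption stated here rather than taken from this hunk:

	/* Sketch: pick a DMA address for each legacy PDP slot. */
	static void example_collect_pdp_addrs(struct i915_hw_ppgtt *ppgtt,
					      dma_addr_t addrs[4])
	{
		unsigned int n;

		for (n = 0; n < 4; n++)
			addrs[n] = i915_page_dir_dma_addr(ppgtt, n);
	}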
| 471 | int i915_gem_gtt_init(struct drm_device *dev); | 492 | int i915_gem_gtt_init(struct drm_device *dev); |
| 472 | void i915_gem_init_global_gtt(struct drm_device *dev); | 493 | void i915_gem_init_global_gtt(struct drm_device *dev); |
| 473 | void i915_global_gtt_cleanup(struct drm_device *dev); | 494 | void i915_global_gtt_cleanup(struct drm_device *dev); |
| @@ -475,6 +496,7 @@ void i915_global_gtt_cleanup(struct drm_device *dev); | |||
| 475 | 496 | ||
| 476 | int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt); | 497 | int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt); |
| 477 | int i915_ppgtt_init_hw(struct drm_device *dev); | 498 | int i915_ppgtt_init_hw(struct drm_device *dev); |
| 499 | int i915_ppgtt_init_ring(struct drm_i915_gem_request *req); | ||
| 478 | void i915_ppgtt_release(struct kref *kref); | 500 | void i915_ppgtt_release(struct kref *kref); |
| 479 | struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev, | 501 | struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev, |
| 480 | struct drm_i915_file_private *fpriv); | 502 | struct drm_i915_file_private *fpriv); |
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 521548a08578..a0201fc94d25 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c | |||
| @@ -152,29 +152,26 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *ring, | |||
| 152 | return 0; | 152 | return 0; |
| 153 | } | 153 | } |
| 154 | 154 | ||
| 155 | int i915_gem_render_state_init(struct intel_engine_cs *ring) | 155 | int i915_gem_render_state_init(struct drm_i915_gem_request *req) |
| 156 | { | 156 | { |
| 157 | struct render_state so; | 157 | struct render_state so; |
| 158 | int ret; | 158 | int ret; |
| 159 | 159 | ||
| 160 | ret = i915_gem_render_state_prepare(ring, &so); | 160 | ret = i915_gem_render_state_prepare(req->ring, &so); |
| 161 | if (ret) | 161 | if (ret) |
| 162 | return ret; | 162 | return ret; |
| 163 | 163 | ||
| 164 | if (so.rodata == NULL) | 164 | if (so.rodata == NULL) |
| 165 | return 0; | 165 | return 0; |
| 166 | 166 | ||
| 167 | ret = ring->dispatch_execbuffer(ring, | 167 | ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset, |
| 168 | so.ggtt_offset, | 168 | so.rodata->batch_items * 4, |
| 169 | so.rodata->batch_items * 4, | 169 | I915_DISPATCH_SECURE); |
| 170 | I915_DISPATCH_SECURE); | ||
| 171 | if (ret) | 170 | if (ret) |
| 172 | goto out; | 171 | goto out; |
| 173 | 172 | ||
| 174 | i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring); | 173 | i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req); |
| 175 | 174 | ||
| 176 | ret = __i915_add_request(ring, NULL, so.obj); | ||
| 177 | /* __i915_add_request moves object to inactive if it fails */ | ||
| 178 | out: | 175 | out: |
| 179 | i915_gem_render_state_fini(&so); | 176 | i915_gem_render_state_fini(&so); |
| 180 | return ret; | 177 | return ret; |
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h index c44961ed3fad..7aa73728178a 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.h +++ b/drivers/gpu/drm/i915/i915_gem_render_state.h | |||
| @@ -39,7 +39,7 @@ struct render_state { | |||
| 39 | int gen; | 39 | int gen; |
| 40 | }; | 40 | }; |
| 41 | 41 | ||
| 42 | int i915_gem_render_state_init(struct intel_engine_cs *ring); | 42 | int i915_gem_render_state_init(struct drm_i915_gem_request *req); |
| 43 | void i915_gem_render_state_fini(struct render_state *so); | 43 | void i915_gem_render_state_fini(struct render_state *so); |
| 44 | int i915_gem_render_state_prepare(struct intel_engine_cs *ring, | 44 | int i915_gem_render_state_prepare(struct intel_engine_cs *ring, |
| 45 | struct render_state *so); | 45 | struct render_state *so); |
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 8b5b784c62fe..ed682a9a9cbb 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c | |||
| @@ -42,6 +42,31 @@ | |||
| 42 | * for is a boon. | 42 | * for is a boon. |
| 43 | */ | 43 | */ |
| 44 | 44 | ||
| 45 | int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, | ||
| 46 | struct drm_mm_node *node, u64 size, | ||
| 47 | unsigned alignment) | ||
| 48 | { | ||
| 49 | int ret; | ||
| 50 | |||
| 51 | if (!drm_mm_initialized(&dev_priv->mm.stolen)) | ||
| 52 | return -ENODEV; | ||
| 53 | |||
| 54 | mutex_lock(&dev_priv->mm.stolen_lock); | ||
| 55 | ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size, alignment, | ||
| 56 | DRM_MM_SEARCH_DEFAULT); | ||
| 57 | mutex_unlock(&dev_priv->mm.stolen_lock); | ||
| 58 | |||
| 59 | return ret; | ||
| 60 | } | ||
| 61 | |||
| 62 | void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, | ||
| 63 | struct drm_mm_node *node) | ||
| 64 | { | ||
| 65 | mutex_lock(&dev_priv->mm.stolen_lock); | ||
| 66 | drm_mm_remove_node(node); | ||
| 67 | mutex_unlock(&dev_priv->mm.stolen_lock); | ||
| 68 | } | ||
| 69 | |||
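The two helpers above wrap every stolen-memory drm_mm manipulation in the new stolen_lock. A short usage sketch with error handling trimmed to the essentials (example_take_stolen()/example_put_stolen() are illustrative names; the real allocation path is shown further down in this file):

	/* Sketch: reserve and release a stolen-memory block via the helpers. */
	static struct drm_mm_node *
	example_take_stolen(struct drm_i915_private *dev_priv, u64 size)
	{
		struct drm_mm_node *node;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return NULL;

		if (i915_gem_stolen_insert_node(dev_priv, node, size, 4096)) {
			kfree(node);
			return NULL;
		}
		return node;
	}

	static void example_put_stolen(struct drm_i915_private *dev_priv,
				       struct drm_mm_node *node)
	{
		i915_gem_stolen_remove_node(dev_priv, node);
		kfree(node);
	}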
| 45 | static unsigned long i915_stolen_to_physical(struct drm_device *dev) | 70 | static unsigned long i915_stolen_to_physical(struct drm_device *dev) |
| 46 | { | 71 | { |
| 47 | struct drm_i915_private *dev_priv = dev->dev_private; | 72 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -151,134 +176,6 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) | |||
| 151 | return base; | 176 | return base; |
| 152 | } | 177 | } |
| 153 | 178 | ||
| 154 | static int find_compression_threshold(struct drm_device *dev, | ||
| 155 | struct drm_mm_node *node, | ||
| 156 | int size, | ||
| 157 | int fb_cpp) | ||
| 158 | { | ||
| 159 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 160 | int compression_threshold = 1; | ||
| 161 | int ret; | ||
| 162 | |||
| 163 | /* HACK: This code depends on what we will do in *_enable_fbc. If that | ||
| 164 | * code changes, this code needs to change as well. | ||
| 165 | * | ||
| 166 | * The enable_fbc code will attempt to use one of our 2 compression | ||
| 167 | * thresholds, therefore, in that case, we only have 1 resort. | ||
| 168 | */ | ||
| 169 | |||
| 170 | /* Try to over-allocate to reduce reallocations and fragmentation. */ | ||
| 171 | ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, | ||
| 172 | size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT); | ||
| 173 | if (ret == 0) | ||
| 174 | return compression_threshold; | ||
| 175 | |||
| 176 | again: | ||
| 177 | /* HW's ability to limit the CFB is 1:4 */ | ||
| 178 | if (compression_threshold > 4 || | ||
| 179 | (fb_cpp == 2 && compression_threshold == 2)) | ||
| 180 | return 0; | ||
| 181 | |||
| 182 | ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, | ||
| 183 | size >>= 1, 4096, | ||
| 184 | DRM_MM_SEARCH_DEFAULT); | ||
| 185 | if (ret && INTEL_INFO(dev)->gen <= 4) { | ||
| 186 | return 0; | ||
| 187 | } else if (ret) { | ||
| 188 | compression_threshold <<= 1; | ||
| 189 | goto again; | ||
| 190 | } else { | ||
| 191 | return compression_threshold; | ||
| 192 | } | ||
| 193 | } | ||
| 194 | |||
| 195 | static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp) | ||
| 196 | { | ||
| 197 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 198 | struct drm_mm_node *uninitialized_var(compressed_llb); | ||
| 199 | int ret; | ||
| 200 | |||
| 201 | ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb, | ||
| 202 | size, fb_cpp); | ||
| 203 | if (!ret) | ||
| 204 | goto err_llb; | ||
| 205 | else if (ret > 1) { | ||
| 206 | DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n"); | ||
| 207 | |||
| 208 | } | ||
| 209 | |||
| 210 | dev_priv->fbc.threshold = ret; | ||
| 211 | |||
| 212 | if (INTEL_INFO(dev_priv)->gen >= 5) | ||
| 213 | I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start); | ||
| 214 | else if (IS_GM45(dev)) { | ||
| 215 | I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start); | ||
| 216 | } else { | ||
| 217 | compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); | ||
| 218 | if (!compressed_llb) | ||
| 219 | goto err_fb; | ||
| 220 | |||
| 221 | ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb, | ||
| 222 | 4096, 4096, DRM_MM_SEARCH_DEFAULT); | ||
| 223 | if (ret) | ||
| 224 | goto err_fb; | ||
| 225 | |||
| 226 | dev_priv->fbc.compressed_llb = compressed_llb; | ||
| 227 | |||
| 228 | I915_WRITE(FBC_CFB_BASE, | ||
| 229 | dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start); | ||
| 230 | I915_WRITE(FBC_LL_BASE, | ||
| 231 | dev_priv->mm.stolen_base + compressed_llb->start); | ||
| 232 | } | ||
| 233 | |||
| 234 | dev_priv->fbc.uncompressed_size = size; | ||
| 235 | |||
| 236 | DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", | ||
| 237 | size); | ||
| 238 | |||
| 239 | return 0; | ||
| 240 | |||
| 241 | err_fb: | ||
| 242 | kfree(compressed_llb); | ||
| 243 | drm_mm_remove_node(&dev_priv->fbc.compressed_fb); | ||
| 244 | err_llb: | ||
| 245 | pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); | ||
| 246 | return -ENOSPC; | ||
| 247 | } | ||
| 248 | |||
| 249 | int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp) | ||
| 250 | { | ||
| 251 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 252 | |||
| 253 | if (!drm_mm_initialized(&dev_priv->mm.stolen)) | ||
| 254 | return -ENODEV; | ||
| 255 | |||
| 256 | if (size <= dev_priv->fbc.uncompressed_size) | ||
| 257 | return 0; | ||
| 258 | |||
| 259 | /* Release any current block */ | ||
| 260 | i915_gem_stolen_cleanup_compression(dev); | ||
| 261 | |||
| 262 | return i915_setup_compression(dev, size, fb_cpp); | ||
| 263 | } | ||
| 264 | |||
| 265 | void i915_gem_stolen_cleanup_compression(struct drm_device *dev) | ||
| 266 | { | ||
| 267 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 268 | |||
| 269 | if (dev_priv->fbc.uncompressed_size == 0) | ||
| 270 | return; | ||
| 271 | |||
| 272 | drm_mm_remove_node(&dev_priv->fbc.compressed_fb); | ||
| 273 | |||
| 274 | if (dev_priv->fbc.compressed_llb) { | ||
| 275 | drm_mm_remove_node(dev_priv->fbc.compressed_llb); | ||
| 276 | kfree(dev_priv->fbc.compressed_llb); | ||
| 277 | } | ||
| 278 | |||
| 279 | dev_priv->fbc.uncompressed_size = 0; | ||
| 280 | } | ||
| 281 | |||
| 282 | void i915_gem_cleanup_stolen(struct drm_device *dev) | 179 | void i915_gem_cleanup_stolen(struct drm_device *dev) |
| 283 | { | 180 | { |
| 284 | struct drm_i915_private *dev_priv = dev->dev_private; | 181 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -286,7 +183,6 @@ void i915_gem_cleanup_stolen(struct drm_device *dev) | |||
| 286 | if (!drm_mm_initialized(&dev_priv->mm.stolen)) | 183 | if (!drm_mm_initialized(&dev_priv->mm.stolen)) |
| 287 | return; | 184 | return; |
| 288 | 185 | ||
| 289 | i915_gem_stolen_cleanup_compression(dev); | ||
| 290 | drm_mm_takedown(&dev_priv->mm.stolen); | 186 | drm_mm_takedown(&dev_priv->mm.stolen); |
| 291 | } | 187 | } |
| 292 | 188 | ||
| @@ -296,6 +192,8 @@ int i915_gem_init_stolen(struct drm_device *dev) | |||
| 296 | u32 tmp; | 192 | u32 tmp; |
| 297 | int bios_reserved = 0; | 193 | int bios_reserved = 0; |
| 298 | 194 | ||
| 195 | mutex_init(&dev_priv->mm.stolen_lock); | ||
| 196 | |||
| 299 | #ifdef CONFIG_INTEL_IOMMU | 197 | #ifdef CONFIG_INTEL_IOMMU |
| 300 | if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) { | 198 | if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) { |
| 301 | DRM_INFO("DMAR active, disabling use of stolen memory\n"); | 199 | DRM_INFO("DMAR active, disabling use of stolen memory\n"); |
| @@ -386,8 +284,10 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj) | |||
| 386 | static void | 284 | static void |
| 387 | i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) | 285 | i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) |
| 388 | { | 286 | { |
| 287 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | ||
| 288 | |||
| 389 | if (obj->stolen) { | 289 | if (obj->stolen) { |
| 390 | drm_mm_remove_node(obj->stolen); | 290 | i915_gem_stolen_remove_node(dev_priv, obj->stolen); |
| 391 | kfree(obj->stolen); | 291 | kfree(obj->stolen); |
| 392 | obj->stolen = NULL; | 292 | obj->stolen = NULL; |
| 393 | } | 293 | } |
| @@ -448,8 +348,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size) | |||
| 448 | if (!stolen) | 348 | if (!stolen) |
| 449 | return NULL; | 349 | return NULL; |
| 450 | 350 | ||
| 451 | ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size, | 351 | ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096); |
| 452 | 4096, DRM_MM_SEARCH_DEFAULT); | ||
| 453 | if (ret) { | 352 | if (ret) { |
| 454 | kfree(stolen); | 353 | kfree(stolen); |
| 455 | return NULL; | 354 | return NULL; |
| @@ -459,7 +358,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size) | |||
| 459 | if (obj) | 358 | if (obj) |
| 460 | return obj; | 359 | return obj; |
| 461 | 360 | ||
| 462 | drm_mm_remove_node(stolen); | 361 | i915_gem_stolen_remove_node(dev_priv, stolen); |
| 463 | kfree(stolen); | 362 | kfree(stolen); |
| 464 | return NULL; | 363 | return NULL; |
| 465 | } | 364 | } |
| @@ -494,7 +393,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, | |||
| 494 | 393 | ||
| 495 | stolen->start = stolen_offset; | 394 | stolen->start = stolen_offset; |
| 496 | stolen->size = size; | 395 | stolen->size = size; |
| 396 | mutex_lock(&dev_priv->mm.stolen_lock); | ||
| 497 | ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen); | 397 | ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen); |
| 398 | mutex_unlock(&dev_priv->mm.stolen_lock); | ||
| 498 | if (ret) { | 399 | if (ret) { |
| 499 | DRM_DEBUG_KMS("failed to allocate stolen space\n"); | 400 | DRM_DEBUG_KMS("failed to allocate stolen space\n"); |
| 500 | kfree(stolen); | 401 | kfree(stolen); |
| @@ -504,7 +405,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, | |||
| 504 | obj = _i915_gem_object_create_stolen(dev, stolen); | 405 | obj = _i915_gem_object_create_stolen(dev, stolen); |
| 505 | if (obj == NULL) { | 406 | if (obj == NULL) { |
| 506 | DRM_DEBUG_KMS("failed to allocate stolen object\n"); | 407 | DRM_DEBUG_KMS("failed to allocate stolen object\n"); |
| 507 | drm_mm_remove_node(stolen); | 408 | i915_gem_stolen_remove_node(dev_priv, stolen); |
| 508 | kfree(stolen); | 409 | kfree(stolen); |
| 509 | return NULL; | 410 | return NULL; |
| 510 | } | 411 | } |
| @@ -545,7 +446,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, | |||
| 545 | err_vma: | 446 | err_vma: |
| 546 | i915_gem_vma_destroy(vma); | 447 | i915_gem_vma_destroy(vma); |
| 547 | err_out: | 448 | err_out: |
| 548 | drm_mm_remove_node(stolen); | 449 | i915_gem_stolen_remove_node(dev_priv, stolen); |
| 549 | kfree(stolen); | 450 | kfree(stolen); |
| 550 | drm_gem_object_unreference(&obj->base); | 451 | drm_gem_object_unreference(&obj->base); |
| 551 | return NULL; | 452 | return NULL; |
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c index 23aa04cded6b..97f3a5640289 100644 --- a/drivers/gpu/drm/i915/i915_ioc32.c +++ b/drivers/gpu/drm/i915/i915_ioc32.c | |||
| @@ -35,107 +35,20 @@ | |||
| 35 | #include <drm/i915_drm.h> | 35 | #include <drm/i915_drm.h> |
| 36 | #include "i915_drv.h" | 36 | #include "i915_drv.h" |
| 37 | 37 | ||
| 38 | typedef struct _drm_i915_batchbuffer32 { | 38 | struct drm_i915_getparam32 { |
| 39 | int start; /* agp offset */ | 39 | s32 param; |
| 40 | int used; /* nr bytes in use */ | 40 | /* |
| 41 | int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ | 41 | * We screwed up the generic ioctl struct here and used a variable-sized |
| 42 | int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ | 42 | * pointer. Use u32 in the compat struct to match the 32bit pointer |
| 43 | int num_cliprects; /* multipass with multiple cliprects? */ | 43 | * userspace expects. |
| 44 | u32 cliprects; /* pointer to userspace cliprects */ | 44 | */ |
| 45 | } drm_i915_batchbuffer32_t; | ||
| 46 | |||
| 47 | static int compat_i915_batchbuffer(struct file *file, unsigned int cmd, | ||
| 48 | unsigned long arg) | ||
| 49 | { | ||
| 50 | drm_i915_batchbuffer32_t batchbuffer32; | ||
| 51 | drm_i915_batchbuffer_t __user *batchbuffer; | ||
| 52 | |||
| 53 | if (copy_from_user | ||
| 54 | (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32))) | ||
| 55 | return -EFAULT; | ||
| 56 | |||
| 57 | batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer)); | ||
| 58 | if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer)) | ||
| 59 | || __put_user(batchbuffer32.start, &batchbuffer->start) | ||
| 60 | || __put_user(batchbuffer32.used, &batchbuffer->used) | ||
| 61 | || __put_user(batchbuffer32.DR1, &batchbuffer->DR1) | ||
| 62 | || __put_user(batchbuffer32.DR4, &batchbuffer->DR4) | ||
| 63 | || __put_user(batchbuffer32.num_cliprects, | ||
| 64 | &batchbuffer->num_cliprects) | ||
| 65 | || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects, | ||
| 66 | &batchbuffer->cliprects)) | ||
| 67 | return -EFAULT; | ||
| 68 | |||
| 69 | return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER, | ||
| 70 | (unsigned long)batchbuffer); | ||
| 71 | } | ||
| 72 | |||
| 73 | typedef struct _drm_i915_cmdbuffer32 { | ||
| 74 | u32 buf; /* pointer to userspace command buffer */ | ||
| 75 | int sz; /* nr bytes in buf */ | ||
| 76 | int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ | ||
| 77 | int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ | ||
| 78 | int num_cliprects; /* multipass with multiple cliprects? */ | ||
| 79 | u32 cliprects; /* pointer to userspace cliprects */ | ||
| 80 | } drm_i915_cmdbuffer32_t; | ||
| 81 | |||
| 82 | static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd, | ||
| 83 | unsigned long arg) | ||
| 84 | { | ||
| 85 | drm_i915_cmdbuffer32_t cmdbuffer32; | ||
| 86 | drm_i915_cmdbuffer_t __user *cmdbuffer; | ||
| 87 | |||
| 88 | if (copy_from_user | ||
| 89 | (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32))) | ||
| 90 | return -EFAULT; | ||
| 91 | |||
| 92 | cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer)); | ||
| 93 | if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer)) | ||
| 94 | || __put_user((int __user *)(unsigned long)cmdbuffer32.buf, | ||
| 95 | &cmdbuffer->buf) | ||
| 96 | || __put_user(cmdbuffer32.sz, &cmdbuffer->sz) | ||
| 97 | || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1) | ||
| 98 | || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4) | ||
| 99 | || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects) | ||
| 100 | || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects, | ||
| 101 | &cmdbuffer->cliprects)) | ||
| 102 | return -EFAULT; | ||
| 103 | |||
| 104 | return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER, | ||
| 105 | (unsigned long)cmdbuffer); | ||
| 106 | } | ||
| 107 | |||
| 108 | typedef struct drm_i915_irq_emit32 { | ||
| 109 | u32 irq_seq; | ||
| 110 | } drm_i915_irq_emit32_t; | ||
| 111 | |||
| 112 | static int compat_i915_irq_emit(struct file *file, unsigned int cmd, | ||
| 113 | unsigned long arg) | ||
| 114 | { | ||
| 115 | drm_i915_irq_emit32_t req32; | ||
| 116 | drm_i915_irq_emit_t __user *request; | ||
| 117 | |||
| 118 | if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) | ||
| 119 | return -EFAULT; | ||
| 120 | |||
| 121 | request = compat_alloc_user_space(sizeof(*request)); | ||
| 122 | if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) | ||
| 123 | || __put_user((int __user *)(unsigned long)req32.irq_seq, | ||
| 124 | &request->irq_seq)) | ||
| 125 | return -EFAULT; | ||
| 126 | |||
| 127 | return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT, | ||
| 128 | (unsigned long)request); | ||
| 129 | } | ||
| 130 | typedef struct drm_i915_getparam32 { | ||
| 131 | int param; | ||
| 132 | u32 value; | 45 | u32 value; |
| 133 | } drm_i915_getparam32_t; | 46 | }; |
| 134 | 47 | ||
| 135 | static int compat_i915_getparam(struct file *file, unsigned int cmd, | 48 | static int compat_i915_getparam(struct file *file, unsigned int cmd, |
| 136 | unsigned long arg) | 49 | unsigned long arg) |
| 137 | { | 50 | { |
| 138 | drm_i915_getparam32_t req32; | 51 | struct drm_i915_getparam32 req32; |
| 139 | drm_i915_getparam_t __user *request; | 52 | drm_i915_getparam_t __user *request; |
| 140 | 53 | ||
| 141 | if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) | 54 | if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) |
| @@ -152,41 +65,8 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd, | |||
| 152 | (unsigned long)request); | 65 | (unsigned long)request); |
| 153 | } | 66 | } |
| 154 | 67 | ||
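The layout quirk the comment above describes comes from 32-bit userspace passing a 4-byte pointer in drm_i915_getparam_t, which the compat struct models as u32. A hedged userspace-side sketch, assuming libdrm headers and its drmIoctl() wrapper (example_get_chipset_id() is an illustrative name):

	#include <xf86drm.h>
	#include <i915_drm.h>

	/* In a 32-bit process 'value' is a pointer and therefore 4 bytes wide. */
	static int example_get_chipset_id(int fd)
	{
		drm_i915_getparam_t gp = { 0 };
		int chipset_id = 0;

		gp.param = I915_PARAM_CHIPSET_ID;
		gp.value = &chipset_id;

		if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return -1;

		return chipset_id;
	}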
| 155 | typedef struct drm_i915_mem_alloc32 { | ||
| 156 | int region; | ||
| 157 | int alignment; | ||
| 158 | int size; | ||
| 159 | u32 region_offset; /* offset from start of fb or agp */ | ||
| 160 | } drm_i915_mem_alloc32_t; | ||
| 161 | |||
| 162 | static int compat_i915_alloc(struct file *file, unsigned int cmd, | ||
| 163 | unsigned long arg) | ||
| 164 | { | ||
| 165 | drm_i915_mem_alloc32_t req32; | ||
| 166 | drm_i915_mem_alloc_t __user *request; | ||
| 167 | |||
| 168 | if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) | ||
| 169 | return -EFAULT; | ||
| 170 | |||
| 171 | request = compat_alloc_user_space(sizeof(*request)); | ||
| 172 | if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) | ||
| 173 | || __put_user(req32.region, &request->region) | ||
| 174 | || __put_user(req32.alignment, &request->alignment) | ||
| 175 | || __put_user(req32.size, &request->size) | ||
| 176 | || __put_user((void __user *)(unsigned long)req32.region_offset, | ||
| 177 | &request->region_offset)) | ||
| 178 | return -EFAULT; | ||
| 179 | |||
| 180 | return drm_ioctl(file, DRM_IOCTL_I915_ALLOC, | ||
| 181 | (unsigned long)request); | ||
| 182 | } | ||
| 183 | |||
| 184 | static drm_ioctl_compat_t *i915_compat_ioctls[] = { | 68 | static drm_ioctl_compat_t *i915_compat_ioctls[] = { |
| 185 | [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer, | ||
| 186 | [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer, | ||
| 187 | [DRM_I915_GETPARAM] = compat_i915_getparam, | 69 | [DRM_I915_GETPARAM] = compat_i915_getparam, |
| 188 | [DRM_I915_IRQ_EMIT] = compat_i915_irq_emit, | ||
| 189 | [DRM_I915_ALLOC] = compat_i915_alloc | ||
| 190 | }; | 70 | }; |
| 191 | 71 | ||
| 192 | /** | 72 | /** |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 984e2fe6688c..d87f173a0179 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -564,8 +564,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) | |||
| 564 | u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; | 564 | u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; |
| 565 | struct intel_crtc *intel_crtc = | 565 | struct intel_crtc *intel_crtc = |
| 566 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); | 566 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
| 567 | const struct drm_display_mode *mode = | 567 | const struct drm_display_mode *mode = &intel_crtc->base.hwmode; |
| 568 | &intel_crtc->config->base.adjusted_mode; | ||
| 569 | 568 | ||
| 570 | htotal = mode->crtc_htotal; | 569 | htotal = mode->crtc_htotal; |
| 571 | hsync_start = mode->crtc_hsync_start; | 570 | hsync_start = mode->crtc_hsync_start; |
| @@ -620,7 +619,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) | |||
| 620 | { | 619 | { |
| 621 | struct drm_device *dev = crtc->base.dev; | 620 | struct drm_device *dev = crtc->base.dev; |
| 622 | struct drm_i915_private *dev_priv = dev->dev_private; | 621 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 623 | const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode; | 622 | const struct drm_display_mode *mode = &crtc->base.hwmode; |
| 624 | enum pipe pipe = crtc->pipe; | 623 | enum pipe pipe = crtc->pipe; |
| 625 | int position, vtotal; | 624 | int position, vtotal; |
| 626 | 625 | ||
| @@ -647,14 +646,14 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | |||
| 647 | struct drm_i915_private *dev_priv = dev->dev_private; | 646 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 648 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 647 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
| 649 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 648 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 650 | const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode; | 649 | const struct drm_display_mode *mode = &intel_crtc->base.hwmode; |
| 651 | int position; | 650 | int position; |
| 652 | int vbl_start, vbl_end, hsync_start, htotal, vtotal; | 651 | int vbl_start, vbl_end, hsync_start, htotal, vtotal; |
| 653 | bool in_vbl = true; | 652 | bool in_vbl = true; |
| 654 | int ret = 0; | 653 | int ret = 0; |
| 655 | unsigned long irqflags; | 654 | unsigned long irqflags; |
| 656 | 655 | ||
| 657 | if (!intel_crtc->active) { | 656 | if (WARN_ON(!mode->crtc_clock)) { |
| 658 | DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " | 657 | DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " |
| 659 | "pipe %c\n", pipe_name(pipe)); | 658 | "pipe %c\n", pipe_name(pipe)); |
| 660 | return 0; | 659 | return 0; |
| @@ -796,7 +795,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, | |||
| 796 | return -EINVAL; | 795 | return -EINVAL; |
| 797 | } | 796 | } |
| 798 | 797 | ||
| 799 | if (!crtc->state->enable) { | 798 | if (!crtc->hwmode.crtc_clock) { |
| 800 | DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); | 799 | DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); |
| 801 | return -EBUSY; | 800 | return -EBUSY; |
| 802 | } | 801 | } |
| @@ -805,151 +804,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, | |||
| 805 | return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, | 804 | return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, |
| 806 | vblank_time, flags, | 805 | vblank_time, flags, |
| 807 | crtc, | 806 | crtc, |
| 808 | &to_intel_crtc(crtc)->config->base.adjusted_mode); | 807 | &crtc->hwmode); |
| 809 | } | ||
| 810 | |||
| 811 | static bool intel_hpd_irq_event(struct drm_device *dev, | ||
| 812 | struct drm_connector *connector) | ||
| 813 | { | ||
| 814 | enum drm_connector_status old_status; | ||
| 815 | |||
| 816 | WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); | ||
| 817 | old_status = connector->status; | ||
| 818 | |||
| 819 | connector->status = connector->funcs->detect(connector, false); | ||
| 820 | if (old_status == connector->status) | ||
| 821 | return false; | ||
| 822 | |||
| 823 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", | ||
| 824 | connector->base.id, | ||
| 825 | connector->name, | ||
| 826 | drm_get_connector_status_name(old_status), | ||
| 827 | drm_get_connector_status_name(connector->status)); | ||
| 828 | |||
| 829 | return true; | ||
| 830 | } | ||
| 831 | |||
| 832 | static void i915_digport_work_func(struct work_struct *work) | ||
| 833 | { | ||
| 834 | struct drm_i915_private *dev_priv = | ||
| 835 | container_of(work, struct drm_i915_private, dig_port_work); | ||
| 836 | u32 long_port_mask, short_port_mask; | ||
| 837 | struct intel_digital_port *intel_dig_port; | ||
| 838 | int i; | ||
| 839 | u32 old_bits = 0; | ||
| 840 | |||
| 841 | spin_lock_irq(&dev_priv->irq_lock); | ||
| 842 | long_port_mask = dev_priv->long_hpd_port_mask; | ||
| 843 | dev_priv->long_hpd_port_mask = 0; | ||
| 844 | short_port_mask = dev_priv->short_hpd_port_mask; | ||
| 845 | dev_priv->short_hpd_port_mask = 0; | ||
| 846 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 847 | |||
| 848 | for (i = 0; i < I915_MAX_PORTS; i++) { | ||
| 849 | bool valid = false; | ||
| 850 | bool long_hpd = false; | ||
| 851 | intel_dig_port = dev_priv->hpd_irq_port[i]; | ||
| 852 | if (!intel_dig_port || !intel_dig_port->hpd_pulse) | ||
| 853 | continue; | ||
| 854 | |||
| 855 | if (long_port_mask & (1 << i)) { | ||
| 856 | valid = true; | ||
| 857 | long_hpd = true; | ||
| 858 | } else if (short_port_mask & (1 << i)) | ||
| 859 | valid = true; | ||
| 860 | |||
| 861 | if (valid) { | ||
| 862 | enum irqreturn ret; | ||
| 863 | |||
| 864 | ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd); | ||
| 865 | if (ret == IRQ_NONE) { | ||
| 866 | /* fall back to old school hpd */ | ||
| 867 | old_bits |= (1 << intel_dig_port->base.hpd_pin); | ||
| 868 | } | ||
| 869 | } | ||
| 870 | } | ||
| 871 | |||
| 872 | if (old_bits) { | ||
| 873 | spin_lock_irq(&dev_priv->irq_lock); | ||
| 874 | dev_priv->hpd_event_bits |= old_bits; | ||
| 875 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 876 | schedule_work(&dev_priv->hotplug_work); | ||
| 877 | } | ||
| 878 | } | ||
| 879 | |||
| 880 | /* | ||
| 881 | * Handle hotplug events outside the interrupt handler proper. | ||
| 882 | */ | ||
| 883 | #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) | ||
| 884 | |||
| 885 | static void i915_hotplug_work_func(struct work_struct *work) | ||
| 886 | { | ||
| 887 | struct drm_i915_private *dev_priv = | ||
| 888 | container_of(work, struct drm_i915_private, hotplug_work); | ||
| 889 | struct drm_device *dev = dev_priv->dev; | ||
| 890 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
| 891 | struct intel_connector *intel_connector; | ||
| 892 | struct intel_encoder *intel_encoder; | ||
| 893 | struct drm_connector *connector; | ||
| 894 | bool hpd_disabled = false; | ||
| 895 | bool changed = false; | ||
| 896 | u32 hpd_event_bits; | ||
| 897 | |||
| 898 | mutex_lock(&mode_config->mutex); | ||
| 899 | DRM_DEBUG_KMS("running encoder hotplug functions\n"); | ||
| 900 | |||
| 901 | spin_lock_irq(&dev_priv->irq_lock); | ||
| 902 | |||
| 903 | hpd_event_bits = dev_priv->hpd_event_bits; | ||
| 904 | dev_priv->hpd_event_bits = 0; | ||
| 905 | list_for_each_entry(connector, &mode_config->connector_list, head) { | ||
| 906 | intel_connector = to_intel_connector(connector); | ||
| 907 | if (!intel_connector->encoder) | ||
| 908 | continue; | ||
| 909 | intel_encoder = intel_connector->encoder; | ||
| 910 | if (intel_encoder->hpd_pin > HPD_NONE && | ||
| 911 | dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && | ||
| 912 | connector->polled == DRM_CONNECTOR_POLL_HPD) { | ||
| 913 | DRM_INFO("HPD interrupt storm detected on connector %s: " | ||
| 914 | "switching from hotplug detection to polling\n", | ||
| 915 | connector->name); | ||
| 916 | dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED; | ||
| 917 | connector->polled = DRM_CONNECTOR_POLL_CONNECT | ||
| 918 | | DRM_CONNECTOR_POLL_DISCONNECT; | ||
| 919 | hpd_disabled = true; | ||
| 920 | } | ||
| 921 | if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { | ||
| 922 | DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", | ||
| 923 | connector->name, intel_encoder->hpd_pin); | ||
| 924 | } | ||
| 925 | } | ||
| 926 | /* if there were no outputs to poll, poll was disabled, | ||
| 927 | * therefore make sure it's enabled when disabling HPD on | ||
| 928 | * some connectors */ | ||
| 929 | if (hpd_disabled) { | ||
| 930 | drm_kms_helper_poll_enable(dev); | ||
| 931 | mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work, | ||
| 932 | msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); | ||
| 933 | } | ||
| 934 | |||
| 935 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 936 | |||
| 937 | list_for_each_entry(connector, &mode_config->connector_list, head) { | ||
| 938 | intel_connector = to_intel_connector(connector); | ||
| 939 | if (!intel_connector->encoder) | ||
| 940 | continue; | ||
| 941 | intel_encoder = intel_connector->encoder; | ||
| 942 | if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { | ||
| 943 | if (intel_encoder->hot_plug) | ||
| 944 | intel_encoder->hot_plug(intel_encoder); | ||
| 945 | if (intel_hpd_irq_event(dev, connector)) | ||
| 946 | changed = true; | ||
| 947 | } | ||
| 948 | } | ||
| 949 | mutex_unlock(&mode_config->mutex); | ||
| 950 | |||
| 951 | if (changed) | ||
| 952 | drm_kms_helper_hotplug_event(dev); | ||
| 953 | } | 808 | } |
| 954 | 809 | ||
| 955 | static void ironlake_rps_change_irq_handler(struct drm_device *dev) | 810 | static void ironlake_rps_change_irq_handler(struct drm_device *dev) |
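Reviewer note on the hunk above: the hotplug bottom halves removed here (i915_digport_work_func, i915_hotplug_work_func, intel_hpd_irq_event and the storm-reenable bookkeeping) are not dropped outright; intel_irq_init() further down now calls intel_hpd_init_work(), so this logic evidently moves into a dedicated hotplug source file. The core of the per-connector bottom half is the detect-and-compare step; a condensed sketch of it, taken from the removed intel_hpd_irq_event() (the function name below is illustrative, mode_config.mutex is assumed held by the caller):

/* Re-probe one connector after an HPD event and report whether its
 * status actually changed; the caller sends the KMS hotplug uevent. */
static bool example_hpd_event(struct drm_connector *connector)
{
	enum drm_connector_status old_status = connector->status;

	/* 'false' = not a forced probe */
	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;	/* nothing changed, no uevent needed */

	return true;		/* caller calls drm_kms_helper_hotplug_event() */
}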
| @@ -1372,165 +1227,87 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv, | |||
| 1372 | return ret; | 1227 | return ret; |
| 1373 | } | 1228 | } |
| 1374 | 1229 | ||
| 1375 | #define HPD_STORM_DETECT_PERIOD 1000 | 1230 | static bool pch_port_hotplug_long_detect(enum port port, u32 val) |
| 1376 | #define HPD_STORM_THRESHOLD 5 | ||
| 1377 | |||
| 1378 | static int pch_port_to_hotplug_shift(enum port port) | ||
| 1379 | { | 1231 | { |
| 1380 | switch (port) { | 1232 | switch (port) { |
| 1381 | case PORT_A: | ||
| 1382 | case PORT_E: | ||
| 1383 | default: | ||
| 1384 | return -1; | ||
| 1385 | case PORT_B: | 1233 | case PORT_B: |
| 1386 | return 0; | 1234 | return val & PORTB_HOTPLUG_LONG_DETECT; |
| 1387 | case PORT_C: | 1235 | case PORT_C: |
| 1388 | return 8; | 1236 | return val & PORTC_HOTPLUG_LONG_DETECT; |
| 1389 | case PORT_D: | 1237 | case PORT_D: |
| 1390 | return 16; | 1238 | return val & PORTD_HOTPLUG_LONG_DETECT; |
| 1239 | default: | ||
| 1240 | return false; | ||
| 1391 | } | 1241 | } |
| 1392 | } | 1242 | } |
| 1393 | 1243 | ||
| 1394 | static int i915_port_to_hotplug_shift(enum port port) | 1244 | static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) |
| 1395 | { | 1245 | { |
| 1396 | switch (port) { | 1246 | switch (port) { |
| 1397 | case PORT_A: | ||
| 1398 | case PORT_E: | ||
| 1399 | default: | ||
| 1400 | return -1; | ||
| 1401 | case PORT_B: | 1247 | case PORT_B: |
| 1402 | return 17; | 1248 | return val & PORTB_HOTPLUG_INT_LONG_PULSE; |
| 1403 | case PORT_C: | 1249 | case PORT_C: |
| 1404 | return 19; | 1250 | return val & PORTC_HOTPLUG_INT_LONG_PULSE; |
| 1405 | case PORT_D: | 1251 | case PORT_D: |
| 1406 | return 21; | 1252 | return val & PORTD_HOTPLUG_INT_LONG_PULSE; |
| 1407 | } | ||
| 1408 | } | ||
| 1409 | |||
| 1410 | static enum port get_port_from_pin(enum hpd_pin pin) | ||
| 1411 | { | ||
| 1412 | switch (pin) { | ||
| 1413 | case HPD_PORT_B: | ||
| 1414 | return PORT_B; | ||
| 1415 | case HPD_PORT_C: | ||
| 1416 | return PORT_C; | ||
| 1417 | case HPD_PORT_D: | ||
| 1418 | return PORT_D; | ||
| 1419 | default: | 1253 | default: |
| 1420 | return PORT_A; /* no hpd */ | 1254 | return false; |
| 1421 | } | 1255 | } |
| 1422 | } | 1256 | } |
| 1423 | 1257 | ||
| 1424 | static void intel_hpd_irq_handler(struct drm_device *dev, | 1258 | /* Get a bit mask of pins that have triggered, and which ones may be long. */ |
| 1425 | u32 hotplug_trigger, | 1259 | static void pch_get_hpd_pins(u32 *pin_mask, u32 *long_mask, |
| 1426 | u32 dig_hotplug_reg, | 1260 | u32 hotplug_trigger, u32 dig_hotplug_reg, |
| 1427 | const u32 hpd[HPD_NUM_PINS]) | 1261 | const u32 hpd[HPD_NUM_PINS]) |
| 1428 | { | 1262 | { |
| 1429 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 1430 | int i; | ||
| 1431 | enum port port; | 1263 | enum port port; |
| 1432 | bool storm_detected = false; | 1264 | int i; |
| 1433 | bool queue_dig = false, queue_hp = false; | ||
| 1434 | u32 dig_shift; | ||
| 1435 | u32 dig_port_mask = 0; | ||
| 1436 | |||
| 1437 | if (!hotplug_trigger) | ||
| 1438 | return; | ||
| 1439 | 1265 | ||
| 1440 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n", | 1266 | *pin_mask = 0; |
| 1441 | hotplug_trigger, dig_hotplug_reg); | 1267 | *long_mask = 0; |
| 1442 | 1268 | ||
| 1443 | spin_lock(&dev_priv->irq_lock); | 1269 | for_each_hpd_pin(i) { |
| 1444 | for (i = 1; i < HPD_NUM_PINS; i++) { | 1270 | if ((hpd[i] & hotplug_trigger) == 0) |
| 1445 | if (!(hpd[i] & hotplug_trigger)) | ||
| 1446 | continue; | 1271 | continue; |
| 1447 | 1272 | ||
| 1448 | port = get_port_from_pin(i); | 1273 | *pin_mask |= BIT(i); |
| 1449 | if (port && dev_priv->hpd_irq_port[port]) { | ||
| 1450 | bool long_hpd; | ||
| 1451 | 1274 | ||
| 1452 | if (!HAS_GMCH_DISPLAY(dev_priv)) { | 1275 | port = intel_hpd_pin_to_port(i); |
| 1453 | dig_shift = pch_port_to_hotplug_shift(port); | 1276 | if (pch_port_hotplug_long_detect(port, dig_hotplug_reg)) |
| 1454 | long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; | 1277 | *long_mask |= BIT(i); |
| 1455 | } else { | ||
| 1456 | dig_shift = i915_port_to_hotplug_shift(port); | ||
| 1457 | long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; | ||
| 1458 | } | ||
| 1459 | |||
| 1460 | DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", | ||
| 1461 | port_name(port), | ||
| 1462 | long_hpd ? "long" : "short"); | ||
| 1463 | /* for long HPD pulses we want to have the digital queue happen, | ||
| 1464 | but we still want HPD storm detection to function. */ | ||
| 1465 | if (long_hpd) { | ||
| 1466 | dev_priv->long_hpd_port_mask |= (1 << port); | ||
| 1467 | dig_port_mask |= hpd[i]; | ||
| 1468 | } else { | ||
| 1469 | /* for short HPD just trigger the digital queue */ | ||
| 1470 | dev_priv->short_hpd_port_mask |= (1 << port); | ||
| 1471 | hotplug_trigger &= ~hpd[i]; | ||
| 1472 | } | ||
| 1473 | queue_dig = true; | ||
| 1474 | } | ||
| 1475 | } | 1278 | } |
| 1476 | 1279 | ||
| 1477 | for (i = 1; i < HPD_NUM_PINS; i++) { | 1280 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n", |
| 1478 | if (hpd[i] & hotplug_trigger && | 1281 | hotplug_trigger, dig_hotplug_reg, *pin_mask); |
| 1479 | dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { | ||
| 1480 | /* | ||
| 1481 | * On GMCH platforms the interrupt mask bits only | ||
| 1482 | * prevent irq generation, not the setting of the | ||
| 1483 | * hotplug bits itself. So only WARN about unexpected | ||
| 1484 | * interrupts on saner platforms. | ||
| 1485 | */ | ||
| 1486 | WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), | ||
| 1487 | "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", | ||
| 1488 | hotplug_trigger, i, hpd[i]); | ||
| 1489 | 1282 | ||
| 1490 | continue; | 1283 | } |
| 1491 | } | 1284 | |
| 1285 | /* Get a bit mask of pins that have triggered, and which ones may be long. */ | ||
| 1286 | static void i9xx_get_hpd_pins(u32 *pin_mask, u32 *long_mask, | ||
| 1287 | u32 hotplug_trigger, const u32 hpd[HPD_NUM_PINS]) | ||
| 1288 | { | ||
| 1289 | enum port port; | ||
| 1290 | int i; | ||
| 1291 | |||
| 1292 | *pin_mask = 0; | ||
| 1293 | *long_mask = 0; | ||
| 1294 | |||
| 1295 | if (!hotplug_trigger) | ||
| 1296 | return; | ||
| 1492 | 1297 | ||
| 1493 | if (!(hpd[i] & hotplug_trigger) || | 1298 | for_each_hpd_pin(i) { |
| 1494 | dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) | 1299 | if ((hpd[i] & hotplug_trigger) == 0) |
| 1495 | continue; | 1300 | continue; |
| 1496 | 1301 | ||
| 1497 | if (!(dig_port_mask & hpd[i])) { | 1302 | *pin_mask |= BIT(i); |
| 1498 | dev_priv->hpd_event_bits |= (1 << i); | ||
| 1499 | queue_hp = true; | ||
| 1500 | } | ||
| 1501 | 1303 | ||
| 1502 | if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, | 1304 | port = intel_hpd_pin_to_port(i); |
| 1503 | dev_priv->hpd_stats[i].hpd_last_jiffies | 1305 | if (i9xx_port_hotplug_long_detect(port, hotplug_trigger)) |
| 1504 | + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { | 1306 | *long_mask |= BIT(i); |
| 1505 | dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; | ||
| 1506 | dev_priv->hpd_stats[i].hpd_cnt = 0; | ||
| 1507 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); | ||
| 1508 | } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { | ||
| 1509 | dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; | ||
| 1510 | dev_priv->hpd_event_bits &= ~(1 << i); | ||
| 1511 | DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); | ||
| 1512 | storm_detected = true; | ||
| 1513 | } else { | ||
| 1514 | dev_priv->hpd_stats[i].hpd_cnt++; | ||
| 1515 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, | ||
| 1516 | dev_priv->hpd_stats[i].hpd_cnt); | ||
| 1517 | } | ||
| 1518 | } | 1307 | } |
| 1519 | 1308 | ||
| 1520 | if (storm_detected) | 1309 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, pins 0x%08x\n", |
| 1521 | dev_priv->display.hpd_irq_setup(dev); | 1310 | hotplug_trigger, *pin_mask); |
| 1522 | spin_unlock(&dev_priv->irq_lock); | ||
| 1523 | |||
| 1524 | /* | ||
| 1525 | * Our hotplug handler can grab modeset locks (by calling down into the | ||
| 1526 | * fb helpers). Hence it must not be run on our own dev-priv->wq work | ||
| 1527 | * queue for otherwise the flush_work in the pageflip code will | ||
| 1528 | * deadlock. | ||
| 1529 | */ | ||
| 1530 | if (queue_dig) | ||
| 1531 | queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work); | ||
| 1532 | if (queue_hp) | ||
| 1533 | schedule_work(&dev_priv->hotplug_work); | ||
| 1534 | } | 1311 | } |
| 1535 | 1312 | ||
| 1536 | static void gmbus_irq_handler(struct drm_device *dev) | 1313 | static void gmbus_irq_handler(struct drm_device *dev) |
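Reviewer note on the hunk above: the per-port shift tables are replaced by *_port_hotplug_long_detect() predicates plus two helpers that condense a hardware trigger word into a pin_mask/long_mask pair; intel_hpd_irq_handler() now just takes those two masks. The consumer side looks the same on every platform, as the ibx/cpt/bxt hunks below show; a condensed sketch of it (mirrors the ibx_irq_handler() change, all names as in this patch):

/* How a PCH IRQ handler consumes the new helpers. */
static void example_pch_hotplug(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
	u32 dig_hotplug_reg, pin_mask, long_mask;

	if (!hotplug_trigger)
		return;

	/* Read and clear the sticky long/short pulse status bits. */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	/* Condense trigger + status into per-pin masks... */
	pch_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			 dig_hotplug_reg, hpd_ibx);

	/* ...and let the shared handler queue the right bottom halves. */
	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}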
| @@ -1755,28 +1532,31 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev) | |||
| 1755 | { | 1532 | { |
| 1756 | struct drm_i915_private *dev_priv = dev->dev_private; | 1533 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1757 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | 1534 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); |
| 1535 | u32 pin_mask, long_mask; | ||
| 1758 | 1536 | ||
| 1759 | if (hotplug_status) { | 1537 | if (!hotplug_status) |
| 1760 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | 1538 | return; |
| 1761 | /* | ||
| 1762 | * Make sure hotplug status is cleared before we clear IIR, or else we | ||
| 1763 | * may miss hotplug events. | ||
| 1764 | */ | ||
| 1765 | POSTING_READ(PORT_HOTPLUG_STAT); | ||
| 1766 | 1539 | ||
| 1767 | if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { | 1540 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
| 1768 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; | 1541 | /* |
| 1542 | * Make sure hotplug status is cleared before we clear IIR, or else we | ||
| 1543 | * may miss hotplug events. | ||
| 1544 | */ | ||
| 1545 | POSTING_READ(PORT_HOTPLUG_STAT); | ||
| 1769 | 1546 | ||
| 1770 | intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x); | 1547 | if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { |
| 1771 | } else { | 1548 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; |
| 1772 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; | ||
| 1773 | 1549 | ||
| 1774 | intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915); | 1550 | i9xx_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, hpd_status_g4x); |
| 1775 | } | 1551 | intel_hpd_irq_handler(dev, pin_mask, long_mask); |
| 1776 | 1552 | ||
| 1777 | if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && | 1553 | if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) |
| 1778 | hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) | ||
| 1779 | dp_aux_irq_handler(dev); | 1554 | dp_aux_irq_handler(dev); |
| 1555 | } else { | ||
| 1556 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; | ||
| 1557 | |||
| 1558 | i9xx_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, hpd_status_i915); | ||
| 1559 | intel_hpd_irq_handler(dev, pin_mask, long_mask); | ||
| 1780 | } | 1560 | } |
| 1781 | } | 1561 | } |
| 1782 | 1562 | ||
| @@ -1875,12 +1655,17 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) | |||
| 1875 | struct drm_i915_private *dev_priv = dev->dev_private; | 1655 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1876 | int pipe; | 1656 | int pipe; |
| 1877 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; | 1657 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; |
| 1878 | u32 dig_hotplug_reg; | ||
| 1879 | 1658 | ||
| 1880 | dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); | 1659 | if (hotplug_trigger) { |
| 1881 | I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); | 1660 | u32 dig_hotplug_reg, pin_mask, long_mask; |
| 1661 | |||
| 1662 | dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); | ||
| 1663 | I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); | ||
| 1882 | 1664 | ||
| 1883 | intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx); | 1665 | pch_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, |
| 1666 | dig_hotplug_reg, hpd_ibx); | ||
| 1667 | intel_hpd_irq_handler(dev, pin_mask, long_mask); | ||
| 1668 | } | ||
| 1884 | 1669 | ||
| 1885 | if (pch_iir & SDE_AUDIO_POWER_MASK) { | 1670 | if (pch_iir & SDE_AUDIO_POWER_MASK) { |
| 1886 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> | 1671 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> |
| @@ -1972,12 +1757,16 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) | |||
| 1972 | struct drm_i915_private *dev_priv = dev->dev_private; | 1757 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1973 | int pipe; | 1758 | int pipe; |
| 1974 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; | 1759 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; |
| 1975 | u32 dig_hotplug_reg; | ||
| 1976 | 1760 | ||
| 1977 | dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); | 1761 | if (hotplug_trigger) { |
| 1978 | I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); | 1762 | u32 dig_hotplug_reg, pin_mask, long_mask; |
| 1979 | 1763 | ||
| 1980 | intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt); | 1764 | dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); |
| 1765 | I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); | ||
| 1766 | pch_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, | ||
| 1767 | dig_hotplug_reg, hpd_cpt); | ||
| 1768 | intel_hpd_irq_handler(dev, pin_mask, long_mask); | ||
| 1769 | } | ||
| 1981 | 1770 | ||
| 1982 | if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { | 1771 | if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { |
| 1983 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> | 1772 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> |
| @@ -2176,8 +1965,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) | |||
| 2176 | static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status) | 1965 | static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status) |
| 2177 | { | 1966 | { |
| 2178 | struct drm_i915_private *dev_priv = dev->dev_private; | 1967 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2179 | uint32_t hp_control; | 1968 | u32 hp_control, hp_trigger; |
| 2180 | uint32_t hp_trigger; | 1969 | u32 pin_mask, long_mask; |
| 2181 | 1970 | ||
| 2182 | /* Get the status */ | 1971 | /* Get the status */ |
| 2183 | hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK; | 1972 | hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK; |
| @@ -2189,20 +1978,11 @@ static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status) | |||
| 2189 | return; | 1978 | return; |
| 2190 | } | 1979 | } |
| 2191 | 1980 | ||
| 2192 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", | ||
| 2193 | hp_control & BXT_HOTPLUG_CTL_MASK); | ||
| 2194 | |||
| 2195 | /* Check for HPD storm and schedule bottom half */ | ||
| 2196 | intel_hpd_irq_handler(dev, hp_trigger, hp_control, hpd_bxt); | ||
| 2197 | |||
| 2198 | /* | ||
| 2199 | * FIXME: Save the hot plug status for bottom half before | ||
| 2200 | * clearing the sticky status bits, else the status will be | ||
| 2201 | * lost. | ||
| 2202 | */ | ||
| 2203 | |||
| 2204 | /* Clear sticky bits in hpd status */ | 1981 | /* Clear sticky bits in hpd status */ |
| 2205 | I915_WRITE(BXT_HOTPLUG_CTL, hp_control); | 1982 | I915_WRITE(BXT_HOTPLUG_CTL, hp_control); |
| 1983 | |||
| 1984 | pch_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control, hpd_bxt); | ||
| 1985 | intel_hpd_irq_handler(dev, pin_mask, long_mask); | ||
| 2206 | } | 1986 | } |
| 2207 | 1987 | ||
| 2208 | static irqreturn_t gen8_irq_handler(int irq, void *arg) | 1988 | static irqreturn_t gen8_irq_handler(int irq, void *arg) |
| @@ -3203,12 +2983,12 @@ static void ibx_hpd_irq_setup(struct drm_device *dev) | |||
| 3203 | if (HAS_PCH_IBX(dev)) { | 2983 | if (HAS_PCH_IBX(dev)) { |
| 3204 | hotplug_irqs = SDE_HOTPLUG_MASK; | 2984 | hotplug_irqs = SDE_HOTPLUG_MASK; |
| 3205 | for_each_intel_encoder(dev, intel_encoder) | 2985 | for_each_intel_encoder(dev, intel_encoder) |
| 3206 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) | 2986 | if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED) |
| 3207 | enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; | 2987 | enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; |
| 3208 | } else { | 2988 | } else { |
| 3209 | hotplug_irqs = SDE_HOTPLUG_MASK_CPT; | 2989 | hotplug_irqs = SDE_HOTPLUG_MASK_CPT; |
| 3210 | for_each_intel_encoder(dev, intel_encoder) | 2990 | for_each_intel_encoder(dev, intel_encoder) |
| 3211 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) | 2991 | if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED) |
| 3212 | enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; | 2992 | enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; |
| 3213 | } | 2993 | } |
| 3214 | 2994 | ||
| @@ -3237,7 +3017,7 @@ static void bxt_hpd_irq_setup(struct drm_device *dev) | |||
| 3237 | 3017 | ||
| 3238 | /* Now, enable HPD */ | 3018 | /* Now, enable HPD */ |
| 3239 | for_each_intel_encoder(dev, intel_encoder) { | 3019 | for_each_intel_encoder(dev, intel_encoder) { |
| 3240 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark | 3020 | if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state |
| 3241 | == HPD_ENABLED) | 3021 | == HPD_ENABLED) |
| 3242 | hotplug_port |= hpd_bxt[intel_encoder->hpd_pin]; | 3022 | hotplug_port |= hpd_bxt[intel_encoder->hpd_pin]; |
| 3243 | } | 3023 | } |
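Reviewer note on the setup hunks above: the accessors switch from dev_priv->hpd_stats[pin].hpd_mark to dev_priv->hotplug.stats[pin].state, i.e. the HPD bookkeeping is gathered into a dedicated hotplug substruct. The struct definition is not part of this excerpt; the following is only an assumed shape, inferred from the old fields and the new accessors, for orientation while reading the hunks:

/* Assumed shape only -- inferred from the accessors in this patch; the
 * real definition is not shown in this excerpt. */
struct i915_hotplug {
	struct {
		unsigned long last_jiffies;	/* presumably the old hpd_last_jiffies */
		int count;			/* presumably the old hpd_cnt */
		int state;			/* HPD_ENABLED / HPD_MARK_DISABLED / HPD_DISABLED */
	} stats[HPD_NUM_PINS];
	/* plus the work items and port masks that used to sit directly in
	 * drm_i915_private (hotplug_work, dig_port_work, ...) */
};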
| @@ -4130,7 +3910,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev) | |||
| 4130 | /* Note HDMI and DP share hotplug bits */ | 3910 | /* Note HDMI and DP share hotplug bits */ |
| 4131 | /* enable bits are the same for all generations */ | 3911 | /* enable bits are the same for all generations */ |
| 4132 | for_each_intel_encoder(dev, intel_encoder) | 3912 | for_each_intel_encoder(dev, intel_encoder) |
| 4133 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) | 3913 | if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED) |
| 4134 | hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; | 3914 | hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; |
| 4135 | /* Programming the CRT detection parameters tends | 3915 | /* Programming the CRT detection parameters tends |
| 4136 | to generate a spurious hotplug event about three | 3916 | to generate a spurious hotplug event about three |
| @@ -4270,46 +4050,6 @@ static void i965_irq_uninstall(struct drm_device * dev) | |||
| 4270 | I915_WRITE(IIR, I915_READ(IIR)); | 4050 | I915_WRITE(IIR, I915_READ(IIR)); |
| 4271 | } | 4051 | } |
| 4272 | 4052 | ||
| 4273 | static void intel_hpd_irq_reenable_work(struct work_struct *work) | ||
| 4274 | { | ||
| 4275 | struct drm_i915_private *dev_priv = | ||
| 4276 | container_of(work, typeof(*dev_priv), | ||
| 4277 | hotplug_reenable_work.work); | ||
| 4278 | struct drm_device *dev = dev_priv->dev; | ||
| 4279 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
| 4280 | int i; | ||
| 4281 | |||
| 4282 | intel_runtime_pm_get(dev_priv); | ||
| 4283 | |||
| 4284 | spin_lock_irq(&dev_priv->irq_lock); | ||
| 4285 | for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { | ||
| 4286 | struct drm_connector *connector; | ||
| 4287 | |||
| 4288 | if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) | ||
| 4289 | continue; | ||
| 4290 | |||
| 4291 | dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; | ||
| 4292 | |||
| 4293 | list_for_each_entry(connector, &mode_config->connector_list, head) { | ||
| 4294 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
| 4295 | |||
| 4296 | if (intel_connector->encoder->hpd_pin == i) { | ||
| 4297 | if (connector->polled != intel_connector->polled) | ||
| 4298 | DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", | ||
| 4299 | connector->name); | ||
| 4300 | connector->polled = intel_connector->polled; | ||
| 4301 | if (!connector->polled) | ||
| 4302 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
| 4303 | } | ||
| 4304 | } | ||
| 4305 | } | ||
| 4306 | if (dev_priv->display.hpd_irq_setup) | ||
| 4307 | dev_priv->display.hpd_irq_setup(dev); | ||
| 4308 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 4309 | |||
| 4310 | intel_runtime_pm_put(dev_priv); | ||
| 4311 | } | ||
| 4312 | |||
| 4313 | /** | 4053 | /** |
| 4314 | * intel_irq_init - initializes irq support | 4054 | * intel_irq_init - initializes irq support |
| 4315 | * @dev_priv: i915 device instance | 4055 | * @dev_priv: i915 device instance |
| @@ -4321,8 +4061,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv) | |||
| 4321 | { | 4061 | { |
| 4322 | struct drm_device *dev = dev_priv->dev; | 4062 | struct drm_device *dev = dev_priv->dev; |
| 4323 | 4063 | ||
| 4324 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); | 4064 | intel_hpd_init_work(dev_priv); |
| 4325 | INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func); | 4065 | |
| 4326 | INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); | 4066 | INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); |
| 4327 | INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); | 4067 | INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); |
| 4328 | 4068 | ||
| @@ -4335,8 +4075,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv) | |||
| 4335 | 4075 | ||
| 4336 | INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work, | 4076 | INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work, |
| 4337 | i915_hangcheck_elapsed); | 4077 | i915_hangcheck_elapsed); |
| 4338 | INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work, | ||
| 4339 | intel_hpd_irq_reenable_work); | ||
| 4340 | 4078 | ||
| 4341 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); | 4079 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); |
| 4342 | 4080 | ||
| @@ -4422,46 +4160,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv) | |||
| 4422 | } | 4160 | } |
| 4423 | 4161 | ||
| 4424 | /** | 4162 | /** |
| 4425 | * intel_hpd_init - initializes and enables hpd support | ||
| 4426 | * @dev_priv: i915 device instance | ||
| 4427 | * | ||
| 4428 | * This function enables the hotplug support. It requires that interrupts have | ||
| 4429 | * already been enabled with intel_irq_init_hw(). From this point on hotplug and | ||
| 4430 | * poll request can run concurrently to other code, so locking rules must be | ||
| 4431 | * obeyed. | ||
| 4432 | * | ||
| 4433 | * This is a separate step from interrupt enabling to simplify the locking rules | ||
| 4434 | * in the driver load and resume code. | ||
| 4435 | */ | ||
| 4436 | void intel_hpd_init(struct drm_i915_private *dev_priv) | ||
| 4437 | { | ||
| 4438 | struct drm_device *dev = dev_priv->dev; | ||
| 4439 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
| 4440 | struct drm_connector *connector; | ||
| 4441 | int i; | ||
| 4442 | |||
| 4443 | for (i = 1; i < HPD_NUM_PINS; i++) { | ||
| 4444 | dev_priv->hpd_stats[i].hpd_cnt = 0; | ||
| 4445 | dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; | ||
| 4446 | } | ||
| 4447 | list_for_each_entry(connector, &mode_config->connector_list, head) { | ||
| 4448 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
| 4449 | connector->polled = intel_connector->polled; | ||
| 4450 | if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) | ||
| 4451 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
| 4452 | if (intel_connector->mst_port) | ||
| 4453 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
| 4454 | } | ||
| 4455 | |||
| 4456 | /* Interrupt setup is already guaranteed to be single-threaded, this is | ||
| 4457 | * just to make the assert_spin_locked checks happy. */ | ||
| 4458 | spin_lock_irq(&dev_priv->irq_lock); | ||
| 4459 | if (dev_priv->display.hpd_irq_setup) | ||
| 4460 | dev_priv->display.hpd_irq_setup(dev); | ||
| 4461 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 4462 | } | ||
| 4463 | |||
| 4464 | /** | ||
| 4465 | * intel_irq_install - enables the hardware interrupt | 4163 | * intel_irq_install - enables the hardware interrupt |
| 4466 | * @dev_priv: i915 device instance | 4164 | * @dev_priv: i915 device instance |
| 4467 | * | 4165 | * |
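Reviewer note on the tail of the i915_irq.c diff: intel_hpd_init() and intel_hpd_irq_reenable_work() leave this file along with the other hotplug code; only the intel_hpd_init_work() call in intel_irq_init() remains visible here. The kerneldoc removed above spells out the ordering constraint that survives the move; a sketch of the load-time sequence it implies (call sites simplified, the exact load path is outside this excerpt):

/* Ordering described by the removed kerneldoc: work items first, then
 * hardware interrupts, and only then HPD, after which detect/poll code
 * may run concurrently and must obey the locking rules. */
intel_irq_init(dev_priv);		/* sets up work items, touches no hardware */
ret = intel_irq_install(dev_priv);	/* request_irq() + enable interrupts */
if (ret == 0)
	intel_hpd_init(dev_priv);	/* enable HPD; hotplug may fire from now on */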
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 8ac5a1b29ac0..5f4e7295295f 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c | |||
| @@ -28,7 +28,6 @@ struct i915_params i915 __read_mostly = { | |||
| 28 | .modeset = -1, | 28 | .modeset = -1, |
| 29 | .panel_ignore_lid = 1, | 29 | .panel_ignore_lid = 1, |
| 30 | .semaphores = -1, | 30 | .semaphores = -1, |
| 31 | .lvds_downclock = 0, | ||
| 32 | .lvds_channel_mode = 0, | 31 | .lvds_channel_mode = 0, |
| 33 | .panel_use_ssc = -1, | 32 | .panel_use_ssc = -1, |
| 34 | .vbt_sdvo_panel_type = -1, | 33 | .vbt_sdvo_panel_type = -1, |
| @@ -52,13 +51,12 @@ struct i915_params i915 __read_mostly = { | |||
| 52 | .use_mmio_flip = 0, | 51 | .use_mmio_flip = 0, |
| 53 | .mmio_debug = 0, | 52 | .mmio_debug = 0, |
| 54 | .verbose_state_checks = 1, | 53 | .verbose_state_checks = 1, |
| 55 | .nuclear_pageflip = 0, | ||
| 56 | .edp_vswing = 0, | 54 | .edp_vswing = 0, |
| 57 | }; | 55 | }; |
| 58 | 56 | ||
| 59 | module_param_named(modeset, i915.modeset, int, 0400); | 57 | module_param_named(modeset, i915.modeset, int, 0400); |
| 60 | MODULE_PARM_DESC(modeset, | 58 | MODULE_PARM_DESC(modeset, |
| 61 | "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, " | 59 | "Use kernel modesetting [KMS] (0=disable, " |
| 62 | "1=on, -1=force vga console preference [default])"); | 60 | "1=on, -1=force vga console preference [default])"); |
| 63 | 61 | ||
| 64 | module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600); | 62 | module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600); |
| @@ -84,11 +82,6 @@ MODULE_PARM_DESC(enable_fbc, | |||
| 84 | "Enable frame buffer compression for power savings " | 82 | "Enable frame buffer compression for power savings " |
| 85 | "(default: -1 (use per-chip default))"); | 83 | "(default: -1 (use per-chip default))"); |
| 86 | 84 | ||
| 87 | module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400); | ||
| 88 | MODULE_PARM_DESC(lvds_downclock, | ||
| 89 | "Use panel (LVDS/eDP) downclocking for power savings " | ||
| 90 | "(default: false)"); | ||
| 91 | |||
| 92 | module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600); | 85 | module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600); |
| 93 | MODULE_PARM_DESC(lvds_channel_mode, | 86 | MODULE_PARM_DESC(lvds_channel_mode, |
| 94 | "Specify LVDS channel mode " | 87 | "Specify LVDS channel mode " |
| @@ -104,7 +97,7 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type, | |||
| 104 | "Override/Ignore selection of SDVO panel mode in the VBT " | 97 | "Override/Ignore selection of SDVO panel mode in the VBT " |
| 105 | "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); | 98 | "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); |
| 106 | 99 | ||
| 107 | module_param_named(reset, i915.reset, bool, 0600); | 100 | module_param_named_unsafe(reset, i915.reset, bool, 0600); |
| 108 | MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)"); | 101 | MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)"); |
| 109 | 102 | ||
| 110 | module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644); | 103 | module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644); |
| @@ -182,10 +175,6 @@ module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600); | |||
| 182 | MODULE_PARM_DESC(verbose_state_checks, | 175 | MODULE_PARM_DESC(verbose_state_checks, |
| 183 | "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions."); | 176 | "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions."); |
| 184 | 177 | ||
| 185 | module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600); | ||
| 186 | MODULE_PARM_DESC(nuclear_pageflip, | ||
| 187 | "Force atomic modeset functionality; only planes work for now (default: false)."); | ||
| 188 | |||
| 189 | /* WA to get away with the default setting in VBT for early platforms. Will be removed */ | 178 |
| 190 | module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400); | 179 | module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400); |
| 191 | MODULE_PARM_DESC(edp_vswing, | 180 | MODULE_PARM_DESC(edp_vswing, |
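Reviewer note on the i915_params.c hunks: besides dropping lvds_downclock and nuclear_pageflip, the patch flips "reset" from module_param_named to module_param_named_unsafe, the variant that additionally taints the kernel when the parameter is set on the command line. A minimal sketch of a parameter declared that way, assuming only <linux/moduleparam.h> (the "example_" prefix is illustrative):

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool example_reset = true;

/* Behaves like module_param_named(), but setting the parameter taints
 * the kernel, so overridden defaults are visible in bug reports. */
module_param_named_unsafe(reset, example_reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");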
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 2030f602cbf8..e9a95df639f0 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -50,12 +50,17 @@ | |||
| 50 | 50 | ||
| 51 | /* PCI config space */ | 51 | /* PCI config space */ |
| 52 | 52 | ||
| 53 | #define HPLLCC 0xc0 /* 855 only */ | 53 | #define HPLLCC 0xc0 /* 85x only */ |
| 54 | #define GC_CLOCK_CONTROL_MASK (0xf << 0) | 54 | #define GC_CLOCK_CONTROL_MASK (0x7 << 0) |
| 55 | #define GC_CLOCK_133_200 (0 << 0) | 55 | #define GC_CLOCK_133_200 (0 << 0) |
| 56 | #define GC_CLOCK_100_200 (1 << 0) | 56 | #define GC_CLOCK_100_200 (1 << 0) |
| 57 | #define GC_CLOCK_100_133 (2 << 0) | 57 | #define GC_CLOCK_100_133 (2 << 0) |
| 58 | #define GC_CLOCK_166_250 (3 << 0) | 58 | #define GC_CLOCK_133_266 (3 << 0) |
| 59 | #define GC_CLOCK_133_200_2 (4 << 0) | ||
| 60 | #define GC_CLOCK_133_266_2 (5 << 0) | ||
| 61 | #define GC_CLOCK_166_266 (6 << 0) | ||
| 62 | #define GC_CLOCK_166_250 (7 << 0) | ||
| 63 | |||
| 59 | #define GCFGC2 0xda | 64 | #define GCFGC2 0xda |
| 60 | #define GCFGC 0xf0 /* 915+ only */ | 65 | #define GCFGC 0xf0 /* 915+ only */ |
| 61 | #define GC_LOW_FREQUENCY_ENABLE (1 << 7) | 66 | #define GC_LOW_FREQUENCY_ENABLE (1 << 7) |
| @@ -155,6 +160,7 @@ | |||
| 155 | #define GAM_ECOCHK 0x4090 | 160 | #define GAM_ECOCHK 0x4090 |
| 156 | #define BDW_DISABLE_HDC_INVALIDATION (1<<25) | 161 | #define BDW_DISABLE_HDC_INVALIDATION (1<<25) |
| 157 | #define ECOCHK_SNB_BIT (1<<10) | 162 | #define ECOCHK_SNB_BIT (1<<10) |
| 163 | #define ECOCHK_DIS_TLB (1<<8) | ||
| 158 | #define HSW_ECOCHK_ARB_PRIO_SOL (1<<6) | 164 | #define HSW_ECOCHK_ARB_PRIO_SOL (1<<6) |
| 159 | #define ECOCHK_PPGTT_CACHE64B (0x3<<3) | 165 | #define ECOCHK_PPGTT_CACHE64B (0x3<<3) |
| 160 | #define ECOCHK_PPGTT_CACHE4B (0x0<<3) | 166 | #define ECOCHK_PPGTT_CACHE4B (0x0<<3) |
| @@ -316,6 +322,8 @@ | |||
| 316 | #define MI_RESTORE_EXT_STATE_EN (1<<2) | 322 | #define MI_RESTORE_EXT_STATE_EN (1<<2) |
| 317 | #define MI_FORCE_RESTORE (1<<1) | 323 | #define MI_FORCE_RESTORE (1<<1) |
| 318 | #define MI_RESTORE_INHIBIT (1<<0) | 324 | #define MI_RESTORE_INHIBIT (1<<0) |
| 325 | #define HSW_MI_RS_SAVE_STATE_EN (1<<3) | ||
| 326 | #define HSW_MI_RS_RESTORE_STATE_EN (1<<2) | ||
| 319 | #define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */ | 327 | #define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */ |
| 320 | #define MI_SEMAPHORE_TARGET(engine) ((engine)<<15) | 328 | #define MI_SEMAPHORE_TARGET(engine) ((engine)<<15) |
| 321 | #define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */ | 329 | #define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */ |
| @@ -347,6 +355,8 @@ | |||
| 347 | #define MI_INVALIDATE_BSD (1<<7) | 355 | #define MI_INVALIDATE_BSD (1<<7) |
| 348 | #define MI_FLUSH_DW_USE_GTT (1<<2) | 356 | #define MI_FLUSH_DW_USE_GTT (1<<2) |
| 349 | #define MI_FLUSH_DW_USE_PPGTT (0<<2) | 357 | #define MI_FLUSH_DW_USE_PPGTT (0<<2) |
| 358 | #define MI_LOAD_REGISTER_MEM(x) MI_INSTR(0x29, 2*(x)-1) | ||
| 359 | #define MI_LOAD_REGISTER_MEM_GEN8(x) MI_INSTR(0x29, 3*(x)-1) | ||
| 350 | #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) | 360 | #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) |
| 351 | #define MI_BATCH_NON_SECURE (1) | 361 | #define MI_BATCH_NON_SECURE (1) |
| 352 | /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ | 362 | /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ |
| @@ -356,6 +366,7 @@ | |||
| 356 | #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) | 366 | #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) |
| 357 | #define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ | 367 | #define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ |
| 358 | #define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1) | 368 | #define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1) |
| 369 | #define MI_BATCH_RESOURCE_STREAMER (1<<10) | ||
| 359 | 370 | ||
| 360 | #define MI_PREDICATE_SRC0 (0x2400) | 371 | #define MI_PREDICATE_SRC0 (0x2400) |
| 361 | #define MI_PREDICATE_SRC1 (0x2408) | 372 | #define MI_PREDICATE_SRC1 (0x2408) |
| @@ -410,6 +421,7 @@ | |||
| 410 | #define DISPLAY_PLANE_A (0<<20) | 421 | #define DISPLAY_PLANE_A (0<<20) |
| 411 | #define DISPLAY_PLANE_B (1<<20) | 422 | #define DISPLAY_PLANE_B (1<<20) |
| 412 | #define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2)) | 423 | #define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2)) |
| 424 | #define PIPE_CONTROL_FLUSH_L3 (1<<27) | ||
| 413 | #define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */ | 425 | #define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */ |
| 414 | #define PIPE_CONTROL_MMIO_WRITE (1<<23) | 426 | #define PIPE_CONTROL_MMIO_WRITE (1<<23) |
| 415 | #define PIPE_CONTROL_STORE_DATA_INDEX (1<<21) | 427 | #define PIPE_CONTROL_STORE_DATA_INDEX (1<<21) |
| @@ -426,6 +438,7 @@ | |||
| 426 | #define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9) | 438 | #define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9) |
| 427 | #define PIPE_CONTROL_NOTIFY (1<<8) | 439 | #define PIPE_CONTROL_NOTIFY (1<<8) |
| 428 | #define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */ | 440 | #define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */ |
| 441 | #define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5) | ||
| 429 | #define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4) | 442 | #define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4) |
| 430 | #define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3) | 443 | #define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3) |
| 431 | #define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2) | 444 | #define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2) |
| @@ -449,7 +462,6 @@ | |||
| 449 | #define MI_CLFLUSH MI_INSTR(0x27, 0) | 462 | #define MI_CLFLUSH MI_INSTR(0x27, 0) |
| 450 | #define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0) | 463 | #define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0) |
| 451 | #define MI_REPORT_PERF_COUNT_GGTT (1<<0) | 464 | #define MI_REPORT_PERF_COUNT_GGTT (1<<0) |
| 452 | #define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 0) | ||
| 453 | #define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0) | 465 | #define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0) |
| 454 | #define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0) | 466 | #define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0) |
| 455 | #define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0) | 467 | #define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0) |
| @@ -1163,10 +1175,12 @@ enum skl_disp_power_wells { | |||
| 1163 | #define _PORT_PLL_EBB_0_A 0x162034 | 1175 | #define _PORT_PLL_EBB_0_A 0x162034 |
| 1164 | #define _PORT_PLL_EBB_0_B 0x6C034 | 1176 | #define _PORT_PLL_EBB_0_B 0x6C034 |
| 1165 | #define _PORT_PLL_EBB_0_C 0x6C340 | 1177 | #define _PORT_PLL_EBB_0_C 0x6C340 |
| 1166 | #define PORT_PLL_P1_MASK (0x07 << 13) | 1178 | #define PORT_PLL_P1_SHIFT 13 |
| 1167 | #define PORT_PLL_P1(x) ((x) << 13) | 1179 | #define PORT_PLL_P1_MASK (0x07 << PORT_PLL_P1_SHIFT) |
| 1168 | #define PORT_PLL_P2_MASK (0x1f << 8) | 1180 | #define PORT_PLL_P1(x) ((x) << PORT_PLL_P1_SHIFT) |
| 1169 | #define PORT_PLL_P2(x) ((x) << 8) | 1181 | #define PORT_PLL_P2_SHIFT 8 |
| 1182 | #define PORT_PLL_P2_MASK (0x1f << PORT_PLL_P2_SHIFT) | ||
| 1183 | #define PORT_PLL_P2(x) ((x) << PORT_PLL_P2_SHIFT) | ||
| 1170 | #define BXT_PORT_PLL_EBB_0(port) _PORT3(port, _PORT_PLL_EBB_0_A, \ | 1184 | #define BXT_PORT_PLL_EBB_0(port) _PORT3(port, _PORT_PLL_EBB_0_A, \ |
| 1171 | _PORT_PLL_EBB_0_B, \ | 1185 | _PORT_PLL_EBB_0_B, \ |
| 1172 | _PORT_PLL_EBB_0_C) | 1186 | _PORT_PLL_EBB_0_C) |
| @@ -1186,8 +1200,9 @@ enum skl_disp_power_wells { | |||
| 1186 | /* PORT_PLL_0_A */ | 1200 | /* PORT_PLL_0_A */ |
| 1187 | #define PORT_PLL_M2_MASK 0xFF | 1201 | #define PORT_PLL_M2_MASK 0xFF |
| 1188 | /* PORT_PLL_1_A */ | 1202 | /* PORT_PLL_1_A */ |
| 1189 | #define PORT_PLL_N_MASK (0x0F << 8) | 1203 | #define PORT_PLL_N_SHIFT 8 |
| 1190 | #define PORT_PLL_N(x) ((x) << 8) | 1204 | #define PORT_PLL_N_MASK (0x0F << PORT_PLL_N_SHIFT) |
| 1205 | #define PORT_PLL_N(x) ((x) << PORT_PLL_N_SHIFT) | ||
| 1191 | /* PORT_PLL_2_A */ | 1206 | /* PORT_PLL_2_A */ |
| 1192 | #define PORT_PLL_M2_FRAC_MASK 0x3FFFFF | 1207 | #define PORT_PLL_M2_FRAC_MASK 0x3FFFFF |
| 1193 | /* PORT_PLL_3_A */ | 1208 | /* PORT_PLL_3_A */ |
| @@ -1201,9 +1216,11 @@ enum skl_disp_power_wells { | |||
| 1201 | /* PORT_PLL_8_A */ | 1216 | /* PORT_PLL_8_A */ |
| 1202 | #define PORT_PLL_TARGET_CNT_MASK 0x3FF | 1217 | #define PORT_PLL_TARGET_CNT_MASK 0x3FF |
| 1203 | /* PORT_PLL_9_A */ | 1218 | /* PORT_PLL_9_A */ |
| 1204 | #define PORT_PLL_LOCK_THRESHOLD_MASK 0xe | 1219 | #define PORT_PLL_LOCK_THRESHOLD_SHIFT 1 |
| 1220 | #define PORT_PLL_LOCK_THRESHOLD_MASK (0x7 << PORT_PLL_LOCK_THRESHOLD_SHIFT) | ||
| 1205 | /* PORT_PLL_10_A */ | 1221 | /* PORT_PLL_10_A */ |
| 1206 | #define PORT_PLL_DCO_AMP_OVR_EN_H (1<<27) | 1222 | #define PORT_PLL_DCO_AMP_OVR_EN_H (1<<27) |
| 1223 | #define PORT_PLL_DCO_AMP_DEFAULT 15 | ||
| 1207 | #define PORT_PLL_DCO_AMP_MASK 0x3c00 | 1224 | #define PORT_PLL_DCO_AMP_MASK 0x3c00 |
| 1208 | #define PORT_PLL_DCO_AMP(x) (x<<10) | 1225 | #define PORT_PLL_DCO_AMP(x) (x<<10) |
| 1209 | #define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \ | 1226 | #define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \ |
| @@ -1377,6 +1394,18 @@ enum skl_disp_power_wells { | |||
| 1377 | _PORT_TX_DW14_LN0_C) + \ | 1394 | _PORT_TX_DW14_LN0_C) + \ |
| 1378 | _BXT_LANE_OFFSET(lane)) | 1395 | _BXT_LANE_OFFSET(lane)) |
| 1379 | 1396 | ||
| 1397 | /* UAIMI scratch pad register 1 */ | ||
| 1398 | #define UAIMI_SPR1 0x4F074 | ||
| 1399 | /* SKL VccIO mask */ | ||
| 1400 | #define SKL_VCCIO_MASK 0x1 | ||
| 1401 | /* SKL balance leg register */ | ||
| 1402 | #define DISPIO_CR_TX_BMU_CR0 0x6C00C | ||
| 1403 | /* I_boost values */ | ||
| 1404 | #define BALANCE_LEG_SHIFT(port) (8+3*(port)) | ||
| 1405 | #define BALANCE_LEG_MASK(port) (7<<(8+3*(port))) | ||
| 1406 | /* Balance leg disable bits */ | ||
| 1407 | #define BALANCE_LEG_DISABLE_SHIFT 23 | ||
| 1408 | |||
| 1380 | /* | 1409 | /* |
| 1381 | * Fence registers | 1410 | * Fence registers |
| 1382 | */ | 1411 | */ |
| @@ -1456,6 +1485,9 @@ enum skl_disp_power_wells { | |||
| 1456 | #define RING_MAX_IDLE(base) ((base)+0x54) | 1485 | #define RING_MAX_IDLE(base) ((base)+0x54) |
| 1457 | #define RING_HWS_PGA(base) ((base)+0x80) | 1486 | #define RING_HWS_PGA(base) ((base)+0x80) |
| 1458 | #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) | 1487 | #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) |
| 1488 | #define RING_RESET_CTL(base) ((base)+0xd0) | ||
| 1489 | #define RESET_CTL_REQUEST_RESET (1 << 0) | ||
| 1490 | #define RESET_CTL_READY_TO_RESET (1 << 1) | ||
| 1459 | 1491 | ||
| 1460 | #define HSW_GTT_CACHE_EN 0x4024 | 1492 | #define HSW_GTT_CACHE_EN 0x4024 |
| 1461 | #define GTT_CACHE_EN_ALL 0xF0007FFF | 1493 | #define GTT_CACHE_EN_ALL 0xF0007FFF |
| @@ -1946,6 +1978,9 @@ enum skl_disp_power_wells { | |||
| 1946 | #define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */ | 1978 | #define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */ |
| 1947 | #define FBC_TAG 0x03300 | 1979 | #define FBC_TAG 0x03300 |
| 1948 | 1980 | ||
| 1981 | #define FBC_STATUS2 0x43214 | ||
| 1982 | #define FBC_COMPRESSION_MASK 0x7ff | ||
| 1983 | |||
| 1949 | #define FBC_LL_SIZE (1536) | 1984 | #define FBC_LL_SIZE (1536) |
| 1950 | 1985 | ||
| 1951 | /* Framebuffer compression for GM45+ */ | 1986 | /* Framebuffer compression for GM45+ */ |
| @@ -2116,7 +2151,7 @@ enum skl_disp_power_wells { | |||
| 2116 | #define DPLL_DVO_2X_MODE (1 << 30) | 2151 | #define DPLL_DVO_2X_MODE (1 << 30) |
| 2117 | #define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30) | 2152 | #define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30) |
| 2118 | #define DPLL_SYNCLOCK_ENABLE (1 << 29) | 2153 | #define DPLL_SYNCLOCK_ENABLE (1 << 29) |
| 2119 | #define DPLL_REFA_CLK_ENABLE_VLV (1 << 29) | 2154 | #define DPLL_REF_CLK_ENABLE_VLV (1 << 29) |
| 2120 | #define DPLL_VGA_MODE_DIS (1 << 28) | 2155 | #define DPLL_VGA_MODE_DIS (1 << 28) |
| 2121 | #define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ | 2156 | #define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ |
| 2122 | #define DPLLB_MODE_LVDS (2 << 26) /* i915 */ | 2157 | #define DPLLB_MODE_LVDS (2 << 26) /* i915 */ |
| @@ -2130,8 +2165,8 @@ enum skl_disp_power_wells { | |||
| 2130 | #define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ | 2165 | #define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ |
| 2131 | #define DPLL_LOCK_VLV (1<<15) | 2166 | #define DPLL_LOCK_VLV (1<<15) |
| 2132 | #define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14) | 2167 | #define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14) |
| 2133 | #define DPLL_INTEGRATED_CLOCK_VLV (1<<13) | 2168 | #define DPLL_INTEGRATED_REF_CLK_VLV (1<<13) |
| 2134 | #define DPLL_SSC_REF_CLOCK_CHV (1<<13) | 2169 | #define DPLL_SSC_REF_CLK_CHV (1<<13) |
| 2135 | #define DPLL_PORTC_READY_MASK (0xf << 4) | 2170 | #define DPLL_PORTC_READY_MASK (0xf << 4) |
| 2136 | #define DPLL_PORTB_READY_MASK (0xf) | 2171 | #define DPLL_PORTB_READY_MASK (0xf) |
| 2137 | 2172 | ||
| @@ -2488,6 +2523,9 @@ enum skl_disp_power_wells { | |||
| 2488 | #define CLKCFG_MEM_800 (3 << 4) | 2523 | #define CLKCFG_MEM_800 (3 << 4) |
| 2489 | #define CLKCFG_MEM_MASK (7 << 4) | 2524 | #define CLKCFG_MEM_MASK (7 << 4) |
| 2490 | 2525 | ||
| 2526 | #define HPLLVCO (MCHBAR_MIRROR_BASE + 0xc38) | ||
| 2527 | #define HPLLVCO_MOBILE (MCHBAR_MIRROR_BASE + 0xc0f) | ||
| 2528 | |||
| 2491 | #define TSC1 0x11001 | 2529 | #define TSC1 0x11001 |
| 2492 | #define TSE (1<<0) | 2530 | #define TSE (1<<0) |
| 2493 | #define TR1 0x11006 | 2531 | #define TR1 0x11006 |
| @@ -2718,8 +2756,10 @@ enum skl_disp_power_wells { | |||
| 2718 | #define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7 | 2756 | #define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7 |
| 2719 | 2757 | ||
| 2720 | #define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948) | 2758 | #define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948) |
| 2759 | #define BXT_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x7070) | ||
| 2721 | #define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994) | 2760 | #define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994) |
| 2722 | #define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998) | 2761 | #define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998) |
| 2762 | #define BXT_RP_STATE_CAP 0x138170 | ||
| 2723 | 2763 | ||
| 2724 | #define INTERVAL_1_28_US(us) (((us) * 100) >> 7) | 2764 | #define INTERVAL_1_28_US(us) (((us) * 100) >> 7) |
| 2725 | #define INTERVAL_1_33_US(us) (((us) * 3) >> 2) | 2765 | #define INTERVAL_1_33_US(us) (((us) * 3) >> 2) |
| @@ -2767,7 +2807,8 @@ enum skl_disp_power_wells { | |||
| 2767 | * valid. Now, docs explain in dwords what is in the context object. The full | 2807 | * valid. Now, docs explain in dwords what is in the context object. The full |
| 2768 | * size is 70720 bytes, however, the power context and execlist context will | 2808 | * size is 70720 bytes, however, the power context and execlist context will |
| 2769 | * never be saved (power context is stored elsewhere, and execlists don't work | 2809 | * never be saved (power context is stored elsewhere, and execlists don't work |
| 2770 | * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages. | 2810 | * on HSW) - so the final size, including the extra state required for the |
| 2811 | * Resource Streamer, is 66944 bytes, which rounds to 17 pages. | ||
| 2771 | */ | 2812 | */ |
| 2772 | #define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE) | 2813 | #define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE) |
| 2773 | /* Same as Haswell, but 72064 bytes now. */ | 2814 | /* Same as Haswell, but 72064 bytes now. */ |
| @@ -4398,9 +4439,32 @@ enum skl_disp_power_wells { | |||
| 4398 | #define DSPARB_BSTART_SHIFT 0 | 4439 | #define DSPARB_BSTART_SHIFT 0 |
| 4399 | #define DSPARB_BEND_SHIFT 9 /* on 855 */ | 4440 | #define DSPARB_BEND_SHIFT 9 /* on 855 */ |
| 4400 | #define DSPARB_AEND_SHIFT 0 | 4441 | #define DSPARB_AEND_SHIFT 0 |
| 4401 | 4442 | #define DSPARB_SPRITEA_SHIFT_VLV 0 | |
| 4443 | #define DSPARB_SPRITEA_MASK_VLV (0xff << 0) | ||
| 4444 | #define DSPARB_SPRITEB_SHIFT_VLV 8 | ||
| 4445 | #define DSPARB_SPRITEB_MASK_VLV (0xff << 8) | ||
| 4446 | #define DSPARB_SPRITEC_SHIFT_VLV 16 | ||
| 4447 | #define DSPARB_SPRITEC_MASK_VLV (0xff << 16) | ||
| 4448 | #define DSPARB_SPRITED_SHIFT_VLV 24 | ||
| 4449 | #define DSPARB_SPRITED_MASK_VLV (0xff << 24) | ||
| 4402 | #define DSPARB2 (VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */ | 4450 | #define DSPARB2 (VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */ |
| 4451 | #define DSPARB_SPRITEA_HI_SHIFT_VLV 0 | ||
| 4452 | #define DSPARB_SPRITEA_HI_MASK_VLV (0x1 << 0) | ||
| 4453 | #define DSPARB_SPRITEB_HI_SHIFT_VLV 4 | ||
| 4454 | #define DSPARB_SPRITEB_HI_MASK_VLV (0x1 << 4) | ||
| 4455 | #define DSPARB_SPRITEC_HI_SHIFT_VLV 8 | ||
| 4456 | #define DSPARB_SPRITEC_HI_MASK_VLV (0x1 << 8) | ||
| 4457 | #define DSPARB_SPRITED_HI_SHIFT_VLV 12 | ||
| 4458 | #define DSPARB_SPRITED_HI_MASK_VLV (0x1 << 12) | ||
| 4459 | #define DSPARB_SPRITEE_HI_SHIFT_VLV 16 | ||
| 4460 | #define DSPARB_SPRITEE_HI_MASK_VLV (0x1 << 16) | ||
| 4461 | #define DSPARB_SPRITEF_HI_SHIFT_VLV 20 | ||
| 4462 | #define DSPARB_SPRITEF_HI_MASK_VLV (0x1 << 20) | ||
| 4403 | #define DSPARB3 (VLV_DISPLAY_BASE + 0x7006c) /* chv */ | 4463 | #define DSPARB3 (VLV_DISPLAY_BASE + 0x7006c) /* chv */ |
| 4464 | #define DSPARB_SPRITEE_SHIFT_VLV 0 | ||
| 4465 | #define DSPARB_SPRITEE_MASK_VLV (0xff << 0) | ||
| 4466 | #define DSPARB_SPRITEF_SHIFT_VLV 8 | ||
| 4467 | #define DSPARB_SPRITEF_MASK_VLV (0xff << 8) | ||
| 4404 | 4468 | ||
| 4405 | /* pnv/gen4/g4x/vlv/chv */ | 4469 | /* pnv/gen4/g4x/vlv/chv */ |
| 4406 | #define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034) | 4470 | #define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034) |
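Reviewer note on the DSPARB hunk above: each VLV/CHV sprite field is split into an 8-bit low part (DSPARB or DSPARB3) and a 1-bit high part (DSPARB2). A small sketch of how such a 9-bit value would be reassembled from the two registers using only the masks and shifts defined here; the function name is illustrative and the exact meaning of the field (FIFO allocation boundary) is outside this excerpt:

/* Illustrative only: rebuild sprite A's 9-bit DSPARB field on VLV. */
static int example_vlv_sprite_a_field(struct drm_i915_private *dev_priv)
{
	u32 dsparb  = I915_READ(DSPARB);
	u32 dsparb2 = I915_READ(DSPARB2);
	int lo = (dsparb  & DSPARB_SPRITEA_MASK_VLV)    >> DSPARB_SPRITEA_SHIFT_VLV;
	int hi = (dsparb2 & DSPARB_SPRITEA_HI_MASK_VLV) >> DSPARB_SPRITEA_HI_SHIFT_VLV;

	/* high bit is assumed to extend the 8-bit field to 9 bits */
	return (hi << 8) | lo;
}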
| @@ -5754,6 +5818,13 @@ enum skl_disp_power_wells { | |||
| 5754 | #define HSW_NDE_RSTWRN_OPT 0x46408 | 5818 | #define HSW_NDE_RSTWRN_OPT 0x46408 |
| 5755 | #define RESET_PCH_HANDSHAKE_ENABLE (1<<4) | 5819 | #define RESET_PCH_HANDSHAKE_ENABLE (1<<4) |
| 5756 | 5820 | ||
| 5821 | #define SKL_DFSM 0x51000 | ||
| 5822 | #define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23) | ||
| 5823 | #define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23) | ||
| 5824 | #define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23) | ||
| 5825 | #define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23) | ||
| 5826 | #define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23) | ||
| 5827 | |||
| 5757 | #define FF_SLICE_CS_CHICKEN2 0x20e4 | 5828 | #define FF_SLICE_CS_CHICKEN2 0x20e4 |
| 5758 | #define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8) | 5829 | #define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8) |
| 5759 | 5830 | ||
| @@ -5791,6 +5862,7 @@ enum skl_disp_power_wells { | |||
| 5791 | 5862 | ||
| 5792 | #define GEN8_L3SQCREG4 0xb118 | 5863 | #define GEN8_L3SQCREG4 0xb118 |
| 5793 | #define GEN8_LQSC_RO_PERF_DIS (1<<27) | 5864 | #define GEN8_LQSC_RO_PERF_DIS (1<<27) |
| 5865 | #define GEN8_LQSC_FLUSH_COHERENT_LINES (1<<21) | ||
| 5794 | 5866 | ||
| 5795 | /* GEN8 chicken */ | 5867 | /* GEN8 chicken */ |
| 5796 | #define HDC_CHICKEN0 0x7300 | 5868 | #define HDC_CHICKEN0 0x7300 |
| @@ -6047,6 +6119,9 @@ enum skl_disp_power_wells { | |||
| 6047 | #define _VIDEO_DIP_CTL_A 0xe0200 | 6119 | #define _VIDEO_DIP_CTL_A 0xe0200 |
| 6048 | #define _VIDEO_DIP_DATA_A 0xe0208 | 6120 | #define _VIDEO_DIP_DATA_A 0xe0208 |
| 6049 | #define _VIDEO_DIP_GCP_A 0xe0210 | 6121 | #define _VIDEO_DIP_GCP_A 0xe0210 |
| 6122 | #define GCP_COLOR_INDICATION (1 << 2) | ||
| 6123 | #define GCP_DEFAULT_PHASE_ENABLE (1 << 1) | ||
| 6124 | #define GCP_AV_MUTE (1 << 0) | ||
| 6050 | 6125 | ||
| 6051 | #define _VIDEO_DIP_CTL_B 0xe1200 | 6126 | #define _VIDEO_DIP_CTL_B 0xe1200 |
| 6052 | #define _VIDEO_DIP_DATA_B 0xe1208 | 6127 | #define _VIDEO_DIP_DATA_B 0xe1208 |
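The three GCP bits above form the payload written to the _VIDEO_DIP_GCP_* register for the HDMI General Control Packet. A small sketch of how such a value could be assembled (flags and helper name are illustrative, not taken from this patch):

#include <stdbool.h>
#include <stdint.h>

static uint32_t build_gcp_value(bool deep_color, bool default_phase, bool av_mute)
{
	uint32_t gcp = 0;

	if (deep_color)
		gcp |= 1u << 2;		/* GCP_COLOR_INDICATION */
	if (default_phase)
		gcp |= 1u << 1;		/* GCP_DEFAULT_PHASE_ENABLE */
	if (av_mute)
		gcp |= 1u << 0;		/* GCP_AV_MUTE */

	return gcp;
}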
| @@ -6186,6 +6261,7 @@ enum skl_disp_power_wells { | |||
| 6186 | #define _TRANSA_CHICKEN1 0xf0060 | 6261 | #define _TRANSA_CHICKEN1 0xf0060 |
| 6187 | #define _TRANSB_CHICKEN1 0xf1060 | 6262 | #define _TRANSB_CHICKEN1 0xf1060 |
| 6188 | #define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1) | 6263 | #define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1) |
| 6264 | #define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1<<10) | ||
| 6189 | #define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4) | 6265 | #define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4) |
| 6190 | #define _TRANSA_CHICKEN2 0xf0064 | 6266 | #define _TRANSA_CHICKEN2 0xf0064 |
| 6191 | #define _TRANSB_CHICKEN2 0xf1064 | 6267 | #define _TRANSB_CHICKEN2 0xf1064 |
| @@ -6370,6 +6446,8 @@ enum skl_disp_power_wells { | |||
| 6370 | #define PCH_PP_CONTROL 0xc7204 | 6446 | #define PCH_PP_CONTROL 0xc7204 |
| 6371 | #define PANEL_UNLOCK_REGS (0xabcd << 16) | 6447 | #define PANEL_UNLOCK_REGS (0xabcd << 16) |
| 6372 | #define PANEL_UNLOCK_MASK (0xffff << 16) | 6448 | #define PANEL_UNLOCK_MASK (0xffff << 16) |
| 6449 | #define BXT_POWER_CYCLE_DELAY_MASK (0x1f0) | ||
| 6450 | #define BXT_POWER_CYCLE_DELAY_SHIFT 4 | ||
| 6373 | #define EDP_FORCE_VDD (1 << 3) | 6451 | #define EDP_FORCE_VDD (1 << 3) |
| 6374 | #define EDP_BLC_ENABLE (1 << 2) | 6452 | #define EDP_BLC_ENABLE (1 << 2) |
| 6375 | #define PANEL_POWER_RESET (1 << 1) | 6453 | #define PANEL_POWER_RESET (1 << 1) |
| @@ -6398,6 +6476,17 @@ enum skl_disp_power_wells { | |||
| 6398 | #define PANEL_POWER_CYCLE_DELAY_MASK (0x1f) | 6476 | #define PANEL_POWER_CYCLE_DELAY_MASK (0x1f) |
| 6399 | #define PANEL_POWER_CYCLE_DELAY_SHIFT 0 | 6477 | #define PANEL_POWER_CYCLE_DELAY_SHIFT 0 |
| 6400 | 6478 | ||
| 6479 | /* BXT PPS changes - 2nd set of PPS registers */ | ||
| 6480 | #define _BXT_PP_STATUS2 0xc7300 | ||
| 6481 | #define _BXT_PP_CONTROL2 0xc7304 | ||
| 6482 | #define _BXT_PP_ON_DELAYS2 0xc7308 | ||
| 6483 | #define _BXT_PP_OFF_DELAYS2 0xc730c | ||
| 6484 | |||
| 6485 | #define BXT_PP_STATUS(n) ((!n) ? PCH_PP_STATUS : _BXT_PP_STATUS2) | ||
| 6486 | #define BXT_PP_CONTROL(n) ((!n) ? PCH_PP_CONTROL : _BXT_PP_CONTROL2) | ||
| 6487 | #define BXT_PP_ON_DELAYS(n) ((!n) ? PCH_PP_ON_DELAYS : _BXT_PP_ON_DELAYS2) | ||
| 6488 | #define BXT_PP_OFF_DELAYS(n) ((!n) ? PCH_PP_OFF_DELAYS : _BXT_PP_OFF_DELAYS2) | ||
| 6489 | |||
| 6401 | #define PCH_DP_B 0xe4100 | 6490 | #define PCH_DP_B 0xe4100 |
| 6402 | #define PCH_DPB_AUX_CH_CTL 0xe4110 | 6491 | #define PCH_DPB_AUX_CH_CTL 0xe4110 |
| 6403 | #define PCH_DPB_AUX_CH_DATA1 0xe4114 | 6492 | #define PCH_DPB_AUX_CH_DATA1 0xe4114 |
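The BXT_PP_*(n) selectors above use (!n) ? legacy : second-instance, so index 0 addresses the original PCH_PP_* block and any other index the new 0xc73xx block. A standalone sketch of the same selection (register addresses copied from the defines; function name illustrative):

#include <stdint.h>

static uint32_t bxt_pp_control_reg(int pps_idx)
{
	return pps_idx ? 0xc7304		/* _BXT_PP_CONTROL2 */
		       : 0xc7204;		/* PCH_PP_CONTROL */
}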
| @@ -6698,6 +6787,7 @@ enum skl_disp_power_wells { | |||
| 6698 | #define GEN6_PCODE_READ_RC6VIDS 0x5 | 6787 | #define GEN6_PCODE_READ_RC6VIDS 0x5 |
| 6699 | #define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) | 6788 | #define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) |
| 6700 | #define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) | 6789 | #define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) |
| 6790 | #define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18 | ||
| 6701 | #define GEN9_PCODE_READ_MEM_LATENCY 0x6 | 6791 | #define GEN9_PCODE_READ_MEM_LATENCY 0x6 |
| 6702 | #define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF | 6792 | #define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF |
| 6703 | #define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8 | 6793 | #define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8 |
| @@ -7163,6 +7253,7 @@ enum skl_disp_power_wells { | |||
| 7163 | #define LCPLL_CLK_FREQ_337_5_BDW (2<<26) | 7253 | #define LCPLL_CLK_FREQ_337_5_BDW (2<<26) |
| 7164 | #define LCPLL_CLK_FREQ_675_BDW (3<<26) | 7254 | #define LCPLL_CLK_FREQ_675_BDW (3<<26) |
| 7165 | #define LCPLL_CD_CLOCK_DISABLE (1<<25) | 7255 | #define LCPLL_CD_CLOCK_DISABLE (1<<25) |
| 7256 | #define LCPLL_ROOT_CD_CLOCK_DISABLE (1<<24) | ||
| 7166 | #define LCPLL_CD2X_CLOCK_DISABLE (1<<23) | 7257 | #define LCPLL_CD2X_CLOCK_DISABLE (1<<23) |
| 7167 | #define LCPLL_POWER_DOWN_ALLOW (1<<22) | 7258 | #define LCPLL_POWER_DOWN_ALLOW (1<<22) |
| 7168 | #define LCPLL_CD_SOURCE_FCLK (1<<21) | 7259 | #define LCPLL_CD_SOURCE_FCLK (1<<21) |
| @@ -7265,12 +7356,6 @@ enum skl_disp_power_wells { | |||
| 7265 | #define DC_STATE_EN 0x45504 | 7356 | #define DC_STATE_EN 0x45504 |
| 7266 | #define DC_STATE_EN_UPTO_DC5 (1<<0) | 7357 | #define DC_STATE_EN_UPTO_DC5 (1<<0) |
| 7267 | #define DC_STATE_EN_DC9 (1<<3) | 7358 | #define DC_STATE_EN_DC9 (1<<3) |
| 7268 | |||
| 7269 | /* | ||
| 7270 | * SKL DC | ||
| 7271 | */ | ||
| 7272 | #define DC_STATE_EN 0x45504 | ||
| 7273 | #define DC_STATE_EN_UPTO_DC5 (1<<0) | ||
| 7274 | #define DC_STATE_EN_UPTO_DC6 (2<<0) | 7359 | #define DC_STATE_EN_UPTO_DC6 (2<<0) |
| 7275 | #define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3 | 7360 | #define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3 |
| 7276 | 7361 | ||
| @@ -7822,4 +7907,13 @@ enum skl_disp_power_wells { | |||
| 7822 | #define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000) | 7907 | #define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000) |
| 7823 | #define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800) | 7908 | #define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800) |
| 7824 | 7909 | ||
| 7910 | /* MOCS (Memory Object Control State) registers */ | ||
| 7911 | #define GEN9_LNCFCMOCS0 0xb020 /* L3 Cache Control base */ | ||
| 7912 | |||
| 7913 | #define GEN9_GFX_MOCS_0 0xc800 /* Graphics MOCS base register */ | ||
| 7914 | #define GEN9_MFX0_MOCS_0 0xc900 /* Media 0 MOCS base register */ | ||
| 7915 | #define GEN9_MFX1_MOCS_0 0xca00 /* Media 1 MOCS base register */ | ||
| 7916 | #define GEN9_VEBOX_MOCS_0 0xcb00 /* Video MOCS base register */ | ||
| 7917 | #define GEN9_BLT_MOCS_0 0xcc00 /* Blitter MOCS base register */ | ||
| 7918 | |||
| 7825 | #endif /* _I915_REG_H_ */ | 7919 | #endif /* _I915_REG_H_ */ |
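Each *_MOCS_0 define above is the base of a per-engine array of 32-bit MOCS control words, so entry N sits at base + N * 4 (a hedged assumption about the stride; the helper name is illustrative):

#include <stdint.h>

static uint32_t gen9_gfx_mocs_reg(unsigned int index)
{
	return 0xc800 + index * 4;	/* GEN9_GFX_MOCS_0 + index * 4 */
}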
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index cf67f82f7b7f..1ccac618468e 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
| @@ -92,7 +92,7 @@ static void i915_restore_display(struct drm_device *dev) | |||
| 92 | } | 92 | } |
| 93 | 93 | ||
| 94 | /* only restore FBC info on the platform that supports FBC*/ | 94 | /* only restore FBC info on the platform that supports FBC*/ |
| 95 | intel_fbc_disable(dev); | 95 | intel_fbc_disable(dev_priv); |
| 96 | 96 | ||
| 97 | /* restore FBC interval */ | 97 | /* restore FBC interval */ |
| 98 | if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev)) | 98 | if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev)) |
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 247626885f49..55bd04c6b939 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c | |||
| @@ -64,24 +64,16 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg) | |||
| 64 | goto out; | 64 | goto out; |
| 65 | } | 65 | } |
| 66 | 66 | ||
| 67 | units = 0; | 67 | if (IS_CHERRYVIEW(dev) && czcount_30ns == 1) { |
| 68 | div = 1000000ULL; | ||
| 69 | |||
| 70 | if (IS_CHERRYVIEW(dev)) { | ||
| 71 | /* Special case for 320Mhz */ | 68 | /* Special case for 320Mhz */ |
| 72 | if (czcount_30ns == 1) { | 69 | div = 10000000ULL; |
| 73 | div = 10000000ULL; | 70 | units = 3125ULL; |
| 74 | units = 3125ULL; | 71 | } else { |
| 75 | } else { | 72 | czcount_30ns += 1; |
| 76 | /* chv counts are one less */ | 73 | div = 1000000ULL; |
| 77 | czcount_30ns += 1; | 74 | units = DIV_ROUND_UP_ULL(30ULL * bias, czcount_30ns); |
| 78 | } | ||
| 79 | } | 75 | } |
| 80 | 76 | ||
| 81 | if (units == 0) | ||
| 82 | units = DIV_ROUND_UP_ULL(30ULL * bias, | ||
| 83 | (u64)czcount_30ns); | ||
| 84 | |||
| 85 | if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) | 77 | if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) |
| 86 | units <<= 8; | 78 | units <<= 8; |
| 87 | 79 | ||
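The rewritten residency scaling above keeps two cases: CHV reporting czcount_30ns == 1 means a 320 MHz cz clock with fixed units/div, everything else derives units from the bias and the incremented count. A standalone sketch mirroring that arithmetic (names illustrative, DIV_ROUND_UP expanded by hand):

#include <stdint.h>

static uint64_t residency_units(int is_chv, uint64_t czcount_30ns,
				uint64_t bias, uint64_t *div)
{
	if (is_chv && czcount_30ns == 1) {
		*div = 10000000ULL;	/* special case for 320 MHz */
		return 3125ULL;
	}

	czcount_30ns += 1;
	*div = 1000000ULL;
	return (30ULL * bias + czcount_30ns - 1) / czcount_30ns; /* DIV_ROUND_UP */
}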
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 849a2590e010..2f34c47bd4bf 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
| @@ -424,10 +424,10 @@ TRACE_EVENT(i915_gem_evict_vm, | |||
| 424 | ); | 424 | ); |
| 425 | 425 | ||
| 426 | TRACE_EVENT(i915_gem_ring_sync_to, | 426 | TRACE_EVENT(i915_gem_ring_sync_to, |
| 427 | TP_PROTO(struct intel_engine_cs *from, | 427 | TP_PROTO(struct drm_i915_gem_request *to_req, |
| 428 | struct intel_engine_cs *to, | 428 | struct intel_engine_cs *from, |
| 429 | struct drm_i915_gem_request *req), | 429 | struct drm_i915_gem_request *req), |
| 430 | TP_ARGS(from, to, req), | 430 | TP_ARGS(to_req, from, req), |
| 431 | 431 | ||
| 432 | TP_STRUCT__entry( | 432 | TP_STRUCT__entry( |
| 433 | __field(u32, dev) | 433 | __field(u32, dev) |
| @@ -439,7 +439,7 @@ TRACE_EVENT(i915_gem_ring_sync_to, | |||
| 439 | TP_fast_assign( | 439 | TP_fast_assign( |
| 440 | __entry->dev = from->dev->primary->index; | 440 | __entry->dev = from->dev->primary->index; |
| 441 | __entry->sync_from = from->id; | 441 | __entry->sync_from = from->id; |
| 442 | __entry->sync_to = to->id; | 442 | __entry->sync_to = to_req->ring->id; |
| 443 | __entry->seqno = i915_gem_request_get_seqno(req); | 443 | __entry->seqno = i915_gem_request_get_seqno(req); |
| 444 | ), | 444 | ), |
| 445 | 445 | ||
| @@ -475,8 +475,8 @@ TRACE_EVENT(i915_gem_ring_dispatch, | |||
| 475 | ); | 475 | ); |
| 476 | 476 | ||
| 477 | TRACE_EVENT(i915_gem_ring_flush, | 477 | TRACE_EVENT(i915_gem_ring_flush, |
| 478 | TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush), | 478 | TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush), |
| 479 | TP_ARGS(ring, invalidate, flush), | 479 | TP_ARGS(req, invalidate, flush), |
| 480 | 480 | ||
| 481 | TP_STRUCT__entry( | 481 | TP_STRUCT__entry( |
| 482 | __field(u32, dev) | 482 | __field(u32, dev) |
| @@ -486,8 +486,8 @@ TRACE_EVENT(i915_gem_ring_flush, | |||
| 486 | ), | 486 | ), |
| 487 | 487 | ||
| 488 | TP_fast_assign( | 488 | TP_fast_assign( |
| 489 | __entry->dev = ring->dev->primary->index; | 489 | __entry->dev = req->ring->dev->primary->index; |
| 490 | __entry->ring = ring->id; | 490 | __entry->ring = req->ring->id; |
| 491 | __entry->invalidate = invalidate; | 491 | __entry->invalidate = invalidate; |
| 492 | __entry->flush = flush; | 492 | __entry->flush = flush; |
| 493 | ), | 493 | ), |
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 7ed8033aae60..e2531cf59266 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c | |||
| @@ -35,162 +35,6 @@ | |||
| 35 | #include <drm/drm_plane_helper.h> | 35 | #include <drm/drm_plane_helper.h> |
| 36 | #include "intel_drv.h" | 36 | #include "intel_drv.h" |
| 37 | 37 | ||
| 38 | |||
| 39 | /** | ||
| 40 | * intel_atomic_check - validate state object | ||
| 41 | * @dev: drm device | ||
| 42 | * @state: state to validate | ||
| 43 | */ | ||
| 44 | int intel_atomic_check(struct drm_device *dev, | ||
| 45 | struct drm_atomic_state *state) | ||
| 46 | { | ||
| 47 | int nplanes = dev->mode_config.num_total_plane; | ||
| 48 | int ncrtcs = dev->mode_config.num_crtc; | ||
| 49 | int nconnectors = dev->mode_config.num_connector; | ||
| 50 | enum pipe nuclear_pipe = INVALID_PIPE; | ||
| 51 | struct intel_crtc *nuclear_crtc = NULL; | ||
| 52 | struct intel_crtc_state *crtc_state = NULL; | ||
| 53 | int ret; | ||
| 54 | int i; | ||
| 55 | bool not_nuclear = false; | ||
| 56 | |||
| 57 | /* | ||
| 58 | * FIXME: At the moment, we only support "nuclear pageflip" on a | ||
| 59 | * single CRTC. Cross-crtc updates will be added later. | ||
| 60 | */ | ||
| 61 | for (i = 0; i < nplanes; i++) { | ||
| 62 | struct intel_plane *plane = to_intel_plane(state->planes[i]); | ||
| 63 | if (!plane) | ||
| 64 | continue; | ||
| 65 | |||
| 66 | if (nuclear_pipe == INVALID_PIPE) { | ||
| 67 | nuclear_pipe = plane->pipe; | ||
| 68 | } else if (nuclear_pipe != plane->pipe) { | ||
| 69 | DRM_DEBUG_KMS("i915 only support atomic plane operations on a single CRTC at the moment\n"); | ||
| 70 | return -EINVAL; | ||
| 71 | } | ||
| 72 | } | ||
| 73 | |||
| 74 | /* | ||
| 75 | * FIXME: We only handle planes for now; make sure there are no CRTC's | ||
| 76 | * or connectors involved. | ||
| 77 | */ | ||
| 78 | state->allow_modeset = false; | ||
| 79 | for (i = 0; i < ncrtcs; i++) { | ||
| 80 | struct intel_crtc *crtc = to_intel_crtc(state->crtcs[i]); | ||
| 81 | if (crtc) | ||
| 82 | memset(&crtc->atomic, 0, sizeof(crtc->atomic)); | ||
| 83 | if (crtc && crtc->pipe != nuclear_pipe) | ||
| 84 | not_nuclear = true; | ||
| 85 | if (crtc && crtc->pipe == nuclear_pipe) { | ||
| 86 | nuclear_crtc = crtc; | ||
| 87 | crtc_state = to_intel_crtc_state(state->crtc_states[i]); | ||
| 88 | } | ||
| 89 | } | ||
| 90 | for (i = 0; i < nconnectors; i++) | ||
| 91 | if (state->connectors[i] != NULL) | ||
| 92 | not_nuclear = true; | ||
| 93 | |||
| 94 | if (not_nuclear) { | ||
| 95 | DRM_DEBUG_KMS("i915 only supports atomic plane operations at the moment\n"); | ||
| 96 | return -EINVAL; | ||
| 97 | } | ||
| 98 | |||
| 99 | ret = drm_atomic_helper_check_planes(dev, state); | ||
| 100 | if (ret) | ||
| 101 | return ret; | ||
| 102 | |||
| 103 | /* FIXME: move to crtc atomic check function once it is ready */ | ||
| 104 | ret = intel_atomic_setup_scalers(dev, nuclear_crtc, crtc_state); | ||
| 105 | if (ret) | ||
| 106 | return ret; | ||
| 107 | |||
| 108 | return ret; | ||
| 109 | } | ||
| 110 | |||
| 111 | |||
| 112 | /** | ||
| 113 | * intel_atomic_commit - commit validated state object | ||
| 114 | * @dev: DRM device | ||
| 115 | * @state: the top-level driver state object | ||
| 116 | * @async: asynchronous commit | ||
| 117 | * | ||
| 118 | * This function commits a top-level state object that has been validated | ||
| 119 | * with drm_atomic_helper_check(). | ||
| 120 | * | ||
| 121 | * FIXME: Atomic modeset support for i915 is not yet complete. At the moment | ||
| 122 | * we can only handle plane-related operations and do not yet support | ||
| 123 | * asynchronous commit. | ||
| 124 | * | ||
| 125 | * RETURNS | ||
| 126 | * Zero for success or -errno. | ||
| 127 | */ | ||
| 128 | int intel_atomic_commit(struct drm_device *dev, | ||
| 129 | struct drm_atomic_state *state, | ||
| 130 | bool async) | ||
| 131 | { | ||
| 132 | int ret; | ||
| 133 | int i; | ||
| 134 | |||
| 135 | if (async) { | ||
| 136 | DRM_DEBUG_KMS("i915 does not yet support async commit\n"); | ||
| 137 | return -EINVAL; | ||
| 138 | } | ||
| 139 | |||
| 140 | ret = drm_atomic_helper_prepare_planes(dev, state); | ||
| 141 | if (ret) | ||
| 142 | return ret; | ||
| 143 | |||
| 144 | /* Point of no return */ | ||
| 145 | |||
| 146 | /* | ||
| 147 | * FIXME: The proper sequence here will eventually be: | ||
| 148 | * | ||
| 149 | * drm_atomic_helper_swap_state(dev, state) | ||
| 150 | * drm_atomic_helper_commit_modeset_disables(dev, state); | ||
| 151 | * drm_atomic_helper_commit_planes(dev, state); | ||
| 152 | * drm_atomic_helper_commit_modeset_enables(dev, state); | ||
| 153 | * drm_atomic_helper_wait_for_vblanks(dev, state); | ||
| 154 | * drm_atomic_helper_cleanup_planes(dev, state); | ||
| 155 | * drm_atomic_state_free(state); | ||
| 156 | * | ||
| 157 | * once we have full atomic modeset. For now, just manually update | ||
| 158 | * plane states to avoid clobbering good states with dummy states | ||
| 159 | * while nuclear pageflipping. | ||
| 160 | */ | ||
| 161 | for (i = 0; i < dev->mode_config.num_total_plane; i++) { | ||
| 162 | struct drm_plane *plane = state->planes[i]; | ||
| 163 | |||
| 164 | if (!plane) | ||
| 165 | continue; | ||
| 166 | |||
| 167 | plane->state->state = state; | ||
| 168 | swap(state->plane_states[i], plane->state); | ||
| 169 | plane->state->state = NULL; | ||
| 170 | } | ||
| 171 | |||
| 172 | /* swap crtc_scaler_state */ | ||
| 173 | for (i = 0; i < dev->mode_config.num_crtc; i++) { | ||
| 174 | struct drm_crtc *crtc = state->crtcs[i]; | ||
| 175 | if (!crtc) { | ||
| 176 | continue; | ||
| 177 | } | ||
| 178 | |||
| 179 | to_intel_crtc(crtc)->config->scaler_state = | ||
| 180 | to_intel_crtc_state(state->crtc_states[i])->scaler_state; | ||
| 181 | |||
| 182 | if (INTEL_INFO(dev)->gen >= 9) | ||
| 183 | skl_detach_scalers(to_intel_crtc(crtc)); | ||
| 184 | } | ||
| 185 | |||
| 186 | drm_atomic_helper_commit_planes(dev, state); | ||
| 187 | drm_atomic_helper_wait_for_vblanks(dev, state); | ||
| 188 | drm_atomic_helper_cleanup_planes(dev, state); | ||
| 189 | drm_atomic_state_free(state); | ||
| 190 | |||
| 191 | return 0; | ||
| 192 | } | ||
| 193 | |||
| 194 | /** | 38 | /** |
| 195 | * intel_connector_atomic_get_property - fetch connector property value | 39 | * intel_connector_atomic_get_property - fetch connector property value |
| 196 | * @connector: connector to fetch property for | 40 | * @connector: connector to fetch property for |
| @@ -298,17 +142,12 @@ int intel_atomic_setup_scalers(struct drm_device *dev, | |||
| 298 | struct drm_plane *plane = NULL; | 142 | struct drm_plane *plane = NULL; |
| 299 | struct intel_plane *intel_plane; | 143 | struct intel_plane *intel_plane; |
| 300 | struct intel_plane_state *plane_state = NULL; | 144 | struct intel_plane_state *plane_state = NULL; |
| 301 | struct intel_crtc_scaler_state *scaler_state; | 145 | struct intel_crtc_scaler_state *scaler_state = |
| 302 | struct drm_atomic_state *drm_state; | 146 | &crtc_state->scaler_state; |
| 147 | struct drm_atomic_state *drm_state = crtc_state->base.state; | ||
| 303 | int num_scalers_need; | 148 | int num_scalers_need; |
| 304 | int i, j; | 149 | int i, j; |
| 305 | 150 | ||
| 306 | if (INTEL_INFO(dev)->gen < 9 || !intel_crtc || !crtc_state) | ||
| 307 | return 0; | ||
| 308 | |||
| 309 | scaler_state = &crtc_state->scaler_state; | ||
| 310 | drm_state = crtc_state->base.state; | ||
| 311 | |||
| 312 | num_scalers_need = hweight32(scaler_state->scaler_users); | 151 | num_scalers_need = hweight32(scaler_state->scaler_users); |
| 313 | DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n", | 152 | DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n", |
| 314 | crtc_state, num_scalers_need, intel_crtc->num_scalers, | 153 | crtc_state, num_scalers_need, intel_crtc->num_scalers, |
| @@ -336,17 +175,21 @@ int intel_atomic_setup_scalers(struct drm_device *dev, | |||
| 336 | /* walkthrough scaler_users bits and start assigning scalers */ | 175 | /* walkthrough scaler_users bits and start assigning scalers */ |
| 337 | for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) { | 176 | for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) { |
| 338 | int *scaler_id; | 177 | int *scaler_id; |
| 178 | const char *name; | ||
| 179 | int idx; | ||
| 339 | 180 | ||
| 340 | /* skip if scaler not required */ | 181 | /* skip if scaler not required */ |
| 341 | if (!(scaler_state->scaler_users & (1 << i))) | 182 | if (!(scaler_state->scaler_users & (1 << i))) |
| 342 | continue; | 183 | continue; |
| 343 | 184 | ||
| 344 | if (i == SKL_CRTC_INDEX) { | 185 | if (i == SKL_CRTC_INDEX) { |
| 186 | name = "CRTC"; | ||
| 187 | idx = intel_crtc->base.base.id; | ||
| 188 | |||
| 345 | /* panel fitter case: assign as a crtc scaler */ | 189 | /* panel fitter case: assign as a crtc scaler */ |
| 346 | scaler_id = &scaler_state->scaler_id; | 190 | scaler_id = &scaler_state->scaler_id; |
| 347 | } else { | 191 | } else { |
| 348 | if (!drm_state) | 192 | name = "PLANE"; |
| 349 | continue; | ||
| 350 | 193 | ||
| 351 | /* plane scaler case: assign as a plane scaler */ | 194 | /* plane scaler case: assign as a plane scaler */ |
| 352 | /* find the plane that set the bit as scaler_user */ | 195 | /* find the plane that set the bit as scaler_user */ |
| @@ -365,9 +208,19 @@ int intel_atomic_setup_scalers(struct drm_device *dev, | |||
| 365 | plane->base.id); | 208 | plane->base.id); |
| 366 | return PTR_ERR(state); | 209 | return PTR_ERR(state); |
| 367 | } | 210 | } |
| 211 | |||
| 212 | /* | ||
| 213 | * The plane is added after the plane checks have run, | ||
| 214 | * but since this plane is unchanged, just do the | ||
| 215 | * minimum required validation. | ||
| 216 | */ | ||
| 217 | if (plane->type == DRM_PLANE_TYPE_PRIMARY) | ||
| 218 | intel_crtc->atomic.wait_for_flips = true; | ||
| 219 | crtc_state->base.planes_changed = true; | ||
| 368 | } | 220 | } |
| 369 | 221 | ||
| 370 | intel_plane = to_intel_plane(plane); | 222 | intel_plane = to_intel_plane(plane); |
| 223 | idx = plane->base.id; | ||
| 371 | 224 | ||
| 372 | /* plane on different crtc cannot be a scaler user of this crtc */ | 225 | /* plane on different crtc cannot be a scaler user of this crtc */ |
| 373 | if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) { | 226 | if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) { |
| @@ -383,23 +236,16 @@ int intel_atomic_setup_scalers(struct drm_device *dev, | |||
| 383 | for (j = 0; j < intel_crtc->num_scalers; j++) { | 236 | for (j = 0; j < intel_crtc->num_scalers; j++) { |
| 384 | if (!scaler_state->scalers[j].in_use) { | 237 | if (!scaler_state->scalers[j].in_use) { |
| 385 | scaler_state->scalers[j].in_use = 1; | 238 | scaler_state->scalers[j].in_use = 1; |
| 386 | *scaler_id = scaler_state->scalers[j].id; | 239 | *scaler_id = j; |
| 387 | DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n", | 240 | DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n", |
| 388 | intel_crtc->pipe, | 241 | intel_crtc->pipe, *scaler_id, name, idx); |
| 389 | i == SKL_CRTC_INDEX ? scaler_state->scaler_id : | ||
| 390 | plane_state->scaler_id, | ||
| 391 | i == SKL_CRTC_INDEX ? "CRTC" : "PLANE", | ||
| 392 | i == SKL_CRTC_INDEX ? intel_crtc->base.base.id : | ||
| 393 | plane->base.id); | ||
| 394 | break; | 242 | break; |
| 395 | } | 243 | } |
| 396 | } | 244 | } |
| 397 | } | 245 | } |
| 398 | 246 | ||
| 399 | if (WARN_ON(*scaler_id < 0)) { | 247 | if (WARN_ON(*scaler_id < 0)) { |
| 400 | DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", | 248 | DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx); |
| 401 | i == SKL_CRTC_INDEX ? "CRTC" : "PLANE", | ||
| 402 | i == SKL_CRTC_INDEX ? intel_crtc->base.base.id:plane->base.id); | ||
| 403 | continue; | 249 | continue; |
| 404 | } | 250 | } |
| 405 | 251 | ||
| @@ -421,3 +267,54 @@ int intel_atomic_setup_scalers(struct drm_device *dev, | |||
| 421 | 267 | ||
| 422 | return 0; | 268 | return 0; |
| 423 | } | 269 | } |
| 270 | |||
| 271 | static void | ||
| 272 | intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv, | ||
| 273 | struct intel_shared_dpll_config *shared_dpll) | ||
| 274 | { | ||
| 275 | enum intel_dpll_id i; | ||
| 276 | |||
| 277 | /* Copy shared dpll state */ | ||
| 278 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | ||
| 279 | struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; | ||
| 280 | |||
| 281 | shared_dpll[i] = pll->config; | ||
| 282 | } | ||
| 283 | } | ||
| 284 | |||
| 285 | struct intel_shared_dpll_config * | ||
| 286 | intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s) | ||
| 287 | { | ||
| 288 | struct intel_atomic_state *state = to_intel_atomic_state(s); | ||
| 289 | |||
| 290 | WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex)); | ||
| 291 | |||
| 292 | if (!state->dpll_set) { | ||
| 293 | state->dpll_set = true; | ||
| 294 | |||
| 295 | intel_atomic_duplicate_dpll_state(to_i915(s->dev), | ||
| 296 | state->shared_dpll); | ||
| 297 | } | ||
| 298 | |||
| 299 | return state->shared_dpll; | ||
| 300 | } | ||
| 301 | |||
| 302 | struct drm_atomic_state * | ||
| 303 | intel_atomic_state_alloc(struct drm_device *dev) | ||
| 304 | { | ||
| 305 | struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL); | ||
| 306 | |||
| 307 | if (!state || drm_atomic_state_init(dev, &state->base) < 0) { | ||
| 308 | kfree(state); | ||
| 309 | return NULL; | ||
| 310 | } | ||
| 311 | |||
| 312 | return &state->base; | ||
| 313 | } | ||
| 314 | |||
| 315 | void intel_atomic_state_clear(struct drm_atomic_state *s) | ||
| 316 | { | ||
| 317 | struct intel_atomic_state *state = to_intel_atomic_state(s); | ||
| 318 | drm_atomic_state_default_clear(&state->base); | ||
| 319 | state->dpll_set = false; | ||
| 320 | } | ||
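The new alloc/clear helpers above are meant to be wired into the driver's mode_config function table so that every atomic state is the subclassed intel_atomic_state carrying the dpll bookkeeping. A hedged sketch of that wiring, assuming the prototypes are visible via intel_drv.h (other callbacks omitted):

#include <drm/drm_crtc.h>
#include "intel_drv.h"

static const struct drm_mode_config_funcs example_mode_funcs = {
	/* fb_create, atomic_check, atomic_commit, ... omitted */
	.atomic_state_alloc	= intel_atomic_state_alloc,
	.atomic_state_clear	= intel_atomic_state_clear,
};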
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index 86ba4b2c3a65..f1ab8e4b9c11 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c | |||
| @@ -56,6 +56,7 @@ intel_create_plane_state(struct drm_plane *plane) | |||
| 56 | 56 | ||
| 57 | state->base.plane = plane; | 57 | state->base.plane = plane; |
| 58 | state->base.rotation = BIT(DRM_ROTATE_0); | 58 | state->base.rotation = BIT(DRM_ROTATE_0); |
| 59 | state->ckey.flags = I915_SET_COLORKEY_NONE; | ||
| 59 | 60 | ||
| 60 | return state; | 61 | return state; |
| 61 | } | 62 | } |
| @@ -114,8 +115,10 @@ static int intel_plane_atomic_check(struct drm_plane *plane, | |||
| 114 | struct intel_crtc_state *crtc_state; | 115 | struct intel_crtc_state *crtc_state; |
| 115 | struct intel_plane *intel_plane = to_intel_plane(plane); | 116 | struct intel_plane *intel_plane = to_intel_plane(plane); |
| 116 | struct intel_plane_state *intel_state = to_intel_plane_state(state); | 117 | struct intel_plane_state *intel_state = to_intel_plane_state(state); |
| 118 | struct drm_crtc_state *drm_crtc_state; | ||
| 119 | int ret; | ||
| 117 | 120 | ||
| 118 | crtc = crtc ? crtc : plane->crtc; | 121 | crtc = crtc ? crtc : plane->state->crtc; |
| 119 | intel_crtc = to_intel_crtc(crtc); | 122 | intel_crtc = to_intel_crtc(crtc); |
| 120 | 123 | ||
| 121 | /* | 124 | /* |
| @@ -127,16 +130,11 @@ static int intel_plane_atomic_check(struct drm_plane *plane, | |||
| 127 | if (!crtc) | 130 | if (!crtc) |
| 128 | return 0; | 131 | return 0; |
| 129 | 132 | ||
| 130 | /* FIXME: temporary hack necessary while we still use the plane update | 133 | drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); |
| 131 | * helper. */ | 134 | if (WARN_ON(!drm_crtc_state)) |
| 132 | if (state->state) { | 135 | return -EINVAL; |
| 133 | crtc_state = | 136 | |
| 134 | intel_atomic_get_crtc_state(state->state, intel_crtc); | 137 | crtc_state = to_intel_crtc_state(drm_crtc_state); |
| 135 | if (IS_ERR(crtc_state)) | ||
| 136 | return PTR_ERR(crtc_state); | ||
| 137 | } else { | ||
| 138 | crtc_state = intel_crtc->config; | ||
| 139 | } | ||
| 140 | 138 | ||
| 141 | /* | 139 | /* |
| 142 | * The original src/dest coordinates are stored in state->base, but | 140 | * The original src/dest coordinates are stored in state->base, but |
| @@ -160,20 +158,6 @@ static int intel_plane_atomic_check(struct drm_plane *plane, | |||
| 160 | intel_state->clip.y2 = | 158 | intel_state->clip.y2 = |
| 161 | crtc_state->base.active ? crtc_state->pipe_src_h : 0; | 159 | crtc_state->base.active ? crtc_state->pipe_src_h : 0; |
| 162 | 160 | ||
| 163 | /* | ||
| 164 | * Disabling a plane is always okay; we just need to update | ||
| 165 | * fb tracking in a special way since cleanup_fb() won't | ||
| 166 | * get called by the plane helpers. | ||
| 167 | */ | ||
| 168 | if (state->fb == NULL && plane->state->fb != NULL) { | ||
| 169 | /* | ||
| 170 | * 'prepare' is never called when plane is being disabled, so | ||
| 171 | * we need to handle frontbuffer tracking as a special case | ||
| 172 | */ | ||
| 173 | intel_crtc->atomic.disabled_planes |= | ||
| 174 | (1 << drm_plane_index(plane)); | ||
| 175 | } | ||
| 176 | |||
| 177 | if (state->fb && intel_rotation_90_or_270(state->rotation)) { | 161 | if (state->fb && intel_rotation_90_or_270(state->rotation)) { |
| 178 | if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || | 162 | if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || |
| 179 | state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) { | 163 | state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) { |
| @@ -198,7 +182,12 @@ static int intel_plane_atomic_check(struct drm_plane *plane, | |||
| 198 | } | 182 | } |
| 199 | } | 183 | } |
| 200 | 184 | ||
| 201 | return intel_plane->check_plane(plane, intel_state); | 185 | intel_state->visible = false; |
| 186 | ret = intel_plane->check_plane(plane, crtc_state, intel_state); | ||
| 187 | if (ret) | ||
| 188 | return ret; | ||
| 189 | |||
| 190 | return intel_plane_atomic_calc_changes(&crtc_state->base, state); | ||
| 202 | } | 191 | } |
| 203 | 192 | ||
| 204 | static void intel_plane_atomic_update(struct drm_plane *plane, | 193 | static void intel_plane_atomic_update(struct drm_plane *plane, |
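With the switch to drm_atomic_get_existing_crtc_state() above, the CRTC state must already be part of the atomic state before the plane checks run, otherwise the WARN_ON fires. A hedged sketch of what a caller is expected to do first (function name illustrative):

#include <linux/err.h>
#include <drm/drm_atomic.h>

static int example_add_crtc_state(struct drm_atomic_state *state,
				  struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_crtc_state(state, crtc);

	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	return 0;
}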
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 3da9b8409f20..dc32cf4585f8 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c | |||
| @@ -41,7 +41,8 @@ | |||
| 41 | * | 41 | * |
| 42 | * The disable sequences must be performed before disabling the transcoder or | 42 | * The disable sequences must be performed before disabling the transcoder or |
| 43 | * port. The enable sequences may only be performed after enabling the | 43 | * port. The enable sequences may only be performed after enabling the |
| 44 | * transcoder and port, and after completed link training. | 44 | * transcoder and port, and after completed link training. Therefore the audio |
| 45 | * enable/disable sequences are part of the modeset sequence. | ||
| 45 | * | 46 | * |
| 46 | * The codec and controller sequences could be done either parallel or serial, | 47 | * The codec and controller sequences could be done either parallel or serial, |
| 47 | * but generally the ELDV/PD change in the codec sequence indicates to the audio | 48 | * but generally the ELDV/PD change in the codec sequence indicates to the audio |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 198fc3c3291b..2ff9eb00fdec 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
| @@ -122,42 +122,6 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, | |||
| 122 | drm_mode_set_name(panel_fixed_mode); | 122 | drm_mode_set_name(panel_fixed_mode); |
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | static bool | ||
| 126 | lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a, | ||
| 127 | const struct lvds_dvo_timing *b) | ||
| 128 | { | ||
| 129 | if (a->hactive_hi != b->hactive_hi || | ||
| 130 | a->hactive_lo != b->hactive_lo) | ||
| 131 | return false; | ||
| 132 | |||
| 133 | if (a->hsync_off_hi != b->hsync_off_hi || | ||
| 134 | a->hsync_off_lo != b->hsync_off_lo) | ||
| 135 | return false; | ||
| 136 | |||
| 137 | if (a->hsync_pulse_width != b->hsync_pulse_width) | ||
| 138 | return false; | ||
| 139 | |||
| 140 | if (a->hblank_hi != b->hblank_hi || | ||
| 141 | a->hblank_lo != b->hblank_lo) | ||
| 142 | return false; | ||
| 143 | |||
| 144 | if (a->vactive_hi != b->vactive_hi || | ||
| 145 | a->vactive_lo != b->vactive_lo) | ||
| 146 | return false; | ||
| 147 | |||
| 148 | if (a->vsync_off != b->vsync_off) | ||
| 149 | return false; | ||
| 150 | |||
| 151 | if (a->vsync_pulse_width != b->vsync_pulse_width) | ||
| 152 | return false; | ||
| 153 | |||
| 154 | if (a->vblank_hi != b->vblank_hi || | ||
| 155 | a->vblank_lo != b->vblank_lo) | ||
| 156 | return false; | ||
| 157 | |||
| 158 | return true; | ||
| 159 | } | ||
| 160 | |||
| 161 | static const struct lvds_dvo_timing * | 125 | static const struct lvds_dvo_timing * |
| 162 | get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data, | 126 | get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data, |
| 163 | const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs, | 127 | const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs, |
| @@ -213,7 +177,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
| 213 | const struct lvds_dvo_timing *panel_dvo_timing; | 177 | const struct lvds_dvo_timing *panel_dvo_timing; |
| 214 | const struct lvds_fp_timing *fp_timing; | 178 | const struct lvds_fp_timing *fp_timing; |
| 215 | struct drm_display_mode *panel_fixed_mode; | 179 | struct drm_display_mode *panel_fixed_mode; |
| 216 | int i, downclock, drrs_mode; | 180 | int drrs_mode; |
| 217 | 181 | ||
| 218 | lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); | 182 | lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); |
| 219 | if (!lvds_options) | 183 | if (!lvds_options) |
| @@ -272,30 +236,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
| 272 | DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n"); | 236 | DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n"); |
| 273 | drm_mode_debug_printmodeline(panel_fixed_mode); | 237 | drm_mode_debug_printmodeline(panel_fixed_mode); |
| 274 | 238 | ||
| 275 | /* | ||
| 276 | * Iterate over the LVDS panel timing info to find the lowest clock | ||
| 277 | * for the native resolution. | ||
| 278 | */ | ||
| 279 | downclock = panel_dvo_timing->clock; | ||
| 280 | for (i = 0; i < 16; i++) { | ||
| 281 | const struct lvds_dvo_timing *dvo_timing; | ||
| 282 | |||
| 283 | dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, | ||
| 284 | lvds_lfp_data_ptrs, | ||
| 285 | i); | ||
| 286 | if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) && | ||
| 287 | dvo_timing->clock < downclock) | ||
| 288 | downclock = dvo_timing->clock; | ||
| 289 | } | ||
| 290 | |||
| 291 | if (downclock < panel_dvo_timing->clock && i915.lvds_downclock) { | ||
| 292 | dev_priv->lvds_downclock_avail = 1; | ||
| 293 | dev_priv->lvds_downclock = downclock * 10; | ||
| 294 | DRM_DEBUG_KMS("LVDS downclock is found in VBT. " | ||
| 295 | "Normal Clock %dKHz, downclock %dKHz\n", | ||
| 296 | panel_fixed_mode->clock, 10*downclock); | ||
| 297 | } | ||
| 298 | |||
| 299 | fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data, | 239 | fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data, |
| 300 | lvds_lfp_data_ptrs, | 240 | lvds_lfp_data_ptrs, |
| 301 | lvds_options->panel_type); | 241 | lvds_options->panel_type); |
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index bcb41e61877d..6d8a7bf06dfc 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c | |||
| @@ -389,6 +389,7 @@ static void finish_csr_load(const struct firmware *fw, void *context) | |||
| 389 | intel_csr_load_program(dev); | 389 | intel_csr_load_program(dev); |
| 390 | fw_loaded = true; | 390 | fw_loaded = true; |
| 391 | 391 | ||
| 392 | DRM_DEBUG_KMS("Finished loading %s\n", dev_priv->csr.fw_path); | ||
| 392 | out: | 393 | out: |
| 393 | if (fw_loaded) | 394 | if (fw_loaded) |
| 394 | intel_runtime_pm_put(dev_priv); | 395 | intel_runtime_pm_put(dev_priv); |
| @@ -422,6 +423,8 @@ void intel_csr_ucode_init(struct drm_device *dev) | |||
| 422 | return; | 423 | return; |
| 423 | } | 424 | } |
| 424 | 425 | ||
| 426 | DRM_DEBUG_KMS("Loading %s\n", csr->fw_path); | ||
| 427 | |||
| 425 | /* | 428 | /* |
| 426 | * Obtain a runtime pm reference, until CSR is loaded, | 429 | * Obtain a runtime pm reference, until CSR is loaded, |
| 427 | * to avoid entering runtime-suspend. | 430 | * to avoid entering runtime-suspend. |
| @@ -459,7 +462,8 @@ void intel_csr_ucode_fini(struct drm_device *dev) | |||
| 459 | 462 | ||
| 460 | void assert_csr_loaded(struct drm_i915_private *dev_priv) | 463 | void assert_csr_loaded(struct drm_i915_private *dev_priv) |
| 461 | { | 464 | { |
| 462 | WARN((intel_csr_load_status_get(dev_priv) != FW_LOADED), "CSR is not loaded.\n"); | 465 | WARN(intel_csr_load_status_get(dev_priv) != FW_LOADED, |
| 466 | "CSR is not loaded.\n"); | ||
| 463 | WARN(!I915_READ(CSR_PROGRAM_BASE), | 467 | WARN(!I915_READ(CSR_PROGRAM_BASE), |
| 464 | "CSR program storage start is NULL\n"); | 468 | "CSR program storage start is NULL\n"); |
| 465 | WARN(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n"); | 469 | WARN(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n"); |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index cacb07b7a8f1..9a40bfb20e0c 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | struct ddi_buf_trans { | 31 | struct ddi_buf_trans { |
| 32 | u32 trans1; /* balance leg enable, de-emph level */ | 32 | u32 trans1; /* balance leg enable, de-emph level */ |
| 33 | u32 trans2; /* vref sel, vswing */ | 33 | u32 trans2; /* vref sel, vswing */ |
| 34 | u8 i_boost; /* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */ | ||
| 34 | }; | 35 | }; |
| 35 | 36 | ||
| 36 | /* HDMI/DVI modes ignore everything but the last 2 items. So we share | 37 | /* HDMI/DVI modes ignore everything but the last 2 items. So we share |
| @@ -38,134 +39,213 @@ struct ddi_buf_trans { | |||
| 38 | * automatically adapt to HDMI connections as well | 39 | * automatically adapt to HDMI connections as well |
| 39 | */ | 40 | */ |
| 40 | static const struct ddi_buf_trans hsw_ddi_translations_dp[] = { | 41 | static const struct ddi_buf_trans hsw_ddi_translations_dp[] = { |
| 41 | { 0x00FFFFFF, 0x0006000E }, | 42 | { 0x00FFFFFF, 0x0006000E, 0x0 }, |
| 42 | { 0x00D75FFF, 0x0005000A }, | 43 | { 0x00D75FFF, 0x0005000A, 0x0 }, |
| 43 | { 0x00C30FFF, 0x00040006 }, | 44 | { 0x00C30FFF, 0x00040006, 0x0 }, |
| 44 | { 0x80AAAFFF, 0x000B0000 }, | 45 | { 0x80AAAFFF, 0x000B0000, 0x0 }, |
| 45 | { 0x00FFFFFF, 0x0005000A }, | 46 | { 0x00FFFFFF, 0x0005000A, 0x0 }, |
| 46 | { 0x00D75FFF, 0x000C0004 }, | 47 | { 0x00D75FFF, 0x000C0004, 0x0 }, |
| 47 | { 0x80C30FFF, 0x000B0000 }, | 48 | { 0x80C30FFF, 0x000B0000, 0x0 }, |
| 48 | { 0x00FFFFFF, 0x00040006 }, | 49 | { 0x00FFFFFF, 0x00040006, 0x0 }, |
| 49 | { 0x80D75FFF, 0x000B0000 }, | 50 | { 0x80D75FFF, 0x000B0000, 0x0 }, |
| 50 | }; | 51 | }; |
| 51 | 52 | ||
| 52 | static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = { | 53 | static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = { |
| 53 | { 0x00FFFFFF, 0x0007000E }, | 54 | { 0x00FFFFFF, 0x0007000E, 0x0 }, |
| 54 | { 0x00D75FFF, 0x000F000A }, | 55 | { 0x00D75FFF, 0x000F000A, 0x0 }, |
| 55 | { 0x00C30FFF, 0x00060006 }, | 56 | { 0x00C30FFF, 0x00060006, 0x0 }, |
| 56 | { 0x00AAAFFF, 0x001E0000 }, | 57 | { 0x00AAAFFF, 0x001E0000, 0x0 }, |
| 57 | { 0x00FFFFFF, 0x000F000A }, | 58 | { 0x00FFFFFF, 0x000F000A, 0x0 }, |
| 58 | { 0x00D75FFF, 0x00160004 }, | 59 | { 0x00D75FFF, 0x00160004, 0x0 }, |
| 59 | { 0x00C30FFF, 0x001E0000 }, | 60 | { 0x00C30FFF, 0x001E0000, 0x0 }, |
| 60 | { 0x00FFFFFF, 0x00060006 }, | 61 | { 0x00FFFFFF, 0x00060006, 0x0 }, |
| 61 | { 0x00D75FFF, 0x001E0000 }, | 62 | { 0x00D75FFF, 0x001E0000, 0x0 }, |
| 62 | }; | 63 | }; |
| 63 | 64 | ||
| 64 | static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = { | 65 | static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = { |
| 65 | /* Idx NT mV d T mV d db */ | 66 | /* Idx NT mV d T mV d db */ |
| 66 | { 0x00FFFFFF, 0x0006000E }, /* 0: 400 400 0 */ | 67 | { 0x00FFFFFF, 0x0006000E, 0x0 },/* 0: 400 400 0 */ |
| 67 | { 0x00E79FFF, 0x000E000C }, /* 1: 400 500 2 */ | 68 | { 0x00E79FFF, 0x000E000C, 0x0 },/* 1: 400 500 2 */ |
| 68 | { 0x00D75FFF, 0x0005000A }, /* 2: 400 600 3.5 */ | 69 | { 0x00D75FFF, 0x0005000A, 0x0 },/* 2: 400 600 3.5 */ |
| 69 | { 0x00FFFFFF, 0x0005000A }, /* 3: 600 600 0 */ | 70 | { 0x00FFFFFF, 0x0005000A, 0x0 },/* 3: 600 600 0 */ |
| 70 | { 0x00E79FFF, 0x001D0007 }, /* 4: 600 750 2 */ | 71 | { 0x00E79FFF, 0x001D0007, 0x0 },/* 4: 600 750 2 */ |
| 71 | { 0x00D75FFF, 0x000C0004 }, /* 5: 600 900 3.5 */ | 72 | { 0x00D75FFF, 0x000C0004, 0x0 },/* 5: 600 900 3.5 */ |
| 72 | { 0x00FFFFFF, 0x00040006 }, /* 6: 800 800 0 */ | 73 | { 0x00FFFFFF, 0x00040006, 0x0 },/* 6: 800 800 0 */ |
| 73 | { 0x80E79FFF, 0x00030002 }, /* 7: 800 1000 2 */ | 74 | { 0x80E79FFF, 0x00030002, 0x0 },/* 7: 800 1000 2 */ |
| 74 | { 0x00FFFFFF, 0x00140005 }, /* 8: 850 850 0 */ | 75 | { 0x00FFFFFF, 0x00140005, 0x0 },/* 8: 850 850 0 */ |
| 75 | { 0x00FFFFFF, 0x000C0004 }, /* 9: 900 900 0 */ | 76 | { 0x00FFFFFF, 0x000C0004, 0x0 },/* 9: 900 900 0 */ |
| 76 | { 0x00FFFFFF, 0x001C0003 }, /* 10: 950 950 0 */ | 77 | { 0x00FFFFFF, 0x001C0003, 0x0 },/* 10: 950 950 0 */ |
| 77 | { 0x80FFFFFF, 0x00030002 }, /* 11: 1000 1000 0 */ | 78 | { 0x80FFFFFF, 0x00030002, 0x0 },/* 11: 1000 1000 0 */ |
| 78 | }; | 79 | }; |
| 79 | 80 | ||
| 80 | static const struct ddi_buf_trans bdw_ddi_translations_edp[] = { | 81 | static const struct ddi_buf_trans bdw_ddi_translations_edp[] = { |
| 81 | { 0x00FFFFFF, 0x00000012 }, | 82 | { 0x00FFFFFF, 0x00000012, 0x0 }, |
| 82 | { 0x00EBAFFF, 0x00020011 }, | 83 | { 0x00EBAFFF, 0x00020011, 0x0 }, |
| 83 | { 0x00C71FFF, 0x0006000F }, | 84 | { 0x00C71FFF, 0x0006000F, 0x0 }, |
| 84 | { 0x00AAAFFF, 0x000E000A }, | 85 | { 0x00AAAFFF, 0x000E000A, 0x0 }, |
| 85 | { 0x00FFFFFF, 0x00020011 }, | 86 | { 0x00FFFFFF, 0x00020011, 0x0 }, |
| 86 | { 0x00DB6FFF, 0x0005000F }, | 87 | { 0x00DB6FFF, 0x0005000F, 0x0 }, |
| 87 | { 0x00BEEFFF, 0x000A000C }, | 88 | { 0x00BEEFFF, 0x000A000C, 0x0 }, |
| 88 | { 0x00FFFFFF, 0x0005000F }, | 89 | { 0x00FFFFFF, 0x0005000F, 0x0 }, |
| 89 | { 0x00DB6FFF, 0x000A000C }, | 90 | { 0x00DB6FFF, 0x000A000C, 0x0 }, |
| 90 | }; | 91 | }; |
| 91 | 92 | ||
| 92 | static const struct ddi_buf_trans bdw_ddi_translations_dp[] = { | 93 | static const struct ddi_buf_trans bdw_ddi_translations_dp[] = { |
| 93 | { 0x00FFFFFF, 0x0007000E }, | 94 | { 0x00FFFFFF, 0x0007000E, 0x0 }, |
| 94 | { 0x00D75FFF, 0x000E000A }, | 95 | { 0x00D75FFF, 0x000E000A, 0x0 }, |
| 95 | { 0x00BEFFFF, 0x00140006 }, | 96 | { 0x00BEFFFF, 0x00140006, 0x0 }, |
| 96 | { 0x80B2CFFF, 0x001B0002 }, | 97 | { 0x80B2CFFF, 0x001B0002, 0x0 }, |
| 97 | { 0x00FFFFFF, 0x000E000A }, | 98 | { 0x00FFFFFF, 0x000E000A, 0x0 }, |
| 98 | { 0x00DB6FFF, 0x00160005 }, | 99 | { 0x00DB6FFF, 0x00160005, 0x0 }, |
| 99 | { 0x80C71FFF, 0x001A0002 }, | 100 | { 0x80C71FFF, 0x001A0002, 0x0 }, |
| 100 | { 0x00F7DFFF, 0x00180004 }, | 101 | { 0x00F7DFFF, 0x00180004, 0x0 }, |
| 101 | { 0x80D75FFF, 0x001B0002 }, | 102 | { 0x80D75FFF, 0x001B0002, 0x0 }, |
| 102 | }; | 103 | }; |
| 103 | 104 | ||
| 104 | static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = { | 105 | static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = { |
| 105 | { 0x00FFFFFF, 0x0001000E }, | 106 | { 0x00FFFFFF, 0x0001000E, 0x0 }, |
| 106 | { 0x00D75FFF, 0x0004000A }, | 107 | { 0x00D75FFF, 0x0004000A, 0x0 }, |
| 107 | { 0x00C30FFF, 0x00070006 }, | 108 | { 0x00C30FFF, 0x00070006, 0x0 }, |
| 108 | { 0x00AAAFFF, 0x000C0000 }, | 109 | { 0x00AAAFFF, 0x000C0000, 0x0 }, |
| 109 | { 0x00FFFFFF, 0x0004000A }, | 110 | { 0x00FFFFFF, 0x0004000A, 0x0 }, |
| 110 | { 0x00D75FFF, 0x00090004 }, | 111 | { 0x00D75FFF, 0x00090004, 0x0 }, |
| 111 | { 0x00C30FFF, 0x000C0000 }, | 112 | { 0x00C30FFF, 0x000C0000, 0x0 }, |
| 112 | { 0x00FFFFFF, 0x00070006 }, | 113 | { 0x00FFFFFF, 0x00070006, 0x0 }, |
| 113 | { 0x00D75FFF, 0x000C0000 }, | 114 | { 0x00D75FFF, 0x000C0000, 0x0 }, |
| 114 | }; | 115 | }; |
| 115 | 116 | ||
| 116 | static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = { | 117 | static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = { |
| 117 | /* Idx NT mV d T mV df db */ | 118 | /* Idx NT mV d T mV df db */ |
| 118 | { 0x00FFFFFF, 0x0007000E }, /* 0: 400 400 0 */ | 119 | { 0x00FFFFFF, 0x0007000E, 0x0 },/* 0: 400 400 0 */ |
| 119 | { 0x00D75FFF, 0x000E000A }, /* 1: 400 600 3.5 */ | 120 | { 0x00D75FFF, 0x000E000A, 0x0 },/* 1: 400 600 3.5 */ |
| 120 | { 0x00BEFFFF, 0x00140006 }, /* 2: 400 800 6 */ | 121 | { 0x00BEFFFF, 0x00140006, 0x0 },/* 2: 400 800 6 */ |
| 121 | { 0x00FFFFFF, 0x0009000D }, /* 3: 450 450 0 */ | 122 | { 0x00FFFFFF, 0x0009000D, 0x0 },/* 3: 450 450 0 */ |
| 122 | { 0x00FFFFFF, 0x000E000A }, /* 4: 600 600 0 */ | 123 | { 0x00FFFFFF, 0x000E000A, 0x0 },/* 4: 600 600 0 */ |
| 123 | { 0x00D7FFFF, 0x00140006 }, /* 5: 600 800 2.5 */ | 124 | { 0x00D7FFFF, 0x00140006, 0x0 },/* 5: 600 800 2.5 */ |
| 124 | { 0x80CB2FFF, 0x001B0002 }, /* 6: 600 1000 4.5 */ | 125 | { 0x80CB2FFF, 0x001B0002, 0x0 },/* 6: 600 1000 4.5 */ |
| 125 | { 0x00FFFFFF, 0x00140006 }, /* 7: 800 800 0 */ | 126 | { 0x00FFFFFF, 0x00140006, 0x0 },/* 7: 800 800 0 */ |
| 126 | { 0x80E79FFF, 0x001B0002 }, /* 8: 800 1000 2 */ | 127 | { 0x80E79FFF, 0x001B0002, 0x0 },/* 8: 800 1000 2 */ |
| 127 | { 0x80FFFFFF, 0x001B0002 }, /* 9: 1000 1000 0 */ | 128 | { 0x80FFFFFF, 0x001B0002, 0x0 },/* 9: 1000 1000 0 */ |
| 128 | }; | 129 | }; |
| 129 | 130 | ||
| 131 | /* Skylake H, S, and Skylake Y with 0.95V VccIO */ | ||
| 130 | static const struct ddi_buf_trans skl_ddi_translations_dp[] = { | 132 | static const struct ddi_buf_trans skl_ddi_translations_dp[] = { |
| 131 | { 0x00000018, 0x000000a2 }, | 133 | { 0x00002016, 0x000000A0, 0x0 }, |
| 132 | { 0x00004014, 0x0000009B }, | 134 | { 0x00005012, 0x0000009B, 0x0 }, |
| 133 | { 0x00006012, 0x00000088 }, | 135 | { 0x00007011, 0x00000088, 0x0 }, |
| 134 | { 0x00008010, 0x00000087 }, | 136 | { 0x00009010, 0x000000C7, 0x0 }, |
| 135 | { 0x00000018, 0x0000009B }, | 137 | { 0x00002016, 0x0000009B, 0x0 }, |
| 136 | { 0x00004014, 0x00000088 }, | 138 | { 0x00005012, 0x00000088, 0x0 }, |
| 137 | { 0x00006012, 0x00000087 }, | 139 | { 0x00007011, 0x000000C7, 0x0 }, |
| 138 | { 0x00000018, 0x00000088 }, | 140 | { 0x00002016, 0x000000DF, 0x0 }, |
| 139 | { 0x00004014, 0x00000087 }, | 141 | { 0x00005012, 0x000000C7, 0x0 }, |
| 140 | }; | 142 | }; |
| 141 | 143 | ||
| 142 | /* eDP 1.4 low vswing translation parameters */ | 144 | /* Skylake U */ |
| 145 | static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { | ||
| 146 | { 0x00002016, 0x000000A2, 0x0 }, | ||
| 147 | { 0x00005012, 0x00000088, 0x0 }, | ||
| 148 | { 0x00007011, 0x00000087, 0x0 }, | ||
| 149 | { 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost */ | ||
| 150 | { 0x00002016, 0x0000009D, 0x0 }, | ||
| 151 | { 0x00005012, 0x000000C7, 0x0 }, | ||
| 152 | { 0x00007011, 0x000000C7, 0x0 }, | ||
| 153 | { 0x00002016, 0x00000088, 0x0 }, | ||
| 154 | { 0x00005012, 0x000000C7, 0x0 }, | ||
| 155 | }; | ||
| 156 | |||
| 157 | /* Skylake Y with 0.85V VccIO */ | ||
| 158 | static const struct ddi_buf_trans skl_y_085v_ddi_translations_dp[] = { | ||
| 159 | { 0x00000018, 0x000000A2, 0x0 }, | ||
| 160 | { 0x00005012, 0x00000088, 0x0 }, | ||
| 161 | { 0x00007011, 0x00000087, 0x0 }, | ||
| 162 | { 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost */ | ||
| 163 | { 0x00000018, 0x0000009D, 0x0 }, | ||
| 164 | { 0x00005012, 0x000000C7, 0x0 }, | ||
| 165 | { 0x00007011, 0x000000C7, 0x0 }, | ||
| 166 | { 0x00000018, 0x00000088, 0x0 }, | ||
| 167 | { 0x00005012, 0x000000C7, 0x0 }, | ||
| 168 | }; | ||
| 169 | |||
| 170 | /* | ||
| 171 | * Skylake H and S, and Skylake Y with 0.95V VccIO | ||
| 172 | * eDP 1.4 low vswing translation parameters | ||
| 173 | */ | ||
| 143 | static const struct ddi_buf_trans skl_ddi_translations_edp[] = { | 174 | static const struct ddi_buf_trans skl_ddi_translations_edp[] = { |
| 144 | { 0x00000018, 0x000000a8 }, | 175 | { 0x00000018, 0x000000A8, 0x0 }, |
| 145 | { 0x00002016, 0x000000ab }, | 176 | { 0x00004013, 0x000000A9, 0x0 }, |
| 146 | { 0x00006012, 0x000000a2 }, | 177 | { 0x00007011, 0x000000A2, 0x0 }, |
| 147 | { 0x00008010, 0x00000088 }, | 178 | { 0x00009010, 0x0000009C, 0x0 }, |
| 148 | { 0x00000018, 0x000000ab }, | 179 | { 0x00000018, 0x000000A9, 0x0 }, |
| 149 | { 0x00004014, 0x000000a2 }, | 180 | { 0x00006013, 0x000000A2, 0x0 }, |
| 150 | { 0x00006012, 0x000000a6 }, | 181 | { 0x00007011, 0x000000A6, 0x0 }, |
| 151 | { 0x00000018, 0x000000a2 }, | 182 | { 0x00000018, 0x000000AB, 0x0 }, |
| 152 | { 0x00005013, 0x0000009c }, | 183 | { 0x00007013, 0x0000009F, 0x0 }, |
| 153 | { 0x00000018, 0x00000088 }, | 184 | { 0x00000018, 0x000000DF, 0x0 }, |
| 185 | }; | ||
| 186 | |||
| 187 | /* | ||
| 188 | * Skylake U | ||
| 189 | * eDP 1.4 low vswing translation parameters | ||
| 190 | */ | ||
| 191 | static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = { | ||
| 192 | { 0x00000018, 0x000000A8, 0x0 }, | ||
| 193 | { 0x00004013, 0x000000A9, 0x0 }, | ||
| 194 | { 0x00007011, 0x000000A2, 0x0 }, | ||
| 195 | { 0x00009010, 0x0000009C, 0x0 }, | ||
| 196 | { 0x00000018, 0x000000A9, 0x0 }, | ||
| 197 | { 0x00006013, 0x000000A2, 0x0 }, | ||
| 198 | { 0x00007011, 0x000000A6, 0x0 }, | ||
| 199 | { 0x00002016, 0x000000AB, 0x0 }, | ||
| 200 | { 0x00005013, 0x0000009F, 0x0 }, | ||
| 201 | { 0x00000018, 0x000000DF, 0x0 }, | ||
| 154 | }; | 202 | }; |
| 155 | 203 | ||
| 204 | /* | ||
| 205 | * Skylake Y with 0.85V VccIO | ||
| 206 | * eDP 1.4 low vswing translation parameters | ||
| 207 | */ | ||
| 208 | static const struct ddi_buf_trans skl_y_085v_ddi_translations_edp[] = { | ||
| 209 | { 0x00000018, 0x000000A8, 0x0 }, | ||
| 210 | { 0x00004013, 0x000000AB, 0x0 }, | ||
| 211 | { 0x00007011, 0x000000A4, 0x0 }, | ||
| 212 | { 0x00009010, 0x000000DF, 0x0 }, | ||
| 213 | { 0x00000018, 0x000000AA, 0x0 }, | ||
| 214 | { 0x00006013, 0x000000A4, 0x0 }, | ||
| 215 | { 0x00007011, 0x0000009D, 0x0 }, | ||
| 216 | { 0x00000018, 0x000000A0, 0x0 }, | ||
| 217 | { 0x00006012, 0x000000DF, 0x0 }, | ||
| 218 | { 0x00000018, 0x0000008A, 0x0 }, | ||
| 219 | }; | ||
| 156 | 220 | ||
| 221 | /* Skylake H, S and U, and Skylake Y with 0.95V VccIO */ | ||
| 157 | static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = { | 222 | static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = { |
| 158 | { 0x00000018, 0x000000ac }, | 223 | { 0x00000018, 0x000000AC, 0x0 }, |
| 159 | { 0x00005012, 0x0000009d }, | 224 | { 0x00005012, 0x0000009D, 0x0 }, |
| 160 | { 0x00007011, 0x00000088 }, | 225 | { 0x00007011, 0x00000088, 0x0 }, |
| 161 | { 0x00000018, 0x000000a1 }, | 226 | { 0x00000018, 0x000000A1, 0x0 }, |
| 162 | { 0x00000018, 0x00000098 }, | 227 | { 0x00000018, 0x00000098, 0x0 }, |
| 163 | { 0x00004013, 0x00000088 }, | 228 | { 0x00004013, 0x00000088, 0x0 }, |
| 164 | { 0x00006012, 0x00000087 }, | 229 | { 0x00006012, 0x00000087, 0x0 }, |
| 165 | { 0x00000018, 0x000000df }, | 230 | { 0x00000018, 0x000000DF, 0x0 }, |
| 166 | { 0x00003015, 0x00000087 }, | 231 | { 0x00003015, 0x00000087, 0x0 }, /* Default */ |
| 167 | { 0x00003015, 0x000000c7 }, | 232 | { 0x00003015, 0x000000C7, 0x0 }, |
| 168 | { 0x00000018, 0x000000c7 }, | 233 | { 0x00000018, 0x000000C7, 0x0 }, |
| 234 | }; | ||
| 235 | |||
| 236 | /* Skylake Y with 0.85V VccIO */ | ||
| 237 | static const struct ddi_buf_trans skl_y_085v_ddi_translations_hdmi[] = { | ||
| 238 | { 0x00000018, 0x000000A1, 0x0 }, | ||
| 239 | { 0x00005012, 0x000000DF, 0x0 }, | ||
| 240 | { 0x00007011, 0x00000084, 0x0 }, | ||
| 241 | { 0x00000018, 0x000000A4, 0x0 }, | ||
| 242 | { 0x00000018, 0x0000009D, 0x0 }, | ||
| 243 | { 0x00004013, 0x00000080, 0x0 }, | ||
| 244 | { 0x00006013, 0x000000C7, 0x0 }, | ||
| 245 | { 0x00000018, 0x0000008A, 0x0 }, | ||
| 246 | { 0x00003015, 0x000000C7, 0x0 }, /* Default */ | ||
| 247 | { 0x80003015, 0x000000C7, 0x7 }, /* Uses I_boost */ | ||
| 248 | { 0x00000018, 0x000000C7, 0x0 }, | ||
| 169 | }; | 249 | }; |
| 170 | 250 | ||
| 171 | struct bxt_ddi_buf_trans { | 251 | struct bxt_ddi_buf_trans { |
| @@ -181,16 +261,16 @@ struct bxt_ddi_buf_trans { | |||
| 181 | */ | 261 | */ |
| 182 | static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = { | 262 | static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = { |
| 183 | /* Idx NT mV diff db */ | 263 | /* Idx NT mV diff db */ |
| 184 | { 52, 0, 0, 128, true }, /* 0: 400 0 */ | 264 | { 52, 0x9A, 0, 128, true }, /* 0: 400 0 */ |
| 185 | { 78, 0, 0, 85, false }, /* 1: 400 3.5 */ | 265 | { 78, 0x9A, 0, 85, false }, /* 1: 400 3.5 */ |
| 186 | { 104, 0, 0, 64, false }, /* 2: 400 6 */ | 266 | { 104, 0x9A, 0, 64, false }, /* 2: 400 6 */ |
| 187 | { 154, 0, 0, 43, false }, /* 3: 400 9.5 */ | 267 | { 154, 0x9A, 0, 43, false }, /* 3: 400 9.5 */ |
| 188 | { 77, 0, 0, 128, false }, /* 4: 600 0 */ | 268 | { 77, 0x9A, 0, 128, false }, /* 4: 600 0 */ |
| 189 | { 116, 0, 0, 85, false }, /* 5: 600 3.5 */ | 269 | { 116, 0x9A, 0, 85, false }, /* 5: 600 3.5 */ |
| 190 | { 154, 0, 0, 64, false }, /* 6: 600 6 */ | 270 | { 154, 0x9A, 0, 64, false }, /* 6: 600 6 */ |
| 191 | { 102, 0, 0, 128, false }, /* 7: 800 0 */ | 271 | { 102, 0x9A, 0, 128, false }, /* 7: 800 0 */ |
| 192 | { 154, 0, 0, 85, false }, /* 8: 800 3.5 */ | 272 | { 154, 0x9A, 0, 85, false }, /* 8: 800 3.5 */ |
| 193 | { 154, 0x9A, 1, 128, false }, /* 9: 1200 0 */ | 273 | { 154, 0x9A, 1, 128, false }, /* 9: 1200 0 */ |
| 194 | }; | 274 | }; |
| 195 | 275 | ||
| 196 | /* BSpec has 2 recommended values - entries 0 and 8. | 276 | /* BSpec has 2 recommended values - entries 0 and 8. |
| @@ -198,18 +278,21 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = { | |||
| 198 | */ | 278 | */ |
| 199 | static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = { | 279 | static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = { |
| 200 | /* Idx NT mV diff db */ | 280 | /* Idx NT mV diff db */ |
| 201 | { 52, 0, 0, 128, false }, /* 0: 400 0 */ | 281 | { 52, 0x9A, 0, 128, false }, /* 0: 400 0 */ |
| 202 | { 52, 0, 0, 85, false }, /* 1: 400 3.5 */ | 282 | { 52, 0x9A, 0, 85, false }, /* 1: 400 3.5 */ |
| 203 | { 52, 0, 0, 64, false }, /* 2: 400 6 */ | 283 | { 52, 0x9A, 0, 64, false }, /* 2: 400 6 */ |
| 204 | { 42, 0, 0, 43, false }, /* 3: 400 9.5 */ | 284 | { 42, 0x9A, 0, 43, false }, /* 3: 400 9.5 */ |
| 205 | { 77, 0, 0, 128, false }, /* 4: 600 0 */ | 285 | { 77, 0x9A, 0, 128, false }, /* 4: 600 0 */ |
| 206 | { 77, 0, 0, 85, false }, /* 5: 600 3.5 */ | 286 | { 77, 0x9A, 0, 85, false }, /* 5: 600 3.5 */ |
| 207 | { 77, 0, 0, 64, false }, /* 6: 600 6 */ | 287 | { 77, 0x9A, 0, 64, false }, /* 6: 600 6 */ |
| 208 | { 102, 0, 0, 128, false }, /* 7: 800 0 */ | 288 | { 102, 0x9A, 0, 128, false }, /* 7: 800 0 */ |
| 209 | { 102, 0, 0, 85, false }, /* 8: 800 3.5 */ | 289 | { 102, 0x9A, 0, 85, false }, /* 8: 800 3.5 */ |
| 210 | { 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */ | 290 | { 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */ |
| 211 | }; | 291 | }; |
| 212 | 292 | ||
| 293 | static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level, | ||
| 294 | enum port port, int type); | ||
| 295 | |||
| 213 | static void ddi_get_encoder_port(struct intel_encoder *intel_encoder, | 296 | static void ddi_get_encoder_port(struct intel_encoder *intel_encoder, |
| 214 | struct intel_digital_port **dig_port, | 297 | struct intel_digital_port **dig_port, |
| 215 | enum port *port) | 298 | enum port *port) |
| @@ -249,6 +332,102 @@ intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port) | |||
| 249 | return intel_dig_port->hdmi.hdmi_reg; | 332 | return intel_dig_port->hdmi.hdmi_reg; |
| 250 | } | 333 | } |
| 251 | 334 | ||
| 335 | static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev, | ||
| 336 | int *n_entries) | ||
| 337 | { | ||
| 338 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 339 | const struct ddi_buf_trans *ddi_translations; | ||
| 340 | static int is_095v = -1; | ||
| 341 | |||
| 342 | if (is_095v == -1) { | ||
| 343 | u32 spr1 = I915_READ(UAIMI_SPR1); | ||
| 344 | |||
| 345 | is_095v = spr1 & SKL_VCCIO_MASK; | ||
| 346 | } | ||
| 347 | |||
| 348 | if (IS_SKL_ULX(dev) && !is_095v) { | ||
| 349 | ddi_translations = skl_y_085v_ddi_translations_dp; | ||
| 350 | *n_entries = ARRAY_SIZE(skl_y_085v_ddi_translations_dp); | ||
| 351 | } else if (IS_SKL_ULT(dev)) { | ||
| 352 | ddi_translations = skl_u_ddi_translations_dp; | ||
| 353 | *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp); | ||
| 354 | } else { | ||
| 355 | ddi_translations = skl_ddi_translations_dp; | ||
| 356 | *n_entries = ARRAY_SIZE(skl_ddi_translations_dp); | ||
| 357 | } | ||
| 358 | |||
| 359 | return ddi_translations; | ||
| 360 | } | ||
| 361 | |||
| 362 | static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev, | ||
| 363 | int *n_entries) | ||
| 364 | { | ||
| 365 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 366 | const struct ddi_buf_trans *ddi_translations; | ||
| 367 | static int is_095v = -1; | ||
| 368 | |||
| 369 | if (is_095v == -1) { | ||
| 370 | u32 spr1 = I915_READ(UAIMI_SPR1); | ||
| 371 | |||
| 372 | is_095v = spr1 & SKL_VCCIO_MASK; | ||
| 373 | } | ||
| 374 | |||
| 375 | if (IS_SKL_ULX(dev) && !is_095v) { | ||
| 376 | if (dev_priv->edp_low_vswing) { | ||
| 377 | ddi_translations = skl_y_085v_ddi_translations_edp; | ||
| 378 | *n_entries = | ||
| 379 | ARRAY_SIZE(skl_y_085v_ddi_translations_edp); | ||
| 380 | } else { | ||
| 381 | ddi_translations = skl_y_085v_ddi_translations_dp; | ||
| 382 | *n_entries = | ||
| 383 | ARRAY_SIZE(skl_y_085v_ddi_translations_dp); | ||
| 384 | } | ||
| 385 | } else if (IS_SKL_ULT(dev)) { | ||
| 386 | if (dev_priv->edp_low_vswing) { | ||
| 387 | ddi_translations = skl_u_ddi_translations_edp; | ||
| 388 | *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp); | ||
| 389 | } else { | ||
| 390 | ddi_translations = skl_u_ddi_translations_dp; | ||
| 391 | *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp); | ||
| 392 | } | ||
| 393 | } else { | ||
| 394 | if (dev_priv->edp_low_vswing) { | ||
| 395 | ddi_translations = skl_ddi_translations_edp; | ||
| 396 | *n_entries = ARRAY_SIZE(skl_ddi_translations_edp); | ||
| 397 | } else { | ||
| 398 | ddi_translations = skl_ddi_translations_dp; | ||
| 399 | *n_entries = ARRAY_SIZE(skl_ddi_translations_dp); | ||
| 400 | } | ||
| 401 | } | ||
| 402 | |||
| 403 | return ddi_translations; | ||
| 404 | } | ||
| 405 | |||
| 406 | static const struct ddi_buf_trans * | ||
| 407 | skl_get_buf_trans_hdmi(struct drm_device *dev, | ||
| 408 | int *n_entries) | ||
| 409 | { | ||
| 410 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 411 | const struct ddi_buf_trans *ddi_translations; | ||
| 412 | static int is_095v = -1; | ||
| 413 | |||
| 414 | if (is_095v == -1) { | ||
| 415 | u32 spr1 = I915_READ(UAIMI_SPR1); | ||
| 416 | |||
| 417 | is_095v = spr1 & SKL_VCCIO_MASK; | ||
| 418 | } | ||
| 419 | |||
| 420 | if (IS_SKL_ULX(dev) && !is_095v) { | ||
| 421 | ddi_translations = skl_y_085v_ddi_translations_hdmi; | ||
| 422 | *n_entries = ARRAY_SIZE(skl_y_085v_ddi_translations_hdmi); | ||
| 423 | } else { | ||
| 424 | ddi_translations = skl_ddi_translations_hdmi; | ||
| 425 | *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi); | ||
| 426 | } | ||
| 427 | |||
| 428 | return ddi_translations; | ||
| 429 | } | ||
| 430 | |||
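All three skl_get_buf_trans_*() helpers above follow the same shape: probe the I/O rail voltage once through UAIMI_SPR1, cache the answer in a function-local static, and hand back a translation table together with its length via an out-parameter. As a side note on the repeated probe, a minimal standalone sketch of the same lazy-cache pattern is shown below; read_vccio() is a made-up stand-in for the register read, not a driver function.

#include <stdio.h>

/* hypothetical stand-in for the UAIMI_SPR1 / SKL_VCCIO_MASK read */
static int read_vccio(void)
{
	return 1;	/* pretend the I/O rail runs at 0.95V */
}

static int is_095v(void)
{
	static int cached = -1;	/* probe once, remember the result */

	if (cached == -1)
		cached = read_vccio();

	return cached;
}

int main(void)
{
	printf("0.95V VCCIO: %s\n", is_095v() ? "yes" : "no");
	return 0;
}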
| 252 | /* | 431 | /* |
| 253 | * Starting with Haswell, DDI port buffers must be programmed with correct | 432 | * Starting with Haswell, DDI port buffers must be programmed with correct |
| 254 | * values in advance. The buffer values are different for FDI and DP modes, | 433 | * values in advance. The buffer values are different for FDI and DP modes, |
| @@ -280,19 +459,13 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, | |||
| 280 | return; | 459 | return; |
| 281 | } else if (IS_SKYLAKE(dev)) { | 460 | } else if (IS_SKYLAKE(dev)) { |
| 282 | ddi_translations_fdi = NULL; | 461 | ddi_translations_fdi = NULL; |
| 283 | ddi_translations_dp = skl_ddi_translations_dp; | 462 | ddi_translations_dp = |
| 284 | n_dp_entries = ARRAY_SIZE(skl_ddi_translations_dp); | 463 | skl_get_buf_trans_dp(dev, &n_dp_entries); |
| 285 | if (dev_priv->edp_low_vswing) { | 464 | ddi_translations_edp = |
| 286 | ddi_translations_edp = skl_ddi_translations_edp; | 465 | skl_get_buf_trans_edp(dev, &n_edp_entries); |
| 287 | n_edp_entries = ARRAY_SIZE(skl_ddi_translations_edp); | 466 | ddi_translations_hdmi = |
| 288 | } else { | 467 | skl_get_buf_trans_hdmi(dev, &n_hdmi_entries); |
| 289 | ddi_translations_edp = skl_ddi_translations_dp; | 468 | hdmi_default_entry = 8; |
| 290 | n_edp_entries = ARRAY_SIZE(skl_ddi_translations_dp); | ||
| 291 | } | ||
| 292 | |||
| 293 | ddi_translations_hdmi = skl_ddi_translations_hdmi; | ||
| 294 | n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi); | ||
| 295 | hdmi_default_entry = 7; | ||
| 296 | } else if (IS_BROADWELL(dev)) { | 469 | } else if (IS_BROADWELL(dev)) { |
| 297 | ddi_translations_fdi = bdw_ddi_translations_fdi; | 470 | ddi_translations_fdi = bdw_ddi_translations_fdi; |
| 298 | ddi_translations_dp = bdw_ddi_translations_dp; | 471 | ddi_translations_dp = bdw_ddi_translations_dp; |
| @@ -625,11 +798,11 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state) | |||
| 625 | (void) (&__a == &__b); \ | 798 | (void) (&__a == &__b); \ |
| 626 | __a > __b ? (__a - __b) : (__b - __a); }) | 799 | __a > __b ? (__a - __b) : (__b - __a); }) |
| 627 | 800 | ||
| 628 | struct wrpll_rnp { | 801 | struct hsw_wrpll_rnp { |
| 629 | unsigned p, n2, r2; | 802 | unsigned p, n2, r2; |
| 630 | }; | 803 | }; |
| 631 | 804 | ||
| 632 | static unsigned wrpll_get_budget_for_freq(int clock) | 805 | static unsigned hsw_wrpll_get_budget_for_freq(int clock) |
| 633 | { | 806 | { |
| 634 | unsigned budget; | 807 | unsigned budget; |
| 635 | 808 | ||
| @@ -703,9 +876,9 @@ static unsigned wrpll_get_budget_for_freq(int clock) | |||
| 703 | return budget; | 876 | return budget; |
| 704 | } | 877 | } |
| 705 | 878 | ||
| 706 | static void wrpll_update_rnp(uint64_t freq2k, unsigned budget, | 879 | static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget, |
| 707 | unsigned r2, unsigned n2, unsigned p, | 880 | unsigned r2, unsigned n2, unsigned p, |
| 708 | struct wrpll_rnp *best) | 881 | struct hsw_wrpll_rnp *best) |
| 709 | { | 882 | { |
| 710 | uint64_t a, b, c, d, diff, diff_best; | 883 | uint64_t a, b, c, d, diff, diff_best; |
| 711 | 884 | ||
| @@ -762,8 +935,7 @@ static void wrpll_update_rnp(uint64_t freq2k, unsigned budget, | |||
| 762 | /* Otherwise a < c && b >= d, do nothing */ | 935 | /* Otherwise a < c && b >= d, do nothing */ |
| 763 | } | 936 | } |
| 764 | 937 | ||
| 765 | static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, | 938 | static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg) |
| 766 | int reg) | ||
| 767 | { | 939 | { |
| 768 | int refclk = LC_FREQ; | 940 | int refclk = LC_FREQ; |
| 769 | int n, p, r; | 941 | int n, p, r; |
| @@ -856,6 +1028,26 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv, | |||
| 856 | return dco_freq / (p0 * p1 * p2 * 5); | 1028 | return dco_freq / (p0 * p1 * p2 * 5); |
| 857 | } | 1029 | } |
| 858 | 1030 | ||
| 1031 | static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) | ||
| 1032 | { | ||
| 1033 | int dotclock; | ||
| 1034 | |||
| 1035 | if (pipe_config->has_pch_encoder) | ||
| 1036 | dotclock = intel_dotclock_calculate(pipe_config->port_clock, | ||
| 1037 | &pipe_config->fdi_m_n); | ||
| 1038 | else if (pipe_config->has_dp_encoder) | ||
| 1039 | dotclock = intel_dotclock_calculate(pipe_config->port_clock, | ||
| 1040 | &pipe_config->dp_m_n); | ||
| 1041 | else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36) | ||
| 1042 | dotclock = pipe_config->port_clock * 2 / 3; | ||
| 1043 | else | ||
| 1044 | dotclock = pipe_config->port_clock; | ||
| 1045 | |||
| 1046 | if (pipe_config->pixel_multiplier) | ||
| 1047 | dotclock /= pipe_config->pixel_multiplier; | ||
| 1048 | |||
| 1049 | pipe_config->base.adjusted_mode.crtc_clock = dotclock; | ||
| 1050 | } | ||
| 859 | 1051 | ||
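ddi_dotclock_get() folds the three dot-clock cases into one place: PCH and DP encoders derive the pixel rate from the link m/n values, a 12 bpc HDMI sink runs the port (TMDS) clock at 1.5x the pixel rate, and everything else uses the port clock directly. A standalone sketch of the deep-color branch with assumed numbers (a 297 MHz mode carried at 12 bpc, so port_clock = 445500 kHz) follows; it is only the arithmetic, not driver code.

#include <stdio.h>

int main(void)
{
	int port_clock = 445500;	/* kHz, assumed 12 bpc HDMI example */
	int pixel_multiplier = 1;
	int dotclock = port_clock * 2 / 3;	/* TMDS clock is 1.5x pixel */

	if (pixel_multiplier)
		dotclock /= pixel_multiplier;

	printf("dotclock = %d kHz\n", dotclock);	/* 297000 */
	return 0;
}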
| 860 | static void skl_ddi_clock_get(struct intel_encoder *encoder, | 1052 | static void skl_ddi_clock_get(struct intel_encoder *encoder, |
| 861 | struct intel_crtc_state *pipe_config) | 1053 | struct intel_crtc_state *pipe_config) |
| @@ -902,12 +1094,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder, | |||
| 902 | 1094 | ||
| 903 | pipe_config->port_clock = link_clock; | 1095 | pipe_config->port_clock = link_clock; |
| 904 | 1096 | ||
| 905 | if (pipe_config->has_dp_encoder) | 1097 | ddi_dotclock_get(pipe_config); |
| 906 | pipe_config->base.adjusted_mode.crtc_clock = | ||
| 907 | intel_dotclock_calculate(pipe_config->port_clock, | ||
| 908 | &pipe_config->dp_m_n); | ||
| 909 | else | ||
| 910 | pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock; | ||
| 911 | } | 1098 | } |
| 912 | 1099 | ||
| 913 | static void hsw_ddi_clock_get(struct intel_encoder *encoder, | 1100 | static void hsw_ddi_clock_get(struct intel_encoder *encoder, |
| @@ -929,10 +1116,10 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder, | |||
| 929 | link_clock = 270000; | 1116 | link_clock = 270000; |
| 930 | break; | 1117 | break; |
| 931 | case PORT_CLK_SEL_WRPLL1: | 1118 | case PORT_CLK_SEL_WRPLL1: |
| 932 | link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1); | 1119 | link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1); |
| 933 | break; | 1120 | break; |
| 934 | case PORT_CLK_SEL_WRPLL2: | 1121 | case PORT_CLK_SEL_WRPLL2: |
| 935 | link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2); | 1122 | link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2); |
| 936 | break; | 1123 | break; |
| 937 | case PORT_CLK_SEL_SPLL: | 1124 | case PORT_CLK_SEL_SPLL: |
| 938 | pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK; | 1125 | pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK; |
| @@ -954,23 +1141,32 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder, | |||
| 954 | 1141 | ||
| 955 | pipe_config->port_clock = link_clock * 2; | 1142 | pipe_config->port_clock = link_clock * 2; |
| 956 | 1143 | ||
| 957 | if (pipe_config->has_pch_encoder) | 1144 | ddi_dotclock_get(pipe_config); |
| 958 | pipe_config->base.adjusted_mode.crtc_clock = | ||
| 959 | intel_dotclock_calculate(pipe_config->port_clock, | ||
| 960 | &pipe_config->fdi_m_n); | ||
| 961 | else if (pipe_config->has_dp_encoder) | ||
| 962 | pipe_config->base.adjusted_mode.crtc_clock = | ||
| 963 | intel_dotclock_calculate(pipe_config->port_clock, | ||
| 964 | &pipe_config->dp_m_n); | ||
| 965 | else | ||
| 966 | pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock; | ||
| 967 | } | 1145 | } |
| 968 | 1146 | ||
| 969 | static int bxt_calc_pll_link(struct drm_i915_private *dev_priv, | 1147 | static int bxt_calc_pll_link(struct drm_i915_private *dev_priv, |
| 970 | enum intel_dpll_id dpll) | 1148 | enum intel_dpll_id dpll) |
| 971 | { | 1149 | { |
| 972 | /* FIXME formula not available in bspec */ | 1150 | struct intel_shared_dpll *pll; |
| 973 | return 0; | 1151 | struct intel_dpll_hw_state *state; |
| 1152 | intel_clock_t clock; | ||
| 1153 | |||
| 1154 | /* For DDI ports we always use a shared PLL. */ | ||
| 1155 | if (WARN_ON(dpll == DPLL_ID_PRIVATE)) | ||
| 1156 | return 0; | ||
| 1157 | |||
| 1158 | pll = &dev_priv->shared_dplls[dpll]; | ||
| 1159 | state = &pll->config.hw_state; | ||
| 1160 | |||
| 1161 | clock.m1 = 2; | ||
| 1162 | clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22; | ||
| 1163 | if (state->pll3 & PORT_PLL_M2_FRAC_ENABLE) | ||
| 1164 | clock.m2 |= state->pll2 & PORT_PLL_M2_FRAC_MASK; | ||
| 1165 | clock.n = (state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT; | ||
| 1166 | clock.p1 = (state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT; | ||
| 1167 | clock.p2 = (state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT; | ||
| 1168 | |||
| 1169 | return chv_calc_dpll_params(100000, &clock); | ||
| 974 | } | 1170 | } |
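bxt_calc_pll_link() now reconstructs the port clock from the saved PLL dividers instead of returning 0: m2 is read back as a 22-bit fixed-point value (integer part shifted up, fractional bits OR'd in when enabled) and fed to the CHV formula with the 100 MHz reference. Below is a standalone check using the precomputed 270000 kHz DP dividers from the bxt_dp_clk_val table later in this file (m2 = 27, n = 1, p1 = 4, p2 = 1); the DIV_ROUND_CLOSEST rounding is dropped here because the example divides exactly.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t refclk = 100000;			/* kHz */
	uint64_t m2 = (uint64_t)27 << 22;		/* integer m2, no fraction */
	uint64_t m = 2 * m2;				/* m1 = 2 */
	uint64_t n = 1, p = 4 * 1;			/* p1 = 4, p2 = 1 */
	uint64_t vco = refclk * m / (n << 22);		/* 5400000 kHz */
	uint64_t dot = vco / p;				/* 1350000 kHz */

	printf("port clock = %llu kHz\n", (unsigned long long)(dot / 5));
	return 0;					/* prints 270000 */
}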
| 975 | 1171 | ||
| 976 | static void bxt_ddi_clock_get(struct intel_encoder *encoder, | 1172 | static void bxt_ddi_clock_get(struct intel_encoder *encoder, |
| @@ -980,16 +1176,9 @@ static void bxt_ddi_clock_get(struct intel_encoder *encoder, | |||
| 980 | enum port port = intel_ddi_get_encoder_port(encoder); | 1176 | enum port port = intel_ddi_get_encoder_port(encoder); |
| 981 | uint32_t dpll = port; | 1177 | uint32_t dpll = port; |
| 982 | 1178 | ||
| 983 | pipe_config->port_clock = | 1179 | pipe_config->port_clock = bxt_calc_pll_link(dev_priv, dpll); |
| 984 | bxt_calc_pll_link(dev_priv, dpll); | ||
| 985 | 1180 | ||
| 986 | if (pipe_config->has_dp_encoder) | 1181 | ddi_dotclock_get(pipe_config); |
| 987 | pipe_config->base.adjusted_mode.crtc_clock = | ||
| 988 | intel_dotclock_calculate(pipe_config->port_clock, | ||
| 989 | &pipe_config->dp_m_n); | ||
| 990 | else | ||
| 991 | pipe_config->base.adjusted_mode.crtc_clock = | ||
| 992 | pipe_config->port_clock; | ||
| 993 | } | 1182 | } |
| 994 | 1183 | ||
| 995 | void intel_ddi_clock_get(struct intel_encoder *encoder, | 1184 | void intel_ddi_clock_get(struct intel_encoder *encoder, |
| @@ -1011,12 +1200,12 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */, | |||
| 1011 | { | 1200 | { |
| 1012 | uint64_t freq2k; | 1201 | uint64_t freq2k; |
| 1013 | unsigned p, n2, r2; | 1202 | unsigned p, n2, r2; |
| 1014 | struct wrpll_rnp best = { 0, 0, 0 }; | 1203 | struct hsw_wrpll_rnp best = { 0, 0, 0 }; |
| 1015 | unsigned budget; | 1204 | unsigned budget; |
| 1016 | 1205 | ||
| 1017 | freq2k = clock / 100; | 1206 | freq2k = clock / 100; |
| 1018 | 1207 | ||
| 1019 | budget = wrpll_get_budget_for_freq(clock); | 1208 | budget = hsw_wrpll_get_budget_for_freq(clock); |
| 1020 | 1209 | ||
| 1021 | /* Special case handling for 540 pixel clock: bypass WR PLL entirely | 1210 | /* Special case handling for 540 pixel clock: bypass WR PLL entirely |
| 1022 | * and directly pass the LC PLL to it. */ | 1211 | * and directly pass the LC PLL to it. */ |
| @@ -1060,8 +1249,8 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */, | |||
| 1060 | n2++) { | 1249 | n2++) { |
| 1061 | 1250 | ||
| 1062 | for (p = P_MIN; p <= P_MAX; p += P_INC) | 1251 | for (p = P_MIN; p <= P_MAX; p += P_INC) |
| 1063 | wrpll_update_rnp(freq2k, budget, | 1252 | hsw_wrpll_update_rnp(freq2k, budget, |
| 1064 | r2, n2, p, &best); | 1253 | r2, n2, p, &best); |
| 1065 | } | 1254 | } |
| 1066 | } | 1255 | } |
| 1067 | 1256 | ||
| @@ -1105,6 +1294,102 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc, | |||
| 1105 | return true; | 1294 | return true; |
| 1106 | } | 1295 | } |
| 1107 | 1296 | ||
| 1297 | struct skl_wrpll_context { | ||
| 1298 | uint64_t min_deviation; /* current minimal deviation */ | ||
| 1299 | uint64_t central_freq; /* chosen central freq */ | ||
| 1300 | uint64_t dco_freq; /* chosen dco freq */ | ||
| 1301 | unsigned int p; /* chosen divider */ | ||
| 1302 | }; | ||
| 1303 | |||
| 1304 | static void skl_wrpll_context_init(struct skl_wrpll_context *ctx) | ||
| 1305 | { | ||
| 1306 | memset(ctx, 0, sizeof(*ctx)); | ||
| 1307 | |||
| 1308 | ctx->min_deviation = U64_MAX; | ||
| 1309 | } | ||
| 1310 | |||
| 1311 | /* DCO freq must be within +1%/-6% of the DCO central freq */ | ||
| 1312 | #define SKL_DCO_MAX_PDEVIATION 100 | ||
| 1313 | #define SKL_DCO_MAX_NDEVIATION 600 | ||
| 1314 | |||
| 1315 | static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx, | ||
| 1316 | uint64_t central_freq, | ||
| 1317 | uint64_t dco_freq, | ||
| 1318 | unsigned int divider) | ||
| 1319 | { | ||
| 1320 | uint64_t deviation; | ||
| 1321 | |||
| 1322 | deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq), | ||
| 1323 | central_freq); | ||
| 1324 | |||
| 1325 | /* positive deviation */ | ||
| 1326 | if (dco_freq >= central_freq) { | ||
| 1327 | if (deviation < SKL_DCO_MAX_PDEVIATION && | ||
| 1328 | deviation < ctx->min_deviation) { | ||
| 1329 | ctx->min_deviation = deviation; | ||
| 1330 | ctx->central_freq = central_freq; | ||
| 1331 | ctx->dco_freq = dco_freq; | ||
| 1332 | ctx->p = divider; | ||
| 1333 | } | ||
| 1334 | /* negative deviation */ | ||
| 1335 | } else if (deviation < SKL_DCO_MAX_NDEVIATION && | ||
| 1336 | deviation < ctx->min_deviation) { | ||
| 1337 | ctx->min_deviation = deviation; | ||
| 1338 | ctx->central_freq = central_freq; | ||
| 1339 | ctx->dco_freq = dco_freq; | ||
| 1340 | ctx->p = divider; | ||
| 1341 | } | ||
| 1342 | } | ||
| 1343 | |||
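The deviation is measured in units of 0.01%, so the limits above allow the DCO to land at most 1% above or 6% below the chosen central frequency. Worked numbers for an assumed 148.5 MHz pixel clock: the AFE clock is 742.5 MHz, and the candidate divider p = 12 gives a DCO of 8.91 GHz, which only the 9.0 GHz central frequency accepts (deviation 100, i.e. 1.0%). The standalone model below reproduces just that test in plain C.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t afe_clock = 148500000ULL * 5;	/* assumed 148.5 MHz pixel clock */
	uint64_t dco_freq = 12 * afe_clock;	/* candidate divider p = 12 */
	uint64_t central[] = { 8400000000ULL, 9000000000ULL, 9600000000ULL };
	int i;

	for (i = 0; i < 3; i++) {
		uint64_t diff = dco_freq > central[i] ?
				dco_freq - central[i] : central[i] - dco_freq;
		uint64_t deviation = 10000 * diff / central[i];
		uint64_t limit = dco_freq >= central[i] ? 100 : 600;

		printf("central %" PRIu64 ": deviation %" PRIu64 " -> %s\n",
		       central[i], deviation,
		       deviation < limit ? "ok" : "too far");
	}
	return 0;
}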
| 1344 | static void skl_wrpll_get_multipliers(unsigned int p, | ||
| 1345 | unsigned int *p0 /* out */, | ||
| 1346 | unsigned int *p1 /* out */, | ||
| 1347 | unsigned int *p2 /* out */) | ||
| 1348 | { | ||
| 1349 | /* even dividers */ | ||
| 1350 | if (p % 2 == 0) { | ||
| 1351 | unsigned int half = p / 2; | ||
| 1352 | |||
| 1353 | if (half == 1 || half == 2 || half == 3 || half == 5) { | ||
| 1354 | *p0 = 2; | ||
| 1355 | *p1 = 1; | ||
| 1356 | *p2 = half; | ||
| 1357 | } else if (half % 2 == 0) { | ||
| 1358 | *p0 = 2; | ||
| 1359 | *p1 = half / 2; | ||
| 1360 | *p2 = 2; | ||
| 1361 | } else if (half % 3 == 0) { | ||
| 1362 | *p0 = 3; | ||
| 1363 | *p1 = half / 3; | ||
| 1364 | *p2 = 2; | ||
| 1365 | } else if (half % 7 == 0) { | ||
| 1366 | *p0 = 7; | ||
| 1367 | *p1 = half / 7; | ||
| 1368 | *p2 = 2; | ||
| 1369 | } | ||
| 1370 | } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */ | ||
| 1371 | *p0 = 3; | ||
| 1372 | *p1 = 1; | ||
| 1373 | *p2 = p / 3; | ||
| 1374 | } else if (p == 5 || p == 7) { | ||
| 1375 | *p0 = p; | ||
| 1376 | *p1 = 1; | ||
| 1377 | *p2 = 1; | ||
| 1378 | } else if (p == 15) { | ||
| 1379 | *p0 = 3; | ||
| 1380 | *p1 = 1; | ||
| 1381 | *p2 = 5; | ||
| 1382 | } else if (p == 21) { | ||
| 1383 | *p0 = 7; | ||
| 1384 | *p1 = 1; | ||
| 1385 | *p2 = 3; | ||
| 1386 | } else if (p == 35) { | ||
| 1387 | *p0 = 7; | ||
| 1388 | *p1 = 1; | ||
| 1389 | *p2 = 5; | ||
| 1390 | } | ||
| 1391 | } | ||
| 1392 | |||
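Continuing the assumed 148.5 MHz example: the even divider p = 12 has half = 6, which is itself even, so the function above yields p0 = 2, p1 = half / 2 = 3, p2 = 2, and the product 2 * 3 * 2 recovers the original divider. A compact standalone check of just that path:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int p = 12, half = p / 2;
	unsigned int p0 = 2, p1 = half / 2, p2 = 2;	/* the half % 2 == 0 branch */

	assert(p0 * p1 * p2 == p);
	printf("p0=%u p1=%u p2=%u\n", p0, p1, p2);	/* 2 3 2 */
	return 0;
}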
| 1108 | struct skl_wrpll_params { | 1393 | struct skl_wrpll_params { |
| 1109 | uint32_t dco_fraction; | 1394 | uint32_t dco_fraction; |
| 1110 | uint32_t dco_integer; | 1395 | uint32_t dco_integer; |
| @@ -1115,150 +1400,145 @@ struct skl_wrpll_params { | |||
| 1115 | uint32_t central_freq; | 1400 | uint32_t central_freq; |
| 1116 | }; | 1401 | }; |
| 1117 | 1402 | ||
| 1118 | static void | 1403 | static void skl_wrpll_params_populate(struct skl_wrpll_params *params, |
| 1119 | skl_ddi_calculate_wrpll(int clock /* in Hz */, | 1404 | uint64_t afe_clock, |
| 1120 | struct skl_wrpll_params *wrpll_params) | 1405 | uint64_t central_freq, |
| 1406 | uint32_t p0, uint32_t p1, uint32_t p2) | ||
| 1121 | { | 1407 | { |
| 1122 | uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */ | ||
| 1123 | uint64_t dco_central_freq[3] = {8400000000ULL, | ||
| 1124 | 9000000000ULL, | ||
| 1125 | 9600000000ULL}; | ||
| 1126 | uint32_t min_dco_deviation = 400; | ||
| 1127 | uint32_t min_dco_index = 3; | ||
| 1128 | uint32_t P0[4] = {1, 2, 3, 7}; | ||
| 1129 | uint32_t P2[4] = {1, 2, 3, 5}; | ||
| 1130 | bool found = false; | ||
| 1131 | uint32_t candidate_p = 0; | ||
| 1132 | uint32_t candidate_p0[3] = {0}, candidate_p1[3] = {0}; | ||
| 1133 | uint32_t candidate_p2[3] = {0}; | ||
| 1134 | uint32_t dco_central_freq_deviation[3]; | ||
| 1135 | uint32_t i, P1, k, dco_count; | ||
| 1136 | bool retry_with_odd = false; | ||
| 1137 | uint64_t dco_freq; | 1408 | uint64_t dco_freq; |
| 1138 | 1409 | ||
| 1139 | /* Determine P0, P1 or P2 */ | 1410 | switch (central_freq) { |
| 1140 | for (dco_count = 0; dco_count < 3; dco_count++) { | 1411 | case 9600000000ULL: |
| 1141 | found = false; | 1412 | params->central_freq = 0; |
| 1142 | candidate_p = | 1413 | break; |
| 1143 | div64_u64(dco_central_freq[dco_count], afe_clock); | 1414 | case 9000000000ULL: |
| 1144 | if (retry_with_odd == false) | 1415 | params->central_freq = 1; |
| 1145 | candidate_p = (candidate_p % 2 == 0 ? | 1416 | break; |
| 1146 | candidate_p : candidate_p + 1); | 1417 | case 8400000000ULL: |
| 1147 | 1418 | params->central_freq = 3; | |
| 1148 | for (P1 = 1; P1 < candidate_p; P1++) { | 1419 | } |
| 1149 | for (i = 0; i < 4; i++) { | ||
| 1150 | if (!(P0[i] != 1 || P1 == 1)) | ||
| 1151 | continue; | ||
| 1152 | |||
| 1153 | for (k = 0; k < 4; k++) { | ||
| 1154 | if (P1 != 1 && P2[k] != 2) | ||
| 1155 | continue; | ||
| 1156 | |||
| 1157 | if (candidate_p == P0[i] * P1 * P2[k]) { | ||
| 1158 | /* Found possible P0, P1, P2 */ | ||
| 1159 | found = true; | ||
| 1160 | candidate_p0[dco_count] = P0[i]; | ||
| 1161 | candidate_p1[dco_count] = P1; | ||
| 1162 | candidate_p2[dco_count] = P2[k]; | ||
| 1163 | goto found; | ||
| 1164 | } | ||
| 1165 | |||
| 1166 | } | ||
| 1167 | } | ||
| 1168 | } | ||
| 1169 | 1420 | ||
| 1170 | found: | 1421 | switch (p0) { |
| 1171 | if (found) { | 1422 | case 1: |
| 1172 | dco_central_freq_deviation[dco_count] = | 1423 | params->pdiv = 0; |
| 1173 | div64_u64(10000 * | 1424 | break; |
| 1174 | abs_diff((candidate_p * afe_clock), | 1425 | case 2: |
| 1175 | dco_central_freq[dco_count]), | 1426 | params->pdiv = 1; |
| 1176 | dco_central_freq[dco_count]); | 1427 | break; |
| 1177 | 1428 | case 3: | |
| 1178 | if (dco_central_freq_deviation[dco_count] < | 1429 | params->pdiv = 2; |
| 1179 | min_dco_deviation) { | 1430 | break; |
| 1180 | min_dco_deviation = | 1431 | case 7: |
| 1181 | dco_central_freq_deviation[dco_count]; | 1432 | params->pdiv = 4; |
| 1182 | min_dco_index = dco_count; | 1433 | break; |
| 1183 | } | 1434 | default: |
| 1184 | } | 1435 | WARN(1, "Incorrect PDiv\n"); |
| 1436 | } | ||
| 1185 | 1437 | ||
| 1186 | if (min_dco_index > 2 && dco_count == 2) { | 1438 | switch (p2) { |
| 1187 | retry_with_odd = true; | 1439 | case 5: |
| 1188 | dco_count = 0; | 1440 | params->kdiv = 0; |
| 1189 | } | 1441 | break; |
| 1442 | case 2: | ||
| 1443 | params->kdiv = 1; | ||
| 1444 | break; | ||
| 1445 | case 3: | ||
| 1446 | params->kdiv = 2; | ||
| 1447 | break; | ||
| 1448 | case 1: | ||
| 1449 | params->kdiv = 3; | ||
| 1450 | break; | ||
| 1451 | default: | ||
| 1452 | WARN(1, "Incorrect KDiv\n"); | ||
| 1190 | } | 1453 | } |
| 1191 | 1454 | ||
| 1192 | if (min_dco_index > 2) { | 1455 | params->qdiv_ratio = p1; |
| 1193 | WARN(1, "No valid values found for the given pixel clock\n"); | 1456 | params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1; |
| 1194 | } else { | ||
| 1195 | wrpll_params->central_freq = dco_central_freq[min_dco_index]; | ||
| 1196 | 1457 | ||
| 1197 | switch (dco_central_freq[min_dco_index]) { | 1458 | dco_freq = p0 * p1 * p2 * afe_clock; |
| 1198 | case 9600000000ULL: | ||
| 1199 | wrpll_params->central_freq = 0; | ||
| 1200 | break; | ||
| 1201 | case 9000000000ULL: | ||
| 1202 | wrpll_params->central_freq = 1; | ||
| 1203 | break; | ||
| 1204 | case 8400000000ULL: | ||
| 1205 | wrpll_params->central_freq = 3; | ||
| 1206 | } | ||
| 1207 | 1459 | ||
| 1208 | switch (candidate_p0[min_dco_index]) { | 1460 | /* |
| 1209 | case 1: | 1461 | * Intermediate values are in Hz. |
| 1210 | wrpll_params->pdiv = 0; | 1462 | * Divide by MHz to match bsepc |
| 1211 | break; | 1463 | */ |
| 1212 | case 2: | 1464 | params->dco_integer = div_u64(dco_freq, 24 * MHz(1)); |
| 1213 | wrpll_params->pdiv = 1; | 1465 | params->dco_fraction = |
| 1214 | break; | 1466 | div_u64((div_u64(dco_freq, 24) - |
| 1215 | case 3: | 1467 | params->dco_integer * MHz(1)) * 0x8000, MHz(1)); |
| 1216 | wrpll_params->pdiv = 2; | 1468 | } |
| 1217 | break; | ||
| 1218 | case 7: | ||
| 1219 | wrpll_params->pdiv = 4; | ||
| 1220 | break; | ||
| 1221 | default: | ||
| 1222 | WARN(1, "Incorrect PDiv\n"); | ||
| 1223 | } | ||
| 1224 | 1469 | ||
| 1225 | switch (candidate_p2[min_dco_index]) { | 1470 | static bool |
| 1226 | case 5: | 1471 | skl_ddi_calculate_wrpll(int clock /* in Hz */, |
| 1227 | wrpll_params->kdiv = 0; | 1472 | struct skl_wrpll_params *wrpll_params) |
| 1228 | break; | 1473 | { |
| 1229 | case 2: | 1474 | uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */ |
| 1230 | wrpll_params->kdiv = 1; | 1475 | uint64_t dco_central_freq[3] = {8400000000ULL, |
| 1231 | break; | 1476 | 9000000000ULL, |
| 1232 | case 3: | 1477 | 9600000000ULL}; |
| 1233 | wrpll_params->kdiv = 2; | 1478 | static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20, |
| 1234 | break; | 1479 | 24, 28, 30, 32, 36, 40, 42, 44, |
| 1235 | case 1: | 1480 | 48, 52, 54, 56, 60, 64, 66, 68, |
| 1236 | wrpll_params->kdiv = 3; | 1481 | 70, 72, 76, 78, 80, 84, 88, 90, |
| 1237 | break; | 1482 | 92, 96, 98 }; |
| 1238 | default: | 1483 | static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 }; |
| 1239 | WARN(1, "Incorrect KDiv\n"); | 1484 | static const struct { |
| 1485 | const int *list; | ||
| 1486 | int n_dividers; | ||
| 1487 | } dividers[] = { | ||
| 1488 | { even_dividers, ARRAY_SIZE(even_dividers) }, | ||
| 1489 | { odd_dividers, ARRAY_SIZE(odd_dividers) }, | ||
| 1490 | }; | ||
| 1491 | struct skl_wrpll_context ctx; | ||
| 1492 | unsigned int dco, d, i; | ||
| 1493 | unsigned int p0, p1, p2; | ||
| 1494 | |||
| 1495 | skl_wrpll_context_init(&ctx); | ||
| 1496 | |||
| 1497 | for (d = 0; d < ARRAY_SIZE(dividers); d++) { | ||
| 1498 | for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) { | ||
| 1499 | for (i = 0; i < dividers[d].n_dividers; i++) { | ||
| 1500 | unsigned int p = dividers[d].list[i]; | ||
| 1501 | uint64_t dco_freq = p * afe_clock; | ||
| 1502 | |||
| 1503 | skl_wrpll_try_divider(&ctx, | ||
| 1504 | dco_central_freq[dco], | ||
| 1505 | dco_freq, | ||
| 1506 | p); | ||
| 1507 | /* | ||
| 1508 | * Skip the remaining dividers if we're sure to | ||
| 1509 | * have found the definitive divider, we can't | ||
| 1510 | * improve a 0 deviation. | ||
| 1511 | */ | ||
| 1512 | if (ctx.min_deviation == 0) | ||
| 1513 | goto skip_remaining_dividers; | ||
| 1514 | } | ||
| 1240 | } | 1515 | } |
| 1241 | 1516 | ||
| 1242 | wrpll_params->qdiv_ratio = candidate_p1[min_dco_index]; | 1517 | skip_remaining_dividers: |
| 1243 | wrpll_params->qdiv_mode = | ||
| 1244 | (wrpll_params->qdiv_ratio == 1) ? 0 : 1; | ||
| 1245 | |||
| 1246 | dco_freq = candidate_p0[min_dco_index] * | ||
| 1247 | candidate_p1[min_dco_index] * | ||
| 1248 | candidate_p2[min_dco_index] * afe_clock; | ||
| 1249 | |||
| 1250 | /* | 1518 | /* |
| 1251 | * Intermediate values are in Hz. | 1519 | * If a solution is found with an even divider, prefer |
| 1252 | * Divide by MHz to match bsepc | 1520 | * this one. |
| 1253 | */ | 1521 | */ |
| 1254 | wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1))); | 1522 | if (d == 0 && ctx.p) |
| 1255 | wrpll_params->dco_fraction = | 1523 | break; |
| 1256 | div_u64(((div_u64(dco_freq, 24) - | 1524 | } |
| 1257 | wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1)); | ||
| 1258 | 1525 | ||
| 1526 | if (!ctx.p) { | ||
| 1527 | DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock); | ||
| 1528 | return false; | ||
| 1259 | } | 1529 | } |
| 1260 | } | ||
| 1261 | 1530 | ||
| 1531 | /* | ||
| 1532 | * gcc incorrectly analyses that these can be used without being | ||
| 1533 | * initialized. To be fair, it's hard to guess. | ||
| 1534 | */ | ||
| 1535 | p0 = p1 = p2 = 0; | ||
| 1536 | skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2); | ||
| 1537 | skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq, | ||
| 1538 | p0, p1, p2); | ||
| 1539 | |||
| 1540 | return true; | ||
| 1541 | } | ||
| 1262 | 1542 | ||
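Putting the pieces together for the assumed 148.5 MHz HDMI case: the divider search settles on p = 12 against the 9.0 GHz central frequency, the multipliers come out as p0/p1/p2 = 2/3/2 (so pdiv = 1, kdiv = 1, qdiv_ratio = 3, qdiv_mode = 1), and the 8.91 GHz DCO encodes as dco_integer = 371 with a 15-bit fraction of 0x2000. The standalone arithmetic below mirrors the populate step; units are Hz and the input clock is only an example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t afe_clock = 148500000ULL * 5;		/* 742.5 MHz */
	uint64_t dco_freq = 12 * afe_clock;		/* 8,910,000,000 Hz */
	uint32_t dco_integer = (uint32_t)(dco_freq / 24 / 1000000);
	uint32_t dco_fraction = (uint32_t)((dco_freq / 24 -
			(uint64_t)dco_integer * 1000000) * 0x8000 / 1000000);

	printf("dco_integer=%u dco_fraction=0x%x\n", dco_integer, dco_fraction);
	return 0;	/* prints dco_integer=371 dco_fraction=0x2000 */
}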
| 1263 | static bool | 1543 | static bool |
| 1264 | skl_ddi_pll_select(struct intel_crtc *intel_crtc, | 1544 | skl_ddi_pll_select(struct intel_crtc *intel_crtc, |
| @@ -1281,7 +1561,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc, | |||
| 1281 | 1561 | ||
| 1282 | ctrl1 |= DPLL_CTRL1_HDMI_MODE(0); | 1562 | ctrl1 |= DPLL_CTRL1_HDMI_MODE(0); |
| 1283 | 1563 | ||
| 1284 | skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params); | 1564 | if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params)) |
| 1565 | return false; | ||
| 1285 | 1566 | ||
| 1286 | cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE | | 1567 | cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE | |
| 1287 | DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) | | 1568 | DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) | |
| @@ -1334,6 +1615,7 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc, | |||
| 1334 | 1615 | ||
| 1335 | /* bxt clock parameters */ | 1616 | /* bxt clock parameters */ |
| 1336 | struct bxt_clk_div { | 1617 | struct bxt_clk_div { |
| 1618 | int clock; | ||
| 1337 | uint32_t p1; | 1619 | uint32_t p1; |
| 1338 | uint32_t p2; | 1620 | uint32_t p2; |
| 1339 | uint32_t m2_int; | 1621 | uint32_t m2_int; |
| @@ -1343,14 +1625,14 @@ struct bxt_clk_div { | |||
| 1343 | }; | 1625 | }; |
| 1344 | 1626 | ||
| 1345 | /* pre-calculated values for DP linkrates */ | 1627 | /* pre-calculated values for DP linkrates */ |
| 1346 | static struct bxt_clk_div bxt_dp_clk_val[7] = { | 1628 | static const struct bxt_clk_div bxt_dp_clk_val[] = { |
| 1347 | /* 162 */ {4, 2, 32, 1677722, 1, 1}, | 1629 | {162000, 4, 2, 32, 1677722, 1, 1}, |
| 1348 | /* 270 */ {4, 1, 27, 0, 0, 1}, | 1630 | {270000, 4, 1, 27, 0, 0, 1}, |
| 1349 | /* 540 */ {2, 1, 27, 0, 0, 1}, | 1631 | {540000, 2, 1, 27, 0, 0, 1}, |
| 1350 | /* 216 */ {3, 2, 32, 1677722, 1, 1}, | 1632 | {216000, 3, 2, 32, 1677722, 1, 1}, |
| 1351 | /* 243 */ {4, 1, 24, 1258291, 1, 1}, | 1633 | {243000, 4, 1, 24, 1258291, 1, 1}, |
| 1352 | /* 324 */ {4, 1, 32, 1677722, 1, 1}, | 1634 | {324000, 4, 1, 32, 1677722, 1, 1}, |
| 1353 | /* 432 */ {3, 1, 32, 1677722, 1, 1} | 1635 | {432000, 3, 1, 32, 1677722, 1, 1} |
| 1354 | }; | 1636 | }; |
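Keying the table by link rate lets the DP/eDP path below pick a row with a simple equality test instead of switching on intel_dp->link_bw. As a quick sanity check with the 270000 kHz (HBR) entry, whose dividers are p1 = 4, p2 = 1: the VCO formula used further down, clock * 10 / 2 * p1 * p2, gives 5,400,000 kHz, which lands in the dedicated vco == 5400000 coefficient branch. Standalone arithmetic only:

#include <stdio.h>

int main(void)
{
	int clock = 270000;			/* kHz, DP HBR link rate */
	int p1 = 4, p2 = 1;			/* from the 270000 table row */
	int vco = clock * 10 / 2 * p1 * p2;

	printf("vco = %d kHz\n", vco);		/* 5400000 */
	return 0;
}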
| 1355 | 1637 | ||
| 1356 | static bool | 1638 | static bool |
| @@ -1363,7 +1645,7 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc, | |||
| 1363 | struct bxt_clk_div clk_div = {0}; | 1645 | struct bxt_clk_div clk_div = {0}; |
| 1364 | int vco = 0; | 1646 | int vco = 0; |
| 1365 | uint32_t prop_coef, int_coef, gain_ctl, targ_cnt; | 1647 | uint32_t prop_coef, int_coef, gain_ctl, targ_cnt; |
| 1366 | uint32_t dcoampovr_en_h, dco_amp, lanestagger; | 1648 | uint32_t lanestagger; |
| 1367 | 1649 | ||
| 1368 | if (intel_encoder->type == INTEL_OUTPUT_HDMI) { | 1650 | if (intel_encoder->type == INTEL_OUTPUT_HDMI) { |
| 1369 | intel_clock_t best_clock; | 1651 | intel_clock_t best_clock; |
| @@ -1390,29 +1672,19 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc, | |||
| 1390 | vco = best_clock.vco; | 1672 | vco = best_clock.vco; |
| 1391 | } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || | 1673 | } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || |
| 1392 | intel_encoder->type == INTEL_OUTPUT_EDP) { | 1674 | intel_encoder->type == INTEL_OUTPUT_EDP) { |
| 1393 | struct drm_encoder *encoder = &intel_encoder->base; | 1675 | int i; |
| 1394 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | ||
| 1395 | 1676 | ||
| 1396 | switch (intel_dp->link_bw) { | 1677 | clk_div = bxt_dp_clk_val[0]; |
| 1397 | case DP_LINK_BW_1_62: | 1678 | for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) { |
| 1398 | clk_div = bxt_dp_clk_val[0]; | 1679 | if (bxt_dp_clk_val[i].clock == clock) { |
| 1399 | break; | 1680 | clk_div = bxt_dp_clk_val[i]; |
| 1400 | case DP_LINK_BW_2_7: | 1681 | break; |
| 1401 | clk_div = bxt_dp_clk_val[1]; | 1682 | } |
| 1402 | break; | ||
| 1403 | case DP_LINK_BW_5_4: | ||
| 1404 | clk_div = bxt_dp_clk_val[2]; | ||
| 1405 | break; | ||
| 1406 | default: | ||
| 1407 | clk_div = bxt_dp_clk_val[0]; | ||
| 1408 | DRM_ERROR("Unknown link rate\n"); | ||
| 1409 | } | 1683 | } |
| 1410 | vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2; | 1684 | vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2; |
| 1411 | } | 1685 | } |
| 1412 | 1686 | ||
| 1413 | dco_amp = 15; | 1687 | if (vco >= 6200000 && vco <= 6700000) { |
| 1414 | dcoampovr_en_h = 0; | ||
| 1415 | if (vco >= 6200000 && vco <= 6480000) { | ||
| 1416 | prop_coef = 4; | 1688 | prop_coef = 4; |
| 1417 | int_coef = 9; | 1689 | int_coef = 9; |
| 1418 | gain_ctl = 3; | 1690 | gain_ctl = 3; |
| @@ -1423,8 +1695,6 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc, | |||
| 1423 | int_coef = 11; | 1695 | int_coef = 11; |
| 1424 | gain_ctl = 3; | 1696 | gain_ctl = 3; |
| 1425 | targ_cnt = 9; | 1697 | targ_cnt = 9; |
| 1426 | if (vco >= 4800000 && vco < 5400000) | ||
| 1427 | dcoampovr_en_h = 1; | ||
| 1428 | } else if (vco == 5400000) { | 1698 | } else if (vco == 5400000) { |
| 1429 | prop_coef = 3; | 1699 | prop_coef = 3; |
| 1430 | int_coef = 8; | 1700 | int_coef = 8; |
| @@ -1466,10 +1736,13 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc, | |||
| 1466 | 1736 | ||
| 1467 | crtc_state->dpll_hw_state.pll8 = targ_cnt; | 1737 | crtc_state->dpll_hw_state.pll8 = targ_cnt; |
| 1468 | 1738 | ||
| 1469 | if (dcoampovr_en_h) | 1739 | crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT; |
| 1470 | crtc_state->dpll_hw_state.pll10 = PORT_PLL_DCO_AMP_OVR_EN_H; | 1740 | |
| 1741 | crtc_state->dpll_hw_state.pll10 = | ||
| 1742 | PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT) | ||
| 1743 | | PORT_PLL_DCO_AMP_OVR_EN_H; | ||
| 1471 | 1744 | ||
| 1472 | crtc_state->dpll_hw_state.pll10 |= PORT_PLL_DCO_AMP(dco_amp); | 1745 | crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE; |
| 1473 | 1746 | ||
| 1474 | crtc_state->dpll_hw_state.pcsdw12 = | 1747 | crtc_state->dpll_hw_state.pcsdw12 = |
| 1475 | LANESTAGGER_STRAP_OVRD | lanestagger; | 1748 | LANESTAGGER_STRAP_OVRD | lanestagger; |
| @@ -1799,8 +2072,48 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) | |||
| 1799 | TRANS_CLK_SEL_DISABLED); | 2072 | TRANS_CLK_SEL_DISABLED); |
| 1800 | } | 2073 | } |
| 1801 | 2074 | ||
| 1802 | void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level, | 2075 | static void skl_ddi_set_iboost(struct drm_device *dev, u32 level, |
| 1803 | enum port port, int type) | 2076 | enum port port, int type) |
| 2077 | { | ||
| 2078 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 2079 | const struct ddi_buf_trans *ddi_translations; | ||
| 2080 | uint8_t iboost; | ||
| 2081 | int n_entries; | ||
| 2082 | u32 reg; | ||
| 2083 | |||
| 2084 | if (type == INTEL_OUTPUT_DISPLAYPORT) { | ||
| 2085 | ddi_translations = skl_get_buf_trans_dp(dev, &n_entries); | ||
| 2086 | iboost = ddi_translations[port].i_boost; | ||
| 2087 | } else if (type == INTEL_OUTPUT_EDP) { | ||
| 2088 | ddi_translations = skl_get_buf_trans_edp(dev, &n_entries); | ||
| 2089 | iboost = ddi_translations[port].i_boost; | ||
| 2090 | } else if (type == INTEL_OUTPUT_HDMI) { | ||
| 2091 | ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries); | ||
| 2092 | iboost = ddi_translations[port].i_boost; | ||
| 2093 | } else { | ||
| 2094 | return; | ||
| 2095 | } | ||
| 2096 | |||
| 2097 | /* Make sure that the requested I_boost is valid */ | ||
| 2098 | if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) { | ||
| 2099 | DRM_ERROR("Invalid I_boost value %u\n", iboost); | ||
| 2100 | return; | ||
| 2101 | } | ||
| 2102 | |||
| 2103 | reg = I915_READ(DISPIO_CR_TX_BMU_CR0); | ||
| 2104 | reg &= ~BALANCE_LEG_MASK(port); | ||
| 2105 | reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port)); | ||
| 2106 | |||
| 2107 | if (iboost) | ||
| 2108 | reg |= iboost << BALANCE_LEG_SHIFT(port); | ||
| 2109 | else | ||
| 2110 | reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port); | ||
| 2111 | |||
| 2112 | I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg); | ||
| 2113 | } | ||
| 2114 | |||
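skl_ddi_set_iboost() programs a per-port current boost: look up i_boost from whichever translation table matches the output type, sanity-check it against the legal values (0x1, 0x3, 0x7), then read-modify-write the port's balance-leg field, setting the per-port disable bit instead when no boost is requested. The sketch below shows only that read-modify-write shape with invented bit positions; EX_LEG_SHIFT and EX_DISABLE_SHIFT are placeholders, not the real DISPIO_CR_TX_BMU_CR0 layout.

#include <stdint.h>

#define EX_LEG_SHIFT(port)	((port) * 3)		/* made up for the example */
#define EX_LEG_MASK(port)	(0x7u << EX_LEG_SHIFT(port))
#define EX_DISABLE_SHIFT	24			/* made up for the example */

static uint32_t set_iboost(uint32_t reg, unsigned int port, uint32_t iboost)
{
	reg &= ~EX_LEG_MASK(port);
	reg &= ~(1u << (EX_DISABLE_SHIFT + port));

	if (iboost)
		reg |= iboost << EX_LEG_SHIFT(port);	/* 0x1, 0x3 or 0x7 */
	else
		reg |= 1u << (EX_DISABLE_SHIFT + port);	/* no override on this port */

	return reg;
}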
| 2115 | static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level, | ||
| 2116 | enum port port, int type) | ||
| 1804 | { | 2117 | { |
| 1805 | struct drm_i915_private *dev_priv = dev->dev_private; | 2118 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1806 | const struct bxt_ddi_buf_trans *ddi_translations; | 2119 | const struct bxt_ddi_buf_trans *ddi_translations; |
| @@ -1860,6 +2173,73 @@ void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level, | |||
| 1860 | I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val); | 2173 | I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val); |
| 1861 | } | 2174 | } |
| 1862 | 2175 | ||
| 2176 | static uint32_t translate_signal_level(int signal_levels) | ||
| 2177 | { | ||
| 2178 | uint32_t level; | ||
| 2179 | |||
| 2180 | switch (signal_levels) { | ||
| 2181 | default: | ||
| 2182 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n", | ||
| 2183 | signal_levels); | ||
| 2184 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: | ||
| 2185 | level = 0; | ||
| 2186 | break; | ||
| 2187 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: | ||
| 2188 | level = 1; | ||
| 2189 | break; | ||
| 2190 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: | ||
| 2191 | level = 2; | ||
| 2192 | break; | ||
| 2193 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3: | ||
| 2194 | level = 3; | ||
| 2195 | break; | ||
| 2196 | |||
| 2197 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: | ||
| 2198 | level = 4; | ||
| 2199 | break; | ||
| 2200 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: | ||
| 2201 | level = 5; | ||
| 2202 | break; | ||
| 2203 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: | ||
| 2204 | level = 6; | ||
| 2205 | break; | ||
| 2206 | |||
| 2207 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: | ||
| 2208 | level = 7; | ||
| 2209 | break; | ||
| 2210 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: | ||
| 2211 | level = 8; | ||
| 2212 | break; | ||
| 2213 | |||
| 2214 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0: | ||
| 2215 | level = 9; | ||
| 2216 | break; | ||
| 2217 | } | ||
| 2218 | |||
| 2219 | return level; | ||
| 2220 | } | ||
| 2221 | |||
| 2222 | uint32_t ddi_signal_levels(struct intel_dp *intel_dp) | ||
| 2223 | { | ||
| 2224 | struct intel_digital_port *dport = dp_to_dig_port(intel_dp); | ||
| 2225 | struct drm_device *dev = dport->base.base.dev; | ||
| 2226 | struct intel_encoder *encoder = &dport->base; | ||
| 2227 | uint8_t train_set = intel_dp->train_set[0]; | ||
| 2228 | int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | | ||
| 2229 | DP_TRAIN_PRE_EMPHASIS_MASK); | ||
| 2230 | enum port port = dport->port; | ||
| 2231 | uint32_t level; | ||
| 2232 | |||
| 2233 | level = translate_signal_level(signal_levels); | ||
| 2234 | |||
| 2235 | if (IS_SKYLAKE(dev)) | ||
| 2236 | skl_ddi_set_iboost(dev, level, port, encoder->type); | ||
| 2237 | else if (IS_BROXTON(dev)) | ||
| 2238 | bxt_ddi_vswing_sequence(dev, level, port, encoder->type); | ||
| 2239 | |||
| 2240 | return DDI_BUF_TRANS_SELECT(level); | ||
| 2241 | } | ||
| 2242 | |||
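The new path replaces the per-platform signal-level callbacks for DDI: the DP training request (voltage swing plus pre-emphasis) collapses to a single table index 0..9, Skylake then programs I_boost for that level while Broxton runs its vswing sequence, and the index finally feeds DDI_BUF_TRANS_SELECT(). The little model below restates the switch as a lookup table with local constants; it is an illustration of the mapping, not the kernel's DP_TRAIN_* encoding.

#include <stdio.h>

/* level = table[voltage swing][pre-emphasis]; -1 marks combinations the
 * buffer-translation tables do not provide (more swing, less pre-emphasis). */
static const int signal_level[4][4] = {
	{ 0, 1, 2, 3 },		/* swing 0 */
	{ 4, 5, 6, -1 },	/* swing 1 */
	{ 7, 8, -1, -1 },	/* swing 2 */
	{ 9, -1, -1, -1 },	/* swing 3 */
};

int main(void)
{
	printf("swing 2, pre-emphasis 1 -> level %d\n", signal_level[2][1]);
	return 0;	/* prints level 8, matching the switch above */
}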
| 1863 | static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) | 2243 | static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) |
| 1864 | { | 2244 | { |
| 1865 | struct drm_encoder *encoder = &intel_encoder->base; | 2245 | struct drm_encoder *encoder = &intel_encoder->base; |
| @@ -2404,7 +2784,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, | |||
| 2404 | 2784 | ||
| 2405 | temp = I915_READ(BXT_PORT_PLL(port, 9)); | 2785 | temp = I915_READ(BXT_PORT_PLL(port, 9)); |
| 2406 | temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK; | 2786 | temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK; |
| 2407 | temp |= (5 << 1); | 2787 | temp |= pll->config.hw_state.pll9; |
| 2408 | I915_WRITE(BXT_PORT_PLL(port, 9), temp); | 2788 | I915_WRITE(BXT_PORT_PLL(port, 9), temp); |
| 2409 | 2789 | ||
| 2410 | temp = I915_READ(BXT_PORT_PLL(port, 10)); | 2790 | temp = I915_READ(BXT_PORT_PLL(port, 10)); |
| @@ -2417,8 +2797,8 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, | |||
| 2417 | temp = I915_READ(BXT_PORT_PLL_EBB_4(port)); | 2797 | temp = I915_READ(BXT_PORT_PLL_EBB_4(port)); |
| 2418 | temp |= PORT_PLL_RECALIBRATE; | 2798 | temp |= PORT_PLL_RECALIBRATE; |
| 2419 | I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp); | 2799 | I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp); |
| 2420 | /* Enable 10 bit clock */ | 2800 | temp &= ~PORT_PLL_10BIT_CLK_ENABLE; |
| 2421 | temp |= PORT_PLL_10BIT_CLK_ENABLE; | 2801 | temp |= pll->config.hw_state.ebb4; |
| 2422 | I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp); | 2802 | I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp); |
| 2423 | 2803 | ||
| 2424 | /* Enable PLL */ | 2804 | /* Enable PLL */ |
| @@ -2469,13 +2849,38 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, | |||
| 2469 | return false; | 2849 | return false; |
| 2470 | 2850 | ||
| 2471 | hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port)); | 2851 | hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port)); |
| 2852 | hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK; | ||
| 2853 | |||
| 2854 | hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port)); | ||
| 2855 | hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE; | ||
| 2856 | |||
| 2472 | hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0)); | 2857 | hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0)); |
| 2858 | hw_state->pll0 &= PORT_PLL_M2_MASK; | ||
| 2859 | |||
| 2473 | hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1)); | 2860 | hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1)); |
| 2861 | hw_state->pll1 &= PORT_PLL_N_MASK; | ||
| 2862 | |||
| 2474 | hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2)); | 2863 | hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2)); |
| 2864 | hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK; | ||
| 2865 | |||
| 2475 | hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3)); | 2866 | hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3)); |
| 2867 | hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE; | ||
| 2868 | |||
| 2476 | hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6)); | 2869 | hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6)); |
| 2870 | hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK | | ||
| 2871 | PORT_PLL_INT_COEFF_MASK | | ||
| 2872 | PORT_PLL_GAIN_CTL_MASK; | ||
| 2873 | |||
| 2477 | hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8)); | 2874 | hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8)); |
| 2875 | hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK; | ||
| 2876 | |||
| 2877 | hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9)); | ||
| 2878 | hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK; | ||
| 2879 | |||
| 2478 | hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10)); | 2880 | hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10)); |
| 2881 | hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H | | ||
| 2882 | PORT_PLL_DCO_AMP_MASK; | ||
| 2883 | |||
| 2479 | /* | 2884 | /* |
| 2480 | * While we write to the group register to program all lanes at once we | 2885 | * While we write to the group register to program all lanes at once we |
| 2481 | * can read only lane registers. We configure all lanes the same way, so | 2886 | * can read only lane registers. We configure all lanes the same way, so |
| @@ -2486,6 +2891,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, | |||
| 2486 | DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n", | 2891 | DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n", |
| 2487 | hw_state->pcsdw12, | 2892 | hw_state->pcsdw12, |
| 2488 | I915_READ(BXT_PORT_PCS_DW12_LN23(port))); | 2893 | I915_READ(BXT_PORT_PCS_DW12_LN23(port))); |
| 2894 | hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD; | ||
| 2489 | 2895 | ||
| 2490 | return true; | 2896 | return true; |
| 2491 | } | 2897 | } |
| @@ -2510,7 +2916,6 @@ void intel_ddi_pll_init(struct drm_device *dev) | |||
| 2510 | { | 2916 | { |
| 2511 | struct drm_i915_private *dev_priv = dev->dev_private; | 2917 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2512 | uint32_t val = I915_READ(LCPLL_CTL); | 2918 | uint32_t val = I915_READ(LCPLL_CTL); |
| 2513 | int cdclk_freq; | ||
| 2514 | 2919 | ||
| 2515 | if (IS_SKYLAKE(dev)) | 2920 | if (IS_SKYLAKE(dev)) |
| 2516 | skl_shared_dplls_init(dev_priv); | 2921 | skl_shared_dplls_init(dev_priv); |
| @@ -2519,10 +2924,10 @@ void intel_ddi_pll_init(struct drm_device *dev) | |||
| 2519 | else | 2924 | else |
| 2520 | hsw_shared_dplls_init(dev_priv); | 2925 | hsw_shared_dplls_init(dev_priv); |
| 2521 | 2926 | ||
| 2522 | cdclk_freq = dev_priv->display.get_display_clock_speed(dev); | ||
| 2523 | DRM_DEBUG_KMS("CDCLK running at %dKHz\n", cdclk_freq); | ||
| 2524 | |||
| 2525 | if (IS_SKYLAKE(dev)) { | 2927 | if (IS_SKYLAKE(dev)) { |
| 2928 | int cdclk_freq; | ||
| 2929 | |||
| 2930 | cdclk_freq = dev_priv->display.get_display_clock_speed(dev); | ||
| 2526 | dev_priv->skl_boot_cdclk = cdclk_freq; | 2931 | dev_priv->skl_boot_cdclk = cdclk_freq; |
| 2527 | if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) | 2932 | if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) |
| 2528 | DRM_ERROR("LCPLL1 is disabled\n"); | 2933 | DRM_ERROR("LCPLL1 is disabled\n"); |
| @@ -2618,20 +3023,6 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc) | |||
| 2618 | I915_WRITE(_FDI_RXA_CTL, val); | 3023 | I915_WRITE(_FDI_RXA_CTL, val); |
| 2619 | } | 3024 | } |
| 2620 | 3025 | ||
| 2621 | static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder) | ||
| 2622 | { | ||
| 2623 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(&intel_encoder->base); | ||
| 2624 | int type = intel_dig_port->base.type; | ||
| 2625 | |||
| 2626 | if (type != INTEL_OUTPUT_DISPLAYPORT && | ||
| 2627 | type != INTEL_OUTPUT_EDP && | ||
| 2628 | type != INTEL_OUTPUT_UNKNOWN) { | ||
| 2629 | return; | ||
| 2630 | } | ||
| 2631 | |||
| 2632 | intel_dp_hot_plug(intel_encoder); | ||
| 2633 | } | ||
| 2634 | |||
| 2635 | void intel_ddi_get_config(struct intel_encoder *encoder, | 3026 | void intel_ddi_get_config(struct intel_encoder *encoder, |
| 2636 | struct intel_crtc_state *pipe_config) | 3027 | struct intel_crtc_state *pipe_config) |
| 2637 | { | 3028 | { |
| @@ -2825,14 +3216,13 @@ void intel_ddi_init(struct drm_device *dev, enum port port) | |||
| 2825 | intel_encoder->type = INTEL_OUTPUT_UNKNOWN; | 3216 | intel_encoder->type = INTEL_OUTPUT_UNKNOWN; |
| 2826 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); | 3217 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
| 2827 | intel_encoder->cloneable = 0; | 3218 | intel_encoder->cloneable = 0; |
| 2828 | intel_encoder->hot_plug = intel_ddi_hot_plug; | ||
| 2829 | 3219 | ||
| 2830 | if (init_dp) { | 3220 | if (init_dp) { |
| 2831 | if (!intel_ddi_init_dp_connector(intel_dig_port)) | 3221 | if (!intel_ddi_init_dp_connector(intel_dig_port)) |
| 2832 | goto err; | 3222 | goto err; |
| 2833 | 3223 | ||
| 2834 | intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; | 3224 | intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; |
| 2835 | dev_priv->hpd_irq_port[port] = intel_dig_port; | 3225 | dev_priv->hotplug.irq_port[port] = intel_dig_port; |
| 2836 | } | 3226 | } |
| 2837 | 3227 | ||
| 2838 | /* In theory we don't need the encoder->type check, but leave it just in | 3228 | /* In theory we don't need the encoder->type check, but leave it just in |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 30e0f54ba19d..af0bcfee4771 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -86,9 +86,6 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, | |||
| 86 | static void ironlake_pch_clock_get(struct intel_crtc *crtc, | 86 | static void ironlake_pch_clock_get(struct intel_crtc *crtc, |
| 87 | struct intel_crtc_state *pipe_config); | 87 | struct intel_crtc_state *pipe_config); |
| 88 | 88 | ||
| 89 | static int intel_set_mode(struct drm_crtc *crtc, | ||
| 90 | struct drm_atomic_state *state, | ||
| 91 | bool force_restore); | ||
| 92 | static int intel_framebuffer_init(struct drm_device *dev, | 89 | static int intel_framebuffer_init(struct drm_device *dev, |
| 93 | struct intel_framebuffer *ifb, | 90 | struct intel_framebuffer *ifb, |
| 94 | struct drm_mode_fb_cmd2 *mode_cmd, | 91 | struct drm_mode_fb_cmd2 *mode_cmd, |
| @@ -111,16 +108,7 @@ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_cr | |||
| 111 | struct intel_crtc_state *crtc_state); | 108 | struct intel_crtc_state *crtc_state); |
| 112 | static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state, | 109 | static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state, |
| 113 | int num_connectors); | 110 | int num_connectors); |
| 114 | static void intel_crtc_enable_planes(struct drm_crtc *crtc); | 111 | static void intel_modeset_setup_hw_state(struct drm_device *dev); |
| 115 | static void intel_crtc_disable_planes(struct drm_crtc *crtc); | ||
| 116 | |||
| 117 | static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe) | ||
| 118 | { | ||
| 119 | if (!connector->mst_port) | ||
| 120 | return connector->encoder; | ||
| 121 | else | ||
| 122 | return &connector->mst_port->mst_encoders[pipe]->base; | ||
| 123 | } | ||
| 124 | 112 | ||
| 125 | typedef struct { | 113 | typedef struct { |
| 126 | int min, max; | 114 | int min, max; |
| @@ -413,7 +401,7 @@ static const intel_limit_t intel_limits_chv = { | |||
| 413 | static const intel_limit_t intel_limits_bxt = { | 401 | static const intel_limit_t intel_limits_bxt = { |
| 414 | /* FIXME: find real dot limits */ | 402 | /* FIXME: find real dot limits */ |
| 415 | .dot = { .min = 0, .max = INT_MAX }, | 403 | .dot = { .min = 0, .max = INT_MAX }, |
| 416 | .vco = { .min = 4800000, .max = 6480000 }, | 404 | .vco = { .min = 4800000, .max = 6700000 }, |
| 417 | .n = { .min = 1, .max = 1 }, | 405 | .n = { .min = 1, .max = 1 }, |
| 418 | .m1 = { .min = 2, .max = 2 }, | 406 | .m1 = { .min = 2, .max = 2 }, |
| 419 | /* FIXME: find real m2 limits */ | 407 | /* FIXME: find real m2 limits */ |
| @@ -422,14 +410,10 @@ static const intel_limit_t intel_limits_bxt = { | |||
| 422 | .p2 = { .p2_slow = 1, .p2_fast = 20 }, | 410 | .p2 = { .p2_slow = 1, .p2_fast = 20 }, |
| 423 | }; | 411 | }; |
| 424 | 412 | ||
| 425 | static void vlv_clock(int refclk, intel_clock_t *clock) | 413 | static bool |
| 414 | needs_modeset(struct drm_crtc_state *state) | ||
| 426 | { | 415 | { |
| 427 | clock->m = clock->m1 * clock->m2; | 416 | return state->mode_changed || state->active_changed; |
| 428 | clock->p = clock->p1 * clock->p2; | ||
| 429 | if (WARN_ON(clock->n == 0 || clock->p == 0)) | ||
| 430 | return; | ||
| 431 | clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); | ||
| 432 | clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); | ||
| 433 | } | 417 | } |
| 434 | 418 | ||
| 435 | /** | 419 | /** |
| @@ -561,15 +545,25 @@ intel_limit(struct intel_crtc_state *crtc_state, int refclk) | |||
| 561 | return limit; | 545 | return limit; |
| 562 | } | 546 | } |
| 563 | 547 | ||
| 548 | /* | ||
| 549 | * Platform specific helpers to calculate the port PLL loopback- (clock.m), | ||
| 550 | * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast | ||
| 551 | * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic. | ||
| 552 | * The helpers' return value is the rate of the clock that is fed to the | ||
| 553 | * display engine's pipe which can be the above fast dot clock rate or a | ||
| 554 | * divided-down version of it. | ||
| 555 | */ | ||
| 564 | /* m1 is reserved as 0 in Pineview, n is a ring counter */ | 556 | /* m1 is reserved as 0 in Pineview, n is a ring counter */ |
| 565 | static void pineview_clock(int refclk, intel_clock_t *clock) | 557 | static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock) |
| 566 | { | 558 | { |
| 567 | clock->m = clock->m2 + 2; | 559 | clock->m = clock->m2 + 2; |
| 568 | clock->p = clock->p1 * clock->p2; | 560 | clock->p = clock->p1 * clock->p2; |
| 569 | if (WARN_ON(clock->n == 0 || clock->p == 0)) | 561 | if (WARN_ON(clock->n == 0 || clock->p == 0)) |
| 570 | return; | 562 | return 0; |
| 571 | clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); | 563 | clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); |
| 572 | clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); | 564 | clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); |
| 565 | |||
| 566 | return clock->dot; | ||
| 573 | } | 567 | } |
| 574 | 568 | ||
| 575 | static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) | 569 | static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) |
| @@ -577,25 +571,41 @@ static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) | |||
| 577 | return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); | 571 | return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); |
| 578 | } | 572 | } |
| 579 | 573 | ||
| 580 | static void i9xx_clock(int refclk, intel_clock_t *clock) | 574 | static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock) |
| 581 | { | 575 | { |
| 582 | clock->m = i9xx_dpll_compute_m(clock); | 576 | clock->m = i9xx_dpll_compute_m(clock); |
| 583 | clock->p = clock->p1 * clock->p2; | 577 | clock->p = clock->p1 * clock->p2; |
| 584 | if (WARN_ON(clock->n + 2 == 0 || clock->p == 0)) | 578 | if (WARN_ON(clock->n + 2 == 0 || clock->p == 0)) |
| 585 | return; | 579 | return 0; |
| 586 | clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2); | 580 | clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2); |
| 587 | clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); | 581 | clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); |
| 582 | |||
| 583 | return clock->dot; | ||
| 588 | } | 584 | } |
| 589 | 585 | ||
| 590 | static void chv_clock(int refclk, intel_clock_t *clock) | 586 | static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock) |
| 591 | { | 587 | { |
| 592 | clock->m = clock->m1 * clock->m2; | 588 | clock->m = clock->m1 * clock->m2; |
| 593 | clock->p = clock->p1 * clock->p2; | 589 | clock->p = clock->p1 * clock->p2; |
| 594 | if (WARN_ON(clock->n == 0 || clock->p == 0)) | 590 | if (WARN_ON(clock->n == 0 || clock->p == 0)) |
| 595 | return; | 591 | return 0; |
| 592 | clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); | ||
| 593 | clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); | ||
| 594 | |||
| 595 | return clock->dot / 5; | ||
| 596 | } | ||
| 597 | |||
| 598 | int chv_calc_dpll_params(int refclk, intel_clock_t *clock) | ||
| 599 | { | ||
| 600 | clock->m = clock->m1 * clock->m2; | ||
| 601 | clock->p = clock->p1 * clock->p2; | ||
| 602 | if (WARN_ON(clock->n == 0 || clock->p == 0)) | ||
| 603 | return 0; | ||
| 596 | clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m, | 604 | clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m, |
| 597 | clock->n << 22); | 605 | clock->n << 22); |
| 598 | clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); | 606 | clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); |
| 607 | |||
| 608 | return clock->dot / 5; | ||
| 599 | } | 609 | } |
| 600 | 610 | ||
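The renamed *_calc_dpll_params() helpers now also return the clock that reaches the pipe: i9xx/pnv hand back the dot clock itself, while vlv/chv return dot / 5 because on those platforms the computed "dot" is the fast 5x clock fed to the port I/O. A standalone run of the i9xx arithmetic with arbitrary example dividers (not taken from any real mode), showing the m = 5 * (m1 + 2) + (m2 + 2) and n + 2 offsets:

#include <stdio.h>

int main(void)
{
	int refclk = 96000;			/* kHz, example value */
	int m1 = 10, m2 = 8, n = 3, p1 = 2, p2 = 10;
	int m = 5 * (m1 + 2) + (m2 + 2);	/* 70 */
	int p = p1 * p2;			/* 20 */
	int vco = refclk * m / (n + 2);		/* 1344000 kHz */
	int dot = vco / p;			/* 67200 kHz */

	printf("vco = %d kHz, dot = %d kHz\n", vco, dot);
	return 0;
}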
| 601 | #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) | 611 | #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) |
| @@ -639,16 +649,12 @@ static bool intel_PLL_is_valid(struct drm_device *dev, | |||
| 639 | return true; | 649 | return true; |
| 640 | } | 650 | } |
| 641 | 651 | ||
| 642 | static bool | 652 | static int |
| 643 | i9xx_find_best_dpll(const intel_limit_t *limit, | 653 | i9xx_select_p2_div(const intel_limit_t *limit, |
| 644 | struct intel_crtc_state *crtc_state, | 654 | const struct intel_crtc_state *crtc_state, |
| 645 | int target, int refclk, intel_clock_t *match_clock, | 655 | int target) |
| 646 | intel_clock_t *best_clock) | ||
| 647 | { | 656 | { |
| 648 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); | 657 | struct drm_device *dev = crtc_state->base.crtc->dev; |
| 649 | struct drm_device *dev = crtc->base.dev; | ||
| 650 | intel_clock_t clock; | ||
| 651 | int err = target; | ||
| 652 | 658 | ||
| 653 | if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { | 659 | if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { |
| 654 | /* | 660 | /* |
| @@ -657,18 +663,31 @@ i9xx_find_best_dpll(const intel_limit_t *limit, | |||
| 657 | * single/dual channel state, if we even can. | 663 | * single/dual channel state, if we even can. |
| 658 | */ | 664 | */ |
| 659 | if (intel_is_dual_link_lvds(dev)) | 665 | if (intel_is_dual_link_lvds(dev)) |
| 660 | clock.p2 = limit->p2.p2_fast; | 666 | return limit->p2.p2_fast; |
| 661 | else | 667 | else |
| 662 | clock.p2 = limit->p2.p2_slow; | 668 | return limit->p2.p2_slow; |
| 663 | } else { | 669 | } else { |
| 664 | if (target < limit->p2.dot_limit) | 670 | if (target < limit->p2.dot_limit) |
| 665 | clock.p2 = limit->p2.p2_slow; | 671 | return limit->p2.p2_slow; |
| 666 | else | 672 | else |
| 667 | clock.p2 = limit->p2.p2_fast; | 673 | return limit->p2.p2_fast; |
| 668 | } | 674 | } |
| 675 | } | ||
| 676 | |||
| 677 | static bool | ||
| 678 | i9xx_find_best_dpll(const intel_limit_t *limit, | ||
| 679 | struct intel_crtc_state *crtc_state, | ||
| 680 | int target, int refclk, intel_clock_t *match_clock, | ||
| 681 | intel_clock_t *best_clock) | ||
| 682 | { | ||
| 683 | struct drm_device *dev = crtc_state->base.crtc->dev; | ||
| 684 | intel_clock_t clock; | ||
| 685 | int err = target; | ||
| 669 | 686 | ||
| 670 | memset(best_clock, 0, sizeof(*best_clock)); | 687 | memset(best_clock, 0, sizeof(*best_clock)); |
| 671 | 688 | ||
| 689 | clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); | ||
| 690 | |||
| 672 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; | 691 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; |
| 673 | clock.m1++) { | 692 | clock.m1++) { |
| 674 | for (clock.m2 = limit->m2.min; | 693 | for (clock.m2 = limit->m2.min; |
| @@ -681,7 +700,7 @@ i9xx_find_best_dpll(const intel_limit_t *limit, | |||
| 681 | clock.p1 <= limit->p1.max; clock.p1++) { | 700 | clock.p1 <= limit->p1.max; clock.p1++) { |
| 682 | int this_err; | 701 | int this_err; |
| 683 | 702 | ||
| 684 | i9xx_clock(refclk, &clock); | 703 | i9xx_calc_dpll_params(refclk, &clock); |
| 685 | if (!intel_PLL_is_valid(dev, limit, | 704 | if (!intel_PLL_is_valid(dev, limit, |
| 686 | &clock)) | 705 | &clock)) |
| 687 | continue; | 706 | continue; |
| @@ -708,30 +727,14 @@ pnv_find_best_dpll(const intel_limit_t *limit, | |||
| 708 | int target, int refclk, intel_clock_t *match_clock, | 727 | int target, int refclk, intel_clock_t *match_clock, |
| 709 | intel_clock_t *best_clock) | 728 | intel_clock_t *best_clock) |
| 710 | { | 729 | { |
| 711 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); | 730 | struct drm_device *dev = crtc_state->base.crtc->dev; |
| 712 | struct drm_device *dev = crtc->base.dev; | ||
| 713 | intel_clock_t clock; | 731 | intel_clock_t clock; |
| 714 | int err = target; | 732 | int err = target; |
| 715 | 733 | ||
| 716 | if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { | ||
| 717 | /* | ||
| 718 | * For LVDS just rely on its current settings for dual-channel. | ||
| 719 | * We haven't figured out how to reliably set up different | ||
| 720 | * single/dual channel state, if we even can. | ||
| 721 | */ | ||
| 722 | if (intel_is_dual_link_lvds(dev)) | ||
| 723 | clock.p2 = limit->p2.p2_fast; | ||
| 724 | else | ||
| 725 | clock.p2 = limit->p2.p2_slow; | ||
| 726 | } else { | ||
| 727 | if (target < limit->p2.dot_limit) | ||
| 728 | clock.p2 = limit->p2.p2_slow; | ||
| 729 | else | ||
| 730 | clock.p2 = limit->p2.p2_fast; | ||
| 731 | } | ||
| 732 | |||
| 733 | memset(best_clock, 0, sizeof(*best_clock)); | 734 | memset(best_clock, 0, sizeof(*best_clock)); |
| 734 | 735 | ||
| 736 | clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); | ||
| 737 | |||
| 735 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; | 738 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; |
| 736 | clock.m1++) { | 739 | clock.m1++) { |
| 737 | for (clock.m2 = limit->m2.min; | 740 | for (clock.m2 = limit->m2.min; |
| @@ -742,7 +745,7 @@ pnv_find_best_dpll(const intel_limit_t *limit, | |||
| 742 | clock.p1 <= limit->p1.max; clock.p1++) { | 745 | clock.p1 <= limit->p1.max; clock.p1++) { |
| 743 | int this_err; | 746 | int this_err; |
| 744 | 747 | ||
| 745 | pineview_clock(refclk, &clock); | 748 | pnv_calc_dpll_params(refclk, &clock); |
| 746 | if (!intel_PLL_is_valid(dev, limit, | 749 | if (!intel_PLL_is_valid(dev, limit, |
| 747 | &clock)) | 750 | &clock)) |
| 748 | continue; | 751 | continue; |
| @@ -769,28 +772,17 @@ g4x_find_best_dpll(const intel_limit_t *limit, | |||
| 769 | int target, int refclk, intel_clock_t *match_clock, | 772 | int target, int refclk, intel_clock_t *match_clock, |
| 770 | intel_clock_t *best_clock) | 773 | intel_clock_t *best_clock) |
| 771 | { | 774 | { |
| 772 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); | 775 | struct drm_device *dev = crtc_state->base.crtc->dev; |
| 773 | struct drm_device *dev = crtc->base.dev; | ||
| 774 | intel_clock_t clock; | 776 | intel_clock_t clock; |
| 775 | int max_n; | 777 | int max_n; |
| 776 | bool found; | 778 | bool found = false; |
| 777 | /* approximately equals target * 0.00585 */ | 779 | /* approximately equals target * 0.00585 */ |
| 778 | int err_most = (target >> 8) + (target >> 9); | 780 | int err_most = (target >> 8) + (target >> 9); |
| 779 | found = false; | ||
| 780 | |||
| 781 | if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { | ||
| 782 | if (intel_is_dual_link_lvds(dev)) | ||
| 783 | clock.p2 = limit->p2.p2_fast; | ||
| 784 | else | ||
| 785 | clock.p2 = limit->p2.p2_slow; | ||
| 786 | } else { | ||
| 787 | if (target < limit->p2.dot_limit) | ||
| 788 | clock.p2 = limit->p2.p2_slow; | ||
| 789 | else | ||
| 790 | clock.p2 = limit->p2.p2_fast; | ||
| 791 | } | ||
| 792 | 781 | ||
| 793 | memset(best_clock, 0, sizeof(*best_clock)); | 782 | memset(best_clock, 0, sizeof(*best_clock)); |
| 783 | |||
| 784 | clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); | ||
| 785 | |||
| 794 | max_n = limit->n.max; | 786 | max_n = limit->n.max; |
| 795 | /* based on hardware requirement, prefer smaller n to precision */ | 787 | /* based on hardware requirement, prefer smaller n to precision */ |
| 796 | for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { | 788 | for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { |
| @@ -803,7 +795,7 @@ g4x_find_best_dpll(const intel_limit_t *limit, | |||
| 803 | clock.p1 >= limit->p1.min; clock.p1--) { | 795 | clock.p1 >= limit->p1.min; clock.p1--) { |
| 804 | int this_err; | 796 | int this_err; |
| 805 | 797 | ||
| 806 | i9xx_clock(refclk, &clock); | 798 | i9xx_calc_dpll_params(refclk, &clock); |
| 807 | if (!intel_PLL_is_valid(dev, limit, | 799 | if (!intel_PLL_is_valid(dev, limit, |
| 808 | &clock)) | 800 | &clock)) |
| 809 | continue; | 801 | continue; |
| @@ -893,7 +885,7 @@ vlv_find_best_dpll(const intel_limit_t *limit, | |||
| 893 | clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, | 885 | clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, |
| 894 | refclk * clock.m1); | 886 | refclk * clock.m1); |
| 895 | 887 | ||
| 896 | vlv_clock(refclk, &clock); | 888 | vlv_calc_dpll_params(refclk, &clock); |
| 897 | 889 | ||
| 898 | if (!intel_PLL_is_valid(dev, limit, | 890 | if (!intel_PLL_is_valid(dev, limit, |
| 899 | &clock)) | 891 | &clock)) |
| @@ -956,7 +948,7 @@ chv_find_best_dpll(const intel_limit_t *limit, | |||
| 956 | 948 | ||
| 957 | clock.m2 = m2; | 949 | clock.m2 = m2; |
| 958 | 950 | ||
| 959 | chv_clock(refclk, &clock); | 951 | chv_calc_dpll_params(refclk, &clock); |
| 960 | 952 | ||
| 961 | if (!intel_PLL_is_valid(dev, limit, &clock)) | 953 | if (!intel_PLL_is_valid(dev, limit, &clock)) |
| 962 | continue; | 954 | continue; |
| @@ -1026,7 +1018,7 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) | |||
| 1026 | line_mask = DSL_LINEMASK_GEN3; | 1018 | line_mask = DSL_LINEMASK_GEN3; |
| 1027 | 1019 | ||
| 1028 | line1 = I915_READ(reg) & line_mask; | 1020 | line1 = I915_READ(reg) & line_mask; |
| 1029 | mdelay(5); | 1021 | msleep(5); |
| 1030 | line2 = I915_READ(reg) & line_mask; | 1022 | line2 = I915_READ(reg) & line_mask; |
| 1031 | 1023 | ||
| 1032 | return line1 == line2; | 1024 | return line1 == line2; |
| @@ -1694,7 +1686,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev) | |||
| 1694 | int count = 0; | 1686 | int count = 0; |
| 1695 | 1687 | ||
| 1696 | for_each_intel_crtc(dev, crtc) | 1688 | for_each_intel_crtc(dev, crtc) |
| 1697 | count += crtc->active && | 1689 | count += crtc->base.state->active && |
| 1698 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO); | 1690 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO); |
| 1699 | 1691 | ||
| 1700 | return count; | 1692 | return count; |
| @@ -1775,7 +1767,7 @@ static void i9xx_disable_pll(struct intel_crtc *crtc) | |||
| 1775 | /* Disable DVO 2x clock on both PLLs if necessary */ | 1767 | /* Disable DVO 2x clock on both PLLs if necessary */ |
| 1776 | if (IS_I830(dev) && | 1768 | if (IS_I830(dev) && |
| 1777 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) && | 1769 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) && |
| 1778 | intel_num_dvo_pipes(dev) == 1) { | 1770 | !intel_num_dvo_pipes(dev)) { |
| 1779 | I915_WRITE(DPLL(PIPE_B), | 1771 | I915_WRITE(DPLL(PIPE_B), |
| 1780 | I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); | 1772 | I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); |
| 1781 | I915_WRITE(DPLL(PIPE_A), | 1773 | I915_WRITE(DPLL(PIPE_A), |
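With the move to atomic state, intel_num_dvo_pipes() counts pipes via crtc->base.state->active, and the I830 DVO 2x clock is dropped once no DVO pipe remains active (the pipe being disabled has already cleared its active flag at this point), rather than when exactly one was left. A rough user-space model of that bookkeeping, with invented types:

    #include <stdbool.h>
    #include <stddef.h>

    struct crtc_sketch {
        bool active;        /* mirrors crtc->base.state->active */
        bool drives_dvo;
    };

    int num_active_dvo_pipes(const struct crtc_sketch *crtcs, size_t n)
    {
        int count = 0;

        for (size_t i = 0; i < n; i++)
            count += crtcs[i].active && crtcs[i].drives_dvo;

        return count;
    }

    /* Only tear down the shared DVO 2x clock when the last DVO pipe is gone. */
    bool should_disable_dvo_2x(const struct crtc_sketch *crtcs, size_t n)
    {
        return num_active_dvo_pipes(crtcs, n) == 0;
    }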
| @@ -1790,13 +1782,13 @@ static void i9xx_disable_pll(struct intel_crtc *crtc) | |||
| 1790 | /* Make sure the pipe isn't still relying on us */ | 1782 | /* Make sure the pipe isn't still relying on us */ |
| 1791 | assert_pipe_disabled(dev_priv, pipe); | 1783 | assert_pipe_disabled(dev_priv, pipe); |
| 1792 | 1784 | ||
| 1793 | I915_WRITE(DPLL(pipe), 0); | 1785 | I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS); |
| 1794 | POSTING_READ(DPLL(pipe)); | 1786 | POSTING_READ(DPLL(pipe)); |
| 1795 | } | 1787 | } |
| 1796 | 1788 | ||
| 1797 | static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) | 1789 | static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) |
| 1798 | { | 1790 | { |
| 1799 | u32 val = 0; | 1791 | u32 val; |
| 1800 | 1792 | ||
| 1801 | /* Make sure the pipe isn't still relying on us */ | 1793 | /* Make sure the pipe isn't still relying on us */ |
| 1802 | assert_pipe_disabled(dev_priv, pipe); | 1794 | assert_pipe_disabled(dev_priv, pipe); |
| @@ -1805,8 +1797,9 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) | |||
| 1805 | * Leave integrated clock source and reference clock enabled for pipe B. | 1797 | * Leave integrated clock source and reference clock enabled for pipe B. |
| 1806 | * The latter is needed for VGA hotplug / manual detection. | 1798 | * The latter is needed for VGA hotplug / manual detection. |
| 1807 | */ | 1799 | */ |
| 1800 | val = DPLL_VGA_MODE_DIS; | ||
| 1808 | if (pipe == PIPE_B) | 1801 | if (pipe == PIPE_B) |
| 1809 | val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV; | 1802 | val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV; |
| 1810 | I915_WRITE(DPLL(pipe), val); | 1803 | I915_WRITE(DPLL(pipe), val); |
| 1811 | POSTING_READ(DPLL(pipe)); | 1804 | POSTING_READ(DPLL(pipe)); |
| 1812 | 1805 | ||
| @@ -1821,7 +1814,8 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) | |||
| 1821 | assert_pipe_disabled(dev_priv, pipe); | 1814 | assert_pipe_disabled(dev_priv, pipe); |
| 1822 | 1815 | ||
| 1823 | /* Set PLL en = 0 */ | 1816 | /* Set PLL en = 0 */ |
| 1824 | val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV; | 1817 | val = DPLL_SSC_REF_CLK_CHV | |
| 1818 | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; | ||
| 1825 | if (pipe != PIPE_A) | 1819 | if (pipe != PIPE_A) |
| 1826 | val |= DPLL_INTEGRATED_CRI_CLK_VLV; | 1820 | val |= DPLL_INTEGRATED_CRI_CLK_VLV; |
| 1827 | I915_WRITE(DPLL(pipe), val); | 1821 | I915_WRITE(DPLL(pipe), val); |
| @@ -1943,10 +1937,10 @@ static void intel_disable_shared_dpll(struct intel_crtc *crtc) | |||
| 1943 | 1937 | ||
| 1944 | /* PCH only available on ILK+ */ | 1938 | /* PCH only available on ILK+ */ |
| 1945 | BUG_ON(INTEL_INFO(dev)->gen < 5); | 1939 | BUG_ON(INTEL_INFO(dev)->gen < 5); |
| 1946 | if (WARN_ON(pll == NULL)) | 1940 | if (pll == NULL) |
| 1947 | return; | 1941 | return; |
| 1948 | 1942 | ||
| 1949 | if (WARN_ON(pll->config.crtc_mask == 0)) | 1943 | if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base))))) |
| 1950 | return; | 1944 | return; |
| 1951 | 1945 | ||
| 1952 | DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n", | 1946 | DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n", |
| @@ -2004,11 +1998,15 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, | |||
| 2004 | 1998 | ||
| 2005 | if (HAS_PCH_IBX(dev_priv->dev)) { | 1999 | if (HAS_PCH_IBX(dev_priv->dev)) { |
| 2006 | /* | 2000 | /* |
| 2007 | * make the BPC in transcoder be consistent with | 2001 | * Make the BPC in transcoder be consistent with |
| 2008 | * that in pipeconf reg. | 2002 | * that in pipeconf reg. For HDMI we must use 8bpc |
| 2003 | * here for both 8bpc and 12bpc. | ||
| 2009 | */ | 2004 | */ |
| 2010 | val &= ~PIPECONF_BPC_MASK; | 2005 | val &= ~PIPECONF_BPC_MASK; |
| 2011 | val |= pipeconf_val & PIPECONF_BPC_MASK; | 2006 | if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI)) |
| 2007 | val |= PIPECONF_8BPC; | ||
| 2008 | else | ||
| 2009 | val |= pipeconf_val & PIPECONF_BPC_MASK; | ||
| 2012 | } | 2010 | } |
| 2013 | 2011 | ||
| 2014 | val &= ~TRANS_INTERLACE_MASK; | 2012 | val &= ~TRANS_INTERLACE_MASK; |
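On IBX the PCH transcoder's BPC field normally mirrors the pipe's PIPECONF value, but the hunk above forces 8bpc whenever the pipe drives HDMI. A small sketch of that field update; the mask and values below are placeholders, not the real PIPECONF_BPC_MASK definitions:

    #include <stdint.h>
    #include <stdbool.h>

    #define BPC_MASK   (0x7u << 5)     /* placeholder shift/width */
    #define BPC_8BPC   (0x0u << 5)

    uint32_t program_trans_bpc(uint32_t trans_val, uint32_t pipeconf_val,
                               bool is_hdmi)
    {
        trans_val &= ~BPC_MASK;
        if (is_hdmi)
            trans_val |= BPC_8BPC;                    /* always 8bpc for HDMI */
        else
            trans_val |= pipeconf_val & BPC_MASK;     /* mirror the pipe */
        return trans_val;
    }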
| @@ -2122,6 +2120,8 @@ static void intel_enable_pipe(struct intel_crtc *crtc) | |||
| 2122 | int reg; | 2120 | int reg; |
| 2123 | u32 val; | 2121 | u32 val; |
| 2124 | 2122 | ||
| 2123 | DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe)); | ||
| 2124 | |||
| 2125 | assert_planes_disabled(dev_priv, pipe); | 2125 | assert_planes_disabled(dev_priv, pipe); |
| 2126 | assert_cursor_disabled(dev_priv, pipe); | 2126 | assert_cursor_disabled(dev_priv, pipe); |
| 2127 | assert_sprites_disabled(dev_priv, pipe); | 2127 | assert_sprites_disabled(dev_priv, pipe); |
| @@ -2181,6 +2181,8 @@ static void intel_disable_pipe(struct intel_crtc *crtc) | |||
| 2181 | int reg; | 2181 | int reg; |
| 2182 | u32 val; | 2182 | u32 val; |
| 2183 | 2183 | ||
| 2184 | DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe)); | ||
| 2185 | |||
| 2184 | /* | 2186 | /* |
| 2185 | * Make sure planes won't keep trying to pump pixels to us, | 2187 | * Make sure planes won't keep trying to pump pixels to us, |
| 2186 | * or we might hang the display. | 2188 | * or we might hang the display. |
| @@ -2211,28 +2213,6 @@ static void intel_disable_pipe(struct intel_crtc *crtc) | |||
| 2211 | intel_wait_for_pipe_off(crtc); | 2213 | intel_wait_for_pipe_off(crtc); |
| 2212 | } | 2214 | } |
| 2213 | 2215 | ||
| 2214 | /** | ||
| 2215 | * intel_enable_primary_hw_plane - enable the primary plane on a given pipe | ||
| 2216 | * @plane: plane to be enabled | ||
| 2217 | * @crtc: crtc for the plane | ||
| 2218 | * | ||
| 2219 | * Enable @plane on @crtc, making sure that the pipe is running first. | ||
| 2220 | */ | ||
| 2221 | static void intel_enable_primary_hw_plane(struct drm_plane *plane, | ||
| 2222 | struct drm_crtc *crtc) | ||
| 2223 | { | ||
| 2224 | struct drm_device *dev = plane->dev; | ||
| 2225 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 2226 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
| 2227 | |||
| 2228 | /* If the pipe isn't enabled, we can't pump pixels and may hang */ | ||
| 2229 | assert_pipe_enabled(dev_priv, intel_crtc->pipe); | ||
| 2230 | to_intel_plane_state(plane->state)->visible = true; | ||
| 2231 | |||
| 2232 | dev_priv->display.update_primary_plane(crtc, plane->fb, | ||
| 2233 | crtc->x, crtc->y); | ||
| 2234 | } | ||
| 2235 | |||
| 2236 | static bool need_vtd_wa(struct drm_device *dev) | 2216 | static bool need_vtd_wa(struct drm_device *dev) |
| 2237 | { | 2217 | { |
| 2238 | #ifdef CONFIG_INTEL_IOMMU | 2218 | #ifdef CONFIG_INTEL_IOMMU |
| @@ -2302,6 +2282,7 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb, | |||
| 2302 | const struct drm_plane_state *plane_state) | 2282 | const struct drm_plane_state *plane_state) |
| 2303 | { | 2283 | { |
| 2304 | struct intel_rotation_info *info = &view->rotation_info; | 2284 | struct intel_rotation_info *info = &view->rotation_info; |
| 2285 | unsigned int tile_height, tile_pitch; | ||
| 2305 | 2286 | ||
| 2306 | *view = i915_ggtt_view_normal; | 2287 | *view = i915_ggtt_view_normal; |
| 2307 | 2288 | ||
| @@ -2318,14 +2299,35 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb, | |||
| 2318 | info->pitch = fb->pitches[0]; | 2299 | info->pitch = fb->pitches[0]; |
| 2319 | info->fb_modifier = fb->modifier[0]; | 2300 | info->fb_modifier = fb->modifier[0]; |
| 2320 | 2301 | ||
| 2302 | tile_height = intel_tile_height(fb->dev, fb->pixel_format, | ||
| 2303 | fb->modifier[0]); | ||
| 2304 | tile_pitch = PAGE_SIZE / tile_height; | ||
| 2305 | info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch); | ||
| 2306 | info->height_pages = DIV_ROUND_UP(fb->height, tile_height); | ||
| 2307 | info->size = info->width_pages * info->height_pages * PAGE_SIZE; | ||
| 2308 | |||
| 2321 | return 0; | 2309 | return 0; |
| 2322 | } | 2310 | } |
| 2323 | 2311 | ||
| 2312 | static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv) | ||
| 2313 | { | ||
| 2314 | if (INTEL_INFO(dev_priv)->gen >= 9) | ||
| 2315 | return 256 * 1024; | ||
| 2316 | else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) || | ||
| 2317 | IS_VALLEYVIEW(dev_priv)) | ||
| 2318 | return 128 * 1024; | ||
| 2319 | else if (INTEL_INFO(dev_priv)->gen >= 4) | ||
| 2320 | return 4 * 1024; | ||
| 2321 | else | ||
| 2322 | return 0; | ||
| 2323 | } | ||
| 2324 | |||
| 2324 | int | 2325 | int |
| 2325 | intel_pin_and_fence_fb_obj(struct drm_plane *plane, | 2326 | intel_pin_and_fence_fb_obj(struct drm_plane *plane, |
| 2326 | struct drm_framebuffer *fb, | 2327 | struct drm_framebuffer *fb, |
| 2327 | const struct drm_plane_state *plane_state, | 2328 | const struct drm_plane_state *plane_state, |
| 2328 | struct intel_engine_cs *pipelined) | 2329 | struct intel_engine_cs *pipelined, |
| 2330 | struct drm_i915_gem_request **pipelined_request) | ||
| 2329 | { | 2331 | { |
| 2330 | struct drm_device *dev = fb->dev; | 2332 | struct drm_device *dev = fb->dev; |
| 2331 | struct drm_i915_private *dev_priv = dev->dev_private; | 2333 | struct drm_i915_private *dev_priv = dev->dev_private; |
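intel_linear_alignment() centralizes the per-generation GGTT alignment for linear scanout buffers so intel_pin_and_fence_fb_obj() and the page-offset code further down can share it; note the pre-gen4 fallback goes from 64 KiB to 0 in this change. (The same hunk also fills in the rotated GGTT view size as width_pages * height_pages * PAGE_SIZE, derived from the tile geometry.) A user-space approximation of the alignment table, with a stand-in for the platform checks, intended as illustration only:

    #include <stdbool.h>

    struct devinfo {
        int gen;
        bool needs_128k;    /* Broadwater, Crestline or Valleyview in the hunk */
    };

    unsigned int linear_alignment(const struct devinfo *d)
    {
        if (d->gen >= 9)
            return 256 * 1024;
        if (d->needs_128k)
            return 128 * 1024;
        if (d->gen >= 4)
            return 4 * 1024;
        return 0;           /* pre-gen4: no extra alignment applied here */
    }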
| @@ -2338,14 +2340,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane, | |||
| 2338 | 2340 | ||
| 2339 | switch (fb->modifier[0]) { | 2341 | switch (fb->modifier[0]) { |
| 2340 | case DRM_FORMAT_MOD_NONE: | 2342 | case DRM_FORMAT_MOD_NONE: |
| 2341 | if (INTEL_INFO(dev)->gen >= 9) | 2343 | alignment = intel_linear_alignment(dev_priv); |
| 2342 | alignment = 256 * 1024; | ||
| 2343 | else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) | ||
| 2344 | alignment = 128 * 1024; | ||
| 2345 | else if (INTEL_INFO(dev)->gen >= 4) | ||
| 2346 | alignment = 4 * 1024; | ||
| 2347 | else | ||
| 2348 | alignment = 64 * 1024; | ||
| 2349 | break; | 2344 | break; |
| 2350 | case I915_FORMAT_MOD_X_TILED: | 2345 | case I915_FORMAT_MOD_X_TILED: |
| 2351 | if (INTEL_INFO(dev)->gen >= 9) | 2346 | if (INTEL_INFO(dev)->gen >= 9) |
| @@ -2390,7 +2385,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane, | |||
| 2390 | 2385 | ||
| 2391 | dev_priv->mm.interruptible = false; | 2386 | dev_priv->mm.interruptible = false; |
| 2392 | ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined, | 2387 | ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined, |
| 2393 | &view); | 2388 | pipelined_request, &view); |
| 2394 | if (ret) | 2389 | if (ret) |
| 2395 | goto err_interruptible; | 2390 | goto err_interruptible; |
| 2396 | 2391 | ||
| @@ -2435,7 +2430,8 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb, | |||
| 2435 | 2430 | ||
| 2436 | /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel | 2431 | /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel |
| 2437 | * is assumed to be a power-of-two. */ | 2432 | * is assumed to be a power-of-two. */ |
| 2438 | unsigned long intel_gen4_compute_page_offset(int *x, int *y, | 2433 | unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv, |
| 2434 | int *x, int *y, | ||
| 2439 | unsigned int tiling_mode, | 2435 | unsigned int tiling_mode, |
| 2440 | unsigned int cpp, | 2436 | unsigned int cpp, |
| 2441 | unsigned int pitch) | 2437 | unsigned int pitch) |
| @@ -2451,12 +2447,13 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y, | |||
| 2451 | 2447 | ||
| 2452 | return tile_rows * pitch * 8 + tiles * 4096; | 2448 | return tile_rows * pitch * 8 + tiles * 4096; |
| 2453 | } else { | 2449 | } else { |
| 2450 | unsigned int alignment = intel_linear_alignment(dev_priv) - 1; | ||
| 2454 | unsigned int offset; | 2451 | unsigned int offset; |
| 2455 | 2452 | ||
| 2456 | offset = *y * pitch + *x * cpp; | 2453 | offset = *y * pitch + *x * cpp; |
| 2457 | *y = 0; | 2454 | *y = (offset & alignment) / pitch; |
| 2458 | *x = (offset & 4095) / cpp; | 2455 | *x = ((offset & alignment) - *y * pitch) / cpp; |
| 2459 | return offset & -4096; | 2456 | return offset & ~alignment; |
| 2460 | } | 2457 | } |
| 2461 | } | 2458 | } |
| 2462 | 2459 | ||
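With the alignment no longer hard-coded to 4096, the linear branch of intel_gen4_compute_page_offset() splits the byte offset into an aligned base plus a residual (x, y) instead of forcing y to 0. A standalone version of that arithmetic with a small self-check; it assumes a non-zero power-of-two alignment, as on gen4+:

    #include <assert.h>
    #include <stdio.h>

    /*
     * Split "y * pitch + x * cpp" into an aligned base offset plus adjusted
     * (x, y) so that base + y*pitch + x*cpp still addresses the same byte.
     */
    unsigned long linear_page_offset(int *x, int *y, unsigned int cpp,
                                     unsigned int pitch, unsigned int alignment)
    {
        unsigned int mask = alignment - 1;
        unsigned int offset = *y * pitch + *x * cpp;

        *y = (offset & mask) / pitch;
        *x = ((offset & mask) - *y * pitch) / cpp;
        return offset & ~(unsigned long)mask;
    }

    int main(void)
    {
        int x = 100, y = 300;
        unsigned int cpp = 4, pitch = 4096 * 3;   /* made-up geometry */
        unsigned long base = linear_page_offset(&x, &y, cpp, pitch, 256 * 1024);

        /* base plus the remaining (x, y) must land on the original byte */
        assert(base + (unsigned long)y * pitch + (unsigned long)x * cpp ==
               300UL * pitch + 100UL * cpp);
        printf("base=%lu x=%d y=%d\n", base, x, y);
        return 0;
    }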
| @@ -2583,6 +2580,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, | |||
| 2583 | struct intel_crtc *i; | 2580 | struct intel_crtc *i; |
| 2584 | struct drm_i915_gem_object *obj; | 2581 | struct drm_i915_gem_object *obj; |
| 2585 | struct drm_plane *primary = intel_crtc->base.primary; | 2582 | struct drm_plane *primary = intel_crtc->base.primary; |
| 2583 | struct drm_plane_state *plane_state = primary->state; | ||
| 2586 | struct drm_framebuffer *fb; | 2584 | struct drm_framebuffer *fb; |
| 2587 | 2585 | ||
| 2588 | if (!plane_config->fb) | 2586 | if (!plane_config->fb) |
| @@ -2622,15 +2620,23 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, | |||
| 2622 | return; | 2620 | return; |
| 2623 | 2621 | ||
| 2624 | valid_fb: | 2622 | valid_fb: |
| 2623 | plane_state->src_x = plane_state->src_y = 0; | ||
| 2624 | plane_state->src_w = fb->width << 16; | ||
| 2625 | plane_state->src_h = fb->height << 16; | ||
| 2626 | |||
| 2627 | plane_state->crtc_x = plane_state->crtc_y = 0; | ||
| 2628 | plane_state->crtc_w = fb->width; | ||
| 2629 | plane_state->crtc_h = fb->height; | ||
| 2630 | |||
| 2625 | obj = intel_fb_obj(fb); | 2631 | obj = intel_fb_obj(fb); |
| 2626 | if (obj->tiling_mode != I915_TILING_NONE) | 2632 | if (obj->tiling_mode != I915_TILING_NONE) |
| 2627 | dev_priv->preserve_bios_swizzle = true; | 2633 | dev_priv->preserve_bios_swizzle = true; |
| 2628 | 2634 | ||
| 2629 | primary->fb = fb; | 2635 | drm_framebuffer_reference(fb); |
| 2630 | primary->state->crtc = &intel_crtc->base; | 2636 | primary->fb = primary->state->fb = fb; |
| 2631 | primary->crtc = &intel_crtc->base; | 2637 | primary->crtc = primary->state->crtc = &intel_crtc->base; |
| 2632 | update_state_fb(primary); | 2638 | intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary)); |
| 2633 | obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); | 2639 | obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit; |
| 2634 | } | 2640 | } |
| 2635 | 2641 | ||
| 2636 | static void i9xx_update_primary_plane(struct drm_crtc *crtc, | 2642 | static void i9xx_update_primary_plane(struct drm_crtc *crtc, |
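The BIOS framebuffer takeover above now also fills in the atomic plane state. The convention worth noting is that the src_* rectangle is in 16.16 fixed point while the crtc_* rectangle is in whole pixels, which is why only the source width and height are shifted by 16. A compact sketch of that convention, using a hypothetical mirror of the drm_plane_state fields:

    #include <stdint.h>

    struct plane_state_sketch {
        /* source rectangle in the framebuffer, 16.16 fixed point */
        uint32_t src_x, src_y, src_w, src_h;
        /* destination rectangle on the CRTC, whole pixels */
        int32_t crtc_x, crtc_y;
        uint32_t crtc_w, crtc_h;
    };

    /* Scan out the full framebuffer 1:1 at the top-left corner of the pipe. */
    void seed_fullscreen_state(struct plane_state_sketch *s,
                               uint32_t fb_width, uint32_t fb_height)
    {
        s->src_x = s->src_y = 0;
        s->src_w = fb_width << 16;   /* whole pixels live in the high half */
        s->src_h = fb_height << 16;

        s->crtc_x = s->crtc_y = 0;
        s->crtc_w = fb_width;
        s->crtc_h = fb_height;
    }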
| @@ -2725,7 +2731,8 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc, | |||
| 2725 | 2731 | ||
| 2726 | if (INTEL_INFO(dev)->gen >= 4) { | 2732 | if (INTEL_INFO(dev)->gen >= 4) { |
| 2727 | intel_crtc->dspaddr_offset = | 2733 | intel_crtc->dspaddr_offset = |
| 2728 | intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, | 2734 | intel_gen4_compute_page_offset(dev_priv, |
| 2735 | &x, &y, obj->tiling_mode, | ||
| 2729 | pixel_size, | 2736 | pixel_size, |
| 2730 | fb->pitches[0]); | 2737 | fb->pitches[0]); |
| 2731 | linear_offset -= intel_crtc->dspaddr_offset; | 2738 | linear_offset -= intel_crtc->dspaddr_offset; |
| @@ -2826,7 +2833,8 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc, | |||
| 2826 | 2833 | ||
| 2827 | linear_offset = y * fb->pitches[0] + x * pixel_size; | 2834 | linear_offset = y * fb->pitches[0] + x * pixel_size; |
| 2828 | intel_crtc->dspaddr_offset = | 2835 | intel_crtc->dspaddr_offset = |
| 2829 | intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, | 2836 | intel_gen4_compute_page_offset(dev_priv, |
| 2837 | &x, &y, obj->tiling_mode, | ||
| 2830 | pixel_size, | 2838 | pixel_size, |
| 2831 | fb->pitches[0]); | 2839 | fb->pitches[0]); |
| 2832 | linear_offset -= intel_crtc->dspaddr_offset; | 2840 | linear_offset -= intel_crtc->dspaddr_offset; |
| @@ -2904,32 +2912,32 @@ unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, | |||
| 2904 | return i915_gem_obj_ggtt_offset_view(obj, view); | 2912 | return i915_gem_obj_ggtt_offset_view(obj, view); |
| 2905 | } | 2913 | } |
| 2906 | 2914 | ||
| 2915 | static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) | ||
| 2916 | { | ||
| 2917 | struct drm_device *dev = intel_crtc->base.dev; | ||
| 2918 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 2919 | |||
| 2920 | I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0); | ||
| 2921 | I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); | ||
| 2922 | I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); | ||
| 2923 | DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n", | ||
| 2924 | intel_crtc->base.base.id, intel_crtc->pipe, id); | ||
| 2925 | } | ||
| 2926 | |||
| 2907 | /* | 2927 | /* |
| 2908 | * This function detaches (aka. unbinds) unused scalers in hardware | 2928 | * This function detaches (aka. unbinds) unused scalers in hardware |
| 2909 | */ | 2929 | */ |
| 2910 | void skl_detach_scalers(struct intel_crtc *intel_crtc) | 2930 | static void skl_detach_scalers(struct intel_crtc *intel_crtc) |
| 2911 | { | 2931 | { |
| 2912 | struct drm_device *dev; | ||
| 2913 | struct drm_i915_private *dev_priv; | ||
| 2914 | struct intel_crtc_scaler_state *scaler_state; | 2932 | struct intel_crtc_scaler_state *scaler_state; |
| 2915 | int i; | 2933 | int i; |
| 2916 | 2934 | ||
| 2917 | if (!intel_crtc || !intel_crtc->config) | ||
| 2918 | return; | ||
| 2919 | |||
| 2920 | dev = intel_crtc->base.dev; | ||
| 2921 | dev_priv = dev->dev_private; | ||
| 2922 | scaler_state = &intel_crtc->config->scaler_state; | 2935 | scaler_state = &intel_crtc->config->scaler_state; |
| 2923 | 2936 | ||
| 2924 | /* loop through and disable scalers that aren't in use */ | 2937 | /* loop through and disable scalers that aren't in use */ |
| 2925 | for (i = 0; i < intel_crtc->num_scalers; i++) { | 2938 | for (i = 0; i < intel_crtc->num_scalers; i++) { |
| 2926 | if (!scaler_state->scalers[i].in_use) { | 2939 | if (!scaler_state->scalers[i].in_use) |
| 2927 | I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, i), 0); | 2940 | skl_detach_scaler(intel_crtc, i); |
| 2928 | I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, i), 0); | ||
| 2929 | I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, i), 0); | ||
| 2930 | DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n", | ||
| 2931 | intel_crtc->base.base.id, intel_crtc->pipe, i); | ||
| 2932 | } | ||
| 2933 | } | 2941 | } |
| 2934 | } | 2942 | } |
| 2935 | 2943 | ||
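Parking a single scaler is factored into skl_detach_scaler(), and skl_detach_scalers() now just walks the CRTC's scalers and detaches those whose in_use flag is clear. A minimal model of that loop, with the SKL_PS_* register writes replaced by a stub:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_SCALERS 2

    struct scaler_state_sketch {
        bool in_use[MAX_SCALERS];
    };

    /* Stand-in for the three SKL_PS_* writes that park one scaler. */
    void detach_scaler(int pipe, int id)
    {
        printf("pipe %d: scaler %d parked (ctrl/pos/size cleared)\n", pipe, id);
    }

    void detach_unused_scalers(int pipe, int num_scalers,
                               const struct scaler_state_sketch *state)
    {
        for (int id = 0; id < num_scalers; id++)
            if (!state->in_use[id])
                detach_scaler(pipe, id);
    }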
| @@ -3132,8 +3140,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 3132 | struct drm_device *dev = crtc->dev; | 3140 | struct drm_device *dev = crtc->dev; |
| 3133 | struct drm_i915_private *dev_priv = dev->dev_private; | 3141 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3134 | 3142 | ||
| 3135 | if (dev_priv->display.disable_fbc) | 3143 | if (dev_priv->fbc.disable_fbc) |
| 3136 | dev_priv->display.disable_fbc(dev); | 3144 | dev_priv->fbc.disable_fbc(dev_priv); |
| 3137 | 3145 | ||
| 3138 | dev_priv->display.update_primary_plane(crtc, fb, x, y); | 3146 | dev_priv->display.update_primary_plane(crtc, fb, x, y); |
| 3139 | 3147 | ||
| @@ -3176,24 +3184,8 @@ static void intel_update_primary_planes(struct drm_device *dev) | |||
| 3176 | } | 3184 | } |
| 3177 | } | 3185 | } |
| 3178 | 3186 | ||
| 3179 | void intel_crtc_reset(struct intel_crtc *crtc) | ||
| 3180 | { | ||
| 3181 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | ||
| 3182 | |||
| 3183 | if (!crtc->active) | ||
| 3184 | return; | ||
| 3185 | |||
| 3186 | intel_crtc_disable_planes(&crtc->base); | ||
| 3187 | dev_priv->display.crtc_disable(&crtc->base); | ||
| 3188 | dev_priv->display.crtc_enable(&crtc->base); | ||
| 3189 | intel_crtc_enable_planes(&crtc->base); | ||
| 3190 | } | ||
| 3191 | |||
| 3192 | void intel_prepare_reset(struct drm_device *dev) | 3187 | void intel_prepare_reset(struct drm_device *dev) |
| 3193 | { | 3188 | { |
| 3194 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
| 3195 | struct intel_crtc *crtc; | ||
| 3196 | |||
| 3197 | /* no reset support for gen2 */ | 3189 | /* no reset support for gen2 */ |
| 3198 | if (IS_GEN2(dev)) | 3190 | if (IS_GEN2(dev)) |
| 3199 | return; | 3191 | return; |
| @@ -3203,18 +3195,11 @@ void intel_prepare_reset(struct drm_device *dev) | |||
| 3203 | return; | 3195 | return; |
| 3204 | 3196 | ||
| 3205 | drm_modeset_lock_all(dev); | 3197 | drm_modeset_lock_all(dev); |
| 3206 | |||
| 3207 | /* | 3198 | /* |
| 3208 | * Disabling the crtcs gracefully seems nicer. Also the | 3199 | * Disabling the crtcs gracefully seems nicer. Also the |
| 3209 | * g33 docs say we should at least disable all the planes. | 3200 | * g33 docs say we should at least disable all the planes. |
| 3210 | */ | 3201 | */ |
| 3211 | for_each_intel_crtc(dev, crtc) { | 3202 | intel_display_suspend(dev); |
| 3212 | if (!crtc->active) | ||
| 3213 | continue; | ||
| 3214 | |||
| 3215 | intel_crtc_disable_planes(&crtc->base); | ||
| 3216 | dev_priv->display.crtc_disable(&crtc->base); | ||
| 3217 | } | ||
| 3218 | } | 3203 | } |
| 3219 | 3204 | ||
| 3220 | void intel_finish_reset(struct drm_device *dev) | 3205 | void intel_finish_reset(struct drm_device *dev) |
| @@ -3258,7 +3243,7 @@ void intel_finish_reset(struct drm_device *dev) | |||
| 3258 | dev_priv->display.hpd_irq_setup(dev); | 3243 | dev_priv->display.hpd_irq_setup(dev); |
| 3259 | spin_unlock_irq(&dev_priv->irq_lock); | 3244 | spin_unlock_irq(&dev_priv->irq_lock); |
| 3260 | 3245 | ||
| 3261 | intel_modeset_setup_hw_state(dev, true); | 3246 | intel_display_resume(dev); |
| 3262 | 3247 | ||
| 3263 | intel_hpd_init(dev_priv); | 3248 | intel_hpd_init(dev_priv); |
| 3264 | 3249 | ||
| @@ -4200,34 +4185,16 @@ static void lpt_pch_enable(struct drm_crtc *crtc) | |||
| 4200 | lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); | 4185 | lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); |
| 4201 | } | 4186 | } |
| 4202 | 4187 | ||
| 4203 | void intel_put_shared_dpll(struct intel_crtc *crtc) | ||
| 4204 | { | ||
| 4205 | struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); | ||
| 4206 | |||
| 4207 | if (pll == NULL) | ||
| 4208 | return; | ||
| 4209 | |||
| 4210 | if (!(pll->config.crtc_mask & (1 << crtc->pipe))) { | ||
| 4211 | WARN(1, "bad %s crtc mask\n", pll->name); | ||
| 4212 | return; | ||
| 4213 | } | ||
| 4214 | |||
| 4215 | pll->config.crtc_mask &= ~(1 << crtc->pipe); | ||
| 4216 | if (pll->config.crtc_mask == 0) { | ||
| 4217 | WARN_ON(pll->on); | ||
| 4218 | WARN_ON(pll->active); | ||
| 4219 | } | ||
| 4220 | |||
| 4221 | crtc->config->shared_dpll = DPLL_ID_PRIVATE; | ||
| 4222 | } | ||
| 4223 | |||
| 4224 | struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, | 4188 | struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, |
| 4225 | struct intel_crtc_state *crtc_state) | 4189 | struct intel_crtc_state *crtc_state) |
| 4226 | { | 4190 | { |
| 4227 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; | 4191 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
| 4228 | struct intel_shared_dpll *pll; | 4192 | struct intel_shared_dpll *pll; |
| 4193 | struct intel_shared_dpll_config *shared_dpll; | ||
| 4229 | enum intel_dpll_id i; | 4194 | enum intel_dpll_id i; |
| 4230 | 4195 | ||
| 4196 | shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); | ||
| 4197 | |||
| 4231 | if (HAS_PCH_IBX(dev_priv->dev)) { | 4198 | if (HAS_PCH_IBX(dev_priv->dev)) { |
| 4232 | /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ | 4199 | /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ |
| 4233 | i = (enum intel_dpll_id) crtc->pipe; | 4200 | i = (enum intel_dpll_id) crtc->pipe; |
| @@ -4236,7 +4203,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, | |||
| 4236 | DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", | 4203 | DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", |
| 4237 | crtc->base.base.id, pll->name); | 4204 | crtc->base.base.id, pll->name); |
| 4238 | 4205 | ||
| 4239 | WARN_ON(pll->new_config->crtc_mask); | 4206 | WARN_ON(shared_dpll[i].crtc_mask); |
| 4240 | 4207 | ||
| 4241 | goto found; | 4208 | goto found; |
| 4242 | } | 4209 | } |
| @@ -4256,7 +4223,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, | |||
| 4256 | pll = &dev_priv->shared_dplls[i]; | 4223 | pll = &dev_priv->shared_dplls[i]; |
| 4257 | DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", | 4224 | DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", |
| 4258 | crtc->base.base.id, pll->name); | 4225 | crtc->base.base.id, pll->name); |
| 4259 | WARN_ON(pll->new_config->crtc_mask); | 4226 | WARN_ON(shared_dpll[i].crtc_mask); |
| 4260 | 4227 | ||
| 4261 | goto found; | 4228 | goto found; |
| 4262 | } | 4229 | } |
| @@ -4265,15 +4232,15 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, | |||
| 4265 | pll = &dev_priv->shared_dplls[i]; | 4232 | pll = &dev_priv->shared_dplls[i]; |
| 4266 | 4233 | ||
| 4267 | /* Only want to check enabled timings first */ | 4234 | /* Only want to check enabled timings first */ |
| 4268 | if (pll->new_config->crtc_mask == 0) | 4235 | if (shared_dpll[i].crtc_mask == 0) |
| 4269 | continue; | 4236 | continue; |
| 4270 | 4237 | ||
| 4271 | if (memcmp(&crtc_state->dpll_hw_state, | 4238 | if (memcmp(&crtc_state->dpll_hw_state, |
| 4272 | &pll->new_config->hw_state, | 4239 | &shared_dpll[i].hw_state, |
| 4273 | sizeof(pll->new_config->hw_state)) == 0) { | 4240 | sizeof(crtc_state->dpll_hw_state)) == 0) { |
| 4274 | DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n", | 4241 | DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n", |
| 4275 | crtc->base.base.id, pll->name, | 4242 | crtc->base.base.id, pll->name, |
| 4276 | pll->new_config->crtc_mask, | 4243 | shared_dpll[i].crtc_mask, |
| 4277 | pll->active); | 4244 | pll->active); |
| 4278 | goto found; | 4245 | goto found; |
| 4279 | } | 4246 | } |
| @@ -4282,7 +4249,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, | |||
| 4282 | /* Ok no matching timings, maybe there's a free one? */ | 4249 | /* Ok no matching timings, maybe there's a free one? */ |
| 4283 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | 4250 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { |
| 4284 | pll = &dev_priv->shared_dplls[i]; | 4251 | pll = &dev_priv->shared_dplls[i]; |
| 4285 | if (pll->new_config->crtc_mask == 0) { | 4252 | if (shared_dpll[i].crtc_mask == 0) { |
| 4286 | DRM_DEBUG_KMS("CRTC:%d allocated %s\n", | 4253 | DRM_DEBUG_KMS("CRTC:%d allocated %s\n", |
| 4287 | crtc->base.base.id, pll->name); | 4254 | crtc->base.base.id, pll->name); |
| 4288 | goto found; | 4255 | goto found; |
| @@ -4292,83 +4259,33 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, | |||
| 4292 | return NULL; | 4259 | return NULL; |
| 4293 | 4260 | ||
| 4294 | found: | 4261 | found: |
| 4295 | if (pll->new_config->crtc_mask == 0) | 4262 | if (shared_dpll[i].crtc_mask == 0) |
| 4296 | pll->new_config->hw_state = crtc_state->dpll_hw_state; | 4263 | shared_dpll[i].hw_state = |
| 4264 | crtc_state->dpll_hw_state; | ||
| 4297 | 4265 | ||
| 4298 | crtc_state->shared_dpll = i; | 4266 | crtc_state->shared_dpll = i; |
| 4299 | DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, | 4267 | DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, |
| 4300 | pipe_name(crtc->pipe)); | 4268 | pipe_name(crtc->pipe)); |
| 4301 | 4269 | ||
| 4302 | pll->new_config->crtc_mask |= 1 << crtc->pipe; | 4270 | shared_dpll[i].crtc_mask |= 1 << crtc->pipe; |
| 4303 | 4271 | ||
| 4304 | return pll; | 4272 | return pll; |
| 4305 | } | 4273 | } |
| 4306 | 4274 | ||
| 4307 | /** | 4275 | static void intel_shared_dpll_commit(struct drm_atomic_state *state) |
| 4308 | * intel_shared_dpll_start_config - start a new PLL staged config | ||
| 4309 | * @dev_priv: DRM device | ||
| 4310 | * @clear_pipes: mask of pipes that will have their PLLs freed | ||
| 4311 | * | ||
| 4312 | * Starts a new PLL staged config, copying the current config but | ||
| 4313 | * releasing the references of pipes specified in clear_pipes. | ||
| 4314 | */ | ||
| 4315 | static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv, | ||
| 4316 | unsigned clear_pipes) | ||
| 4317 | { | ||
| 4318 | struct intel_shared_dpll *pll; | ||
| 4319 | enum intel_dpll_id i; | ||
| 4320 | |||
| 4321 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | ||
| 4322 | pll = &dev_priv->shared_dplls[i]; | ||
| 4323 | |||
| 4324 | pll->new_config = kmemdup(&pll->config, sizeof pll->config, | ||
| 4325 | GFP_KERNEL); | ||
| 4326 | if (!pll->new_config) | ||
| 4327 | goto cleanup; | ||
| 4328 | |||
| 4329 | pll->new_config->crtc_mask &= ~clear_pipes; | ||
| 4330 | } | ||
| 4331 | |||
| 4332 | return 0; | ||
| 4333 | |||
| 4334 | cleanup: | ||
| 4335 | while (--i >= 0) { | ||
| 4336 | pll = &dev_priv->shared_dplls[i]; | ||
| 4337 | kfree(pll->new_config); | ||
| 4338 | pll->new_config = NULL; | ||
| 4339 | } | ||
| 4340 | |||
| 4341 | return -ENOMEM; | ||
| 4342 | } | ||
| 4343 | |||
| 4344 | static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv) | ||
| 4345 | { | 4276 | { |
| 4277 | struct drm_i915_private *dev_priv = to_i915(state->dev); | ||
| 4278 | struct intel_shared_dpll_config *shared_dpll; | ||
| 4346 | struct intel_shared_dpll *pll; | 4279 | struct intel_shared_dpll *pll; |
| 4347 | enum intel_dpll_id i; | 4280 | enum intel_dpll_id i; |
| 4348 | 4281 | ||
| 4349 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | 4282 | if (!to_intel_atomic_state(state)->dpll_set) |
| 4350 | pll = &dev_priv->shared_dplls[i]; | 4283 | return; |
| 4351 | |||
| 4352 | WARN_ON(pll->new_config == &pll->config); | ||
| 4353 | |||
| 4354 | pll->config = *pll->new_config; | ||
| 4355 | kfree(pll->new_config); | ||
| 4356 | pll->new_config = NULL; | ||
| 4357 | } | ||
| 4358 | } | ||
| 4359 | |||
| 4360 | static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv) | ||
| 4361 | { | ||
| 4362 | struct intel_shared_dpll *pll; | ||
| 4363 | enum intel_dpll_id i; | ||
| 4364 | 4284 | ||
| 4285 | shared_dpll = to_intel_atomic_state(state)->shared_dpll; | ||
| 4365 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | 4286 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { |
| 4366 | pll = &dev_priv->shared_dplls[i]; | 4287 | pll = &dev_priv->shared_dplls[i]; |
| 4367 | 4288 | pll->config = shared_dpll[i]; | |
| 4368 | WARN_ON(pll->new_config == &pll->config); | ||
| 4369 | |||
| 4370 | kfree(pll->new_config); | ||
| 4371 | pll->new_config = NULL; | ||
| 4372 | } | 4289 | } |
| 4373 | } | 4290 | } |
| 4374 | 4291 | ||
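The per-PLL new_config staging is replaced by a shared_dpll array carried in the atomic state: intel_get_shared_dpll() first tries to reuse a PLL whose staged hw_state matches the requested one, otherwise takes one with an empty crtc_mask, then sets this pipe's bit; intel_shared_dpll_commit() later copies the array back into the live pll->config. A condensed sketch of the selection rule, with invented types standing in for the real state:

    #define NUM_PLLS 3

    struct pll_cfg_sketch {
        unsigned int crtc_mask;   /* which pipes reference this PLL */
        unsigned int hw_state;    /* stand-in for the real dpll_hw_state */
    };

    /*
     * Returns the index of a PLL this CRTC can use, or -1.  'staged' is the
     * candidate configuration carried in the atomic state, not the live one.
     */
    int get_shared_dpll(struct pll_cfg_sketch *staged, int pipe,
                        unsigned int wanted_hw_state)
    {
        int i;

        /* Prefer a PLL already staged with identical timings. */
        for (i = 0; i < NUM_PLLS; i++)
            if (staged[i].crtc_mask && staged[i].hw_state == wanted_hw_state)
                goto found;

        /* Otherwise take a completely unused one. */
        for (i = 0; i < NUM_PLLS; i++)
            if (staged[i].crtc_mask == 0)
                goto found;

        return -1;

    found:
        if (staged[i].crtc_mask == 0)
            staged[i].hw_state = wanted_hw_state;
        staged[i].crtc_mask |= 1u << pipe;
        return i;
    }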
| @@ -4386,62 +4303,16 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe) | |||
| 4386 | } | 4303 | } |
| 4387 | } | 4304 | } |
| 4388 | 4305 | ||
| 4389 | /** | 4306 | static int |
| 4390 | * skl_update_scaler_users - Stages update to crtc's scaler state | 4307 | skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, |
| 4391 | * @intel_crtc: crtc | 4308 | unsigned scaler_user, int *scaler_id, unsigned int rotation, |
| 4392 | * @crtc_state: crtc_state | 4309 | int src_w, int src_h, int dst_w, int dst_h) |
| 4393 | * @plane: plane (NULL indicates crtc is requesting update) | ||
| 4394 | * @plane_state: plane's state | ||
| 4395 | * @force_detach: request unconditional detachment of scaler | ||
| 4396 | * | ||
| 4397 | * This function updates scaler state for requested plane or crtc. | ||
| 4398 | * To request scaler usage update for a plane, caller shall pass plane pointer. | ||
| 4399 | * To request scaler usage update for crtc, caller shall pass plane pointer | ||
| 4400 | * as NULL. | ||
| 4401 | * | ||
| 4402 | * Return | ||
| 4403 | * 0 - scaler_usage updated successfully | ||
| 4404 | * error - requested scaling cannot be supported or other error condition | ||
| 4405 | */ | ||
| 4406 | int | ||
| 4407 | skl_update_scaler_users( | ||
| 4408 | struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state, | ||
| 4409 | struct intel_plane *intel_plane, struct intel_plane_state *plane_state, | ||
| 4410 | int force_detach) | ||
| 4411 | { | 4310 | { |
| 4311 | struct intel_crtc_scaler_state *scaler_state = | ||
| 4312 | &crtc_state->scaler_state; | ||
| 4313 | struct intel_crtc *intel_crtc = | ||
| 4314 | to_intel_crtc(crtc_state->base.crtc); | ||
| 4412 | int need_scaling; | 4315 | int need_scaling; |
| 4413 | int idx; | ||
| 4414 | int src_w, src_h, dst_w, dst_h; | ||
| 4415 | int *scaler_id; | ||
| 4416 | struct drm_framebuffer *fb; | ||
| 4417 | struct intel_crtc_scaler_state *scaler_state; | ||
| 4418 | unsigned int rotation; | ||
| 4419 | |||
| 4420 | if (!intel_crtc || !crtc_state) | ||
| 4421 | return 0; | ||
| 4422 | |||
| 4423 | scaler_state = &crtc_state->scaler_state; | ||
| 4424 | |||
| 4425 | idx = intel_plane ? drm_plane_index(&intel_plane->base) : SKL_CRTC_INDEX; | ||
| 4426 | fb = intel_plane ? plane_state->base.fb : NULL; | ||
| 4427 | |||
| 4428 | if (intel_plane) { | ||
| 4429 | src_w = drm_rect_width(&plane_state->src) >> 16; | ||
| 4430 | src_h = drm_rect_height(&plane_state->src) >> 16; | ||
| 4431 | dst_w = drm_rect_width(&plane_state->dst); | ||
| 4432 | dst_h = drm_rect_height(&plane_state->dst); | ||
| 4433 | scaler_id = &plane_state->scaler_id; | ||
| 4434 | rotation = plane_state->base.rotation; | ||
| 4435 | } else { | ||
| 4436 | struct drm_display_mode *adjusted_mode = | ||
| 4437 | &crtc_state->base.adjusted_mode; | ||
| 4438 | src_w = crtc_state->pipe_src_w; | ||
| 4439 | src_h = crtc_state->pipe_src_h; | ||
| 4440 | dst_w = adjusted_mode->hdisplay; | ||
| 4441 | dst_h = adjusted_mode->vdisplay; | ||
| 4442 | scaler_id = &scaler_state->scaler_id; | ||
| 4443 | rotation = DRM_ROTATE_0; | ||
| 4444 | } | ||
| 4445 | 4316 | ||
| 4446 | need_scaling = intel_rotation_90_or_270(rotation) ? | 4317 | need_scaling = intel_rotation_90_or_270(rotation) ? |
| 4447 | (src_h != dst_w || src_w != dst_h): | 4318 | (src_h != dst_w || src_w != dst_h): |
| @@ -4457,17 +4328,14 @@ skl_update_scaler_users( | |||
| 4457 | * update to free the scaler is done in plane/panel-fit programming. | 4328 | * update to free the scaler is done in plane/panel-fit programming. |
| 4458 | * For this purpose crtc/plane_state->scaler_id isn't reset here. | 4329 | * For this purpose crtc/plane_state->scaler_id isn't reset here. |
| 4459 | */ | 4330 | */ |
| 4460 | if (force_detach || !need_scaling || (intel_plane && | 4331 | if (force_detach || !need_scaling) { |
| 4461 | (!fb || !plane_state->visible))) { | ||
| 4462 | if (*scaler_id >= 0) { | 4332 | if (*scaler_id >= 0) { |
| 4463 | scaler_state->scaler_users &= ~(1 << idx); | 4333 | scaler_state->scaler_users &= ~(1 << scaler_user); |
| 4464 | scaler_state->scalers[*scaler_id].in_use = 0; | 4334 | scaler_state->scalers[*scaler_id].in_use = 0; |
| 4465 | 4335 | ||
| 4466 | DRM_DEBUG_KMS("Staged freeing scaler id %d.%d from %s:%d " | 4336 | DRM_DEBUG_KMS("scaler_user index %u.%u: " |
| 4467 | "crtc_state = %p scaler_users = 0x%x\n", | 4337 | "Staged freeing scaler id %d scaler_users = 0x%x\n", |
| 4468 | intel_crtc->pipe, *scaler_id, intel_plane ? "PLANE" : "CRTC", | 4338 | intel_crtc->pipe, scaler_user, *scaler_id, |
| 4469 | intel_plane ? intel_plane->base.base.id : | ||
| 4470 | intel_crtc->base.base.id, crtc_state, | ||
| 4471 | scaler_state->scaler_users); | 4339 | scaler_state->scaler_users); |
| 4472 | *scaler_id = -1; | 4340 | *scaler_id = -1; |
| 4473 | } | 4341 | } |
| @@ -4480,55 +4348,123 @@ skl_update_scaler_users( | |||
| 4480 | 4348 | ||
| 4481 | src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || | 4349 | src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || |
| 4482 | dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) { | 4350 | dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) { |
| 4483 | DRM_DEBUG_KMS("%s:%d scaler_user index %u.%u: src %ux%u dst %ux%u " | 4351 | DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u " |
| 4484 | "size is out of scaler range\n", | 4352 | "size is out of scaler range\n", |
| 4485 | intel_plane ? "PLANE" : "CRTC", | 4353 | intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h); |
| 4486 | intel_plane ? intel_plane->base.base.id : intel_crtc->base.base.id, | ||
| 4487 | intel_crtc->pipe, idx, src_w, src_h, dst_w, dst_h); | ||
| 4488 | return -EINVAL; | 4354 | return -EINVAL; |
| 4489 | } | 4355 | } |
| 4490 | 4356 | ||
| 4357 | /* mark this plane as a scaler user in crtc_state */ | ||
| 4358 | scaler_state->scaler_users |= (1 << scaler_user); | ||
| 4359 | DRM_DEBUG_KMS("scaler_user index %u.%u: " | ||
| 4360 | "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n", | ||
| 4361 | intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h, | ||
| 4362 | scaler_state->scaler_users); | ||
| 4363 | |||
| 4364 | return 0; | ||
| 4365 | } | ||
| 4366 | |||
| 4367 | /** | ||
| 4368 | * skl_update_scaler_crtc - Stages update to scaler state for a given crtc. | ||
| 4369 | * | ||
| 4370 | * @state: crtc's scaler state | ||
| 4371 | * | ||
| 4372 | * Return | ||
| 4373 | * 0 - scaler_usage updated successfully | ||
| 4374 | * error - requested scaling cannot be supported or other error condition | ||
| 4375 | */ | ||
| 4376 | int skl_update_scaler_crtc(struct intel_crtc_state *state) | ||
| 4377 | { | ||
| 4378 | struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc); | ||
| 4379 | struct drm_display_mode *adjusted_mode = | ||
| 4380 | &state->base.adjusted_mode; | ||
| 4381 | |||
| 4382 | DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n", | ||
| 4383 | intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX); | ||
| 4384 | |||
| 4385 | return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, | ||
| 4386 | &state->scaler_state.scaler_id, DRM_ROTATE_0, | ||
| 4387 | state->pipe_src_w, state->pipe_src_h, | ||
| 4388 | adjusted_mode->hdisplay, adjusted_mode->vdisplay); | ||
| 4389 | } | ||
| 4390 | |||
| 4391 | /** | ||
| 4392 | * skl_update_scaler_plane - Stages update to scaler state for a given plane. | ||
| 4393 | * | ||
| 4394 | * @state: crtc's scaler state | ||
| 4395 | * @plane_state: atomic plane state to update | ||
| 4396 | * | ||
| 4397 | * Return | ||
| 4398 | * 0 - scaler_usage updated successfully | ||
| 4399 | * error - requested scaling cannot be supported or other error condition | ||
| 4400 | */ | ||
| 4401 | static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, | ||
| 4402 | struct intel_plane_state *plane_state) | ||
| 4403 | { | ||
| 4404 | |||
| 4405 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); | ||
| 4406 | struct intel_plane *intel_plane = | ||
| 4407 | to_intel_plane(plane_state->base.plane); | ||
| 4408 | struct drm_framebuffer *fb = plane_state->base.fb; | ||
| 4409 | int ret; | ||
| 4410 | |||
| 4411 | bool force_detach = !fb || !plane_state->visible; | ||
| 4412 | |||
| 4413 | DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n", | ||
| 4414 | intel_plane->base.base.id, intel_crtc->pipe, | ||
| 4415 | drm_plane_index(&intel_plane->base)); | ||
| 4416 | |||
| 4417 | ret = skl_update_scaler(crtc_state, force_detach, | ||
| 4418 | drm_plane_index(&intel_plane->base), | ||
| 4419 | &plane_state->scaler_id, | ||
| 4420 | plane_state->base.rotation, | ||
| 4421 | drm_rect_width(&plane_state->src) >> 16, | ||
| 4422 | drm_rect_height(&plane_state->src) >> 16, | ||
| 4423 | drm_rect_width(&plane_state->dst), | ||
| 4424 | drm_rect_height(&plane_state->dst)); | ||
| 4425 | |||
| 4426 | if (ret || plane_state->scaler_id < 0) | ||
| 4427 | return ret; | ||
| 4428 | |||
| 4491 | /* check colorkey */ | 4429 | /* check colorkey */ |
| 4492 | if (WARN_ON(intel_plane && | 4430 | if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) { |
| 4493 | intel_plane->ckey.flags != I915_SET_COLORKEY_NONE)) { | 4431 | DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed", |
| 4494 | DRM_DEBUG_KMS("PLANE:%d scaling %ux%u->%ux%u not allowed with colorkey", | 4432 | intel_plane->base.base.id); |
| 4495 | intel_plane->base.base.id, src_w, src_h, dst_w, dst_h); | ||
| 4496 | return -EINVAL; | 4433 | return -EINVAL; |
| 4497 | } | 4434 | } |
| 4498 | 4435 | ||
| 4499 | /* Check src format */ | 4436 | /* Check src format */ |
| 4500 | if (intel_plane) { | 4437 | switch (fb->pixel_format) { |
| 4501 | switch (fb->pixel_format) { | 4438 | case DRM_FORMAT_RGB565: |
| 4502 | case DRM_FORMAT_RGB565: | 4439 | case DRM_FORMAT_XBGR8888: |
| 4503 | case DRM_FORMAT_XBGR8888: | 4440 | case DRM_FORMAT_XRGB8888: |
| 4504 | case DRM_FORMAT_XRGB8888: | 4441 | case DRM_FORMAT_ABGR8888: |
| 4505 | case DRM_FORMAT_ABGR8888: | 4442 | case DRM_FORMAT_ARGB8888: |
| 4506 | case DRM_FORMAT_ARGB8888: | 4443 | case DRM_FORMAT_XRGB2101010: |
| 4507 | case DRM_FORMAT_XRGB2101010: | 4444 | case DRM_FORMAT_XBGR2101010: |
| 4508 | case DRM_FORMAT_XBGR2101010: | 4445 | case DRM_FORMAT_YUYV: |
| 4509 | case DRM_FORMAT_YUYV: | 4446 | case DRM_FORMAT_YVYU: |
| 4510 | case DRM_FORMAT_YVYU: | 4447 | case DRM_FORMAT_UYVY: |
| 4511 | case DRM_FORMAT_UYVY: | 4448 | case DRM_FORMAT_VYUY: |
| 4512 | case DRM_FORMAT_VYUY: | 4449 | break; |
| 4513 | break; | 4450 | default: |
| 4514 | default: | 4451 | DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n", |
| 4515 | DRM_DEBUG_KMS("PLANE:%d FB:%d unsupported scaling format 0x%x\n", | 4452 | intel_plane->base.base.id, fb->base.id, fb->pixel_format); |
| 4516 | intel_plane->base.base.id, fb->base.id, fb->pixel_format); | 4453 | return -EINVAL; |
| 4517 | return -EINVAL; | ||
| 4518 | } | ||
| 4519 | } | 4454 | } |
| 4520 | 4455 | ||
| 4521 | /* mark this plane as a scaler user in crtc_state */ | ||
| 4522 | scaler_state->scaler_users |= (1 << idx); | ||
| 4523 | DRM_DEBUG_KMS("%s:%d staged scaling request for %ux%u->%ux%u " | ||
| 4524 | "crtc_state = %p scaler_users = 0x%x\n", | ||
| 4525 | intel_plane ? "PLANE" : "CRTC", | ||
| 4526 | intel_plane ? intel_plane->base.base.id : intel_crtc->base.base.id, | ||
| 4527 | src_w, src_h, dst_w, dst_h, crtc_state, scaler_state->scaler_users); | ||
| 4528 | return 0; | 4456 | return 0; |
| 4529 | } | 4457 | } |
| 4530 | 4458 | ||
| 4531 | static void skylake_pfit_update(struct intel_crtc *crtc, int enable) | 4459 | static void skylake_scaler_disable(struct intel_crtc *crtc) |
| 4460 | { | ||
| 4461 | int i; | ||
| 4462 | |||
| 4463 | for (i = 0; i < crtc->num_scalers; i++) | ||
| 4464 | skl_detach_scaler(crtc, i); | ||
| 4465 | } | ||
| 4466 | |||
| 4467 | static void skylake_pfit_enable(struct intel_crtc *crtc) | ||
| 4532 | { | 4468 | { |
| 4533 | struct drm_device *dev = crtc->base.dev; | 4469 | struct drm_device *dev = crtc->base.dev; |
| 4534 | struct drm_i915_private *dev_priv = dev->dev_private; | 4470 | struct drm_i915_private *dev_priv = dev->dev_private; |
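The old skl_update_scaler_users() mixed CRTC and plane handling behind an optional plane pointer; it becomes a common skl_update_scaler() that only manages the scaler_users bitmask (one bit per user, either the CRTC slot or a plane index) plus thin skl_update_scaler_crtc()/skl_update_scaler_plane() wrappers. A toy version of that bookkeeping; rotation handling is omitted and the range check below is a placeholder for the SKL size limits:

    #include <stdbool.h>

    struct scaler_users_sketch {
        unsigned int users;     /* bit per CRTC/plane that needs a scaler */
        int scaler_id[32];      /* -1 while no physical scaler is assigned */
    };

    bool scaling_in_range(int sw, int sh, int dw, int dh)
    {
        /* placeholder for the real min/max source and destination checks */
        return sw > 7 && sh > 7 && dw > 7 && dh > 7;
    }

    /* Returns 0 on success, -1 if the requested scaling cannot be supported. */
    int update_scaler(struct scaler_users_sketch *s, unsigned int user,
                      bool force_detach, int sw, int sh, int dw, int dh)
    {
        bool need_scaling = sw != dw || sh != dh;

        if (force_detach || !need_scaling) {
            /* stage the release; the hardware detach happens at commit */
            s->users &= ~(1u << user);
            s->scaler_id[user] = -1;
            return 0;
        }

        if (!scaling_in_range(sw, sh, dw, dh))
            return -1;

        s->users |= 1u << user;   /* physical scaler assignment happens later */
        return 0;
    }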
| @@ -4538,13 +4474,6 @@ static void skylake_pfit_update(struct intel_crtc *crtc, int enable) | |||
| 4538 | 4474 | ||
| 4539 | DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config); | 4475 | DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config); |
| 4540 | 4476 | ||
| 4541 | /* To update pfit, first update scaler state */ | ||
| 4542 | skl_update_scaler_users(crtc, crtc->config, NULL, NULL, !enable); | ||
| 4543 | intel_atomic_setup_scalers(crtc->base.dev, crtc, crtc->config); | ||
| 4544 | skl_detach_scalers(crtc); | ||
| 4545 | if (!enable) | ||
| 4546 | return; | ||
| 4547 | |||
| 4548 | if (crtc->config->pch_pfit.enabled) { | 4477 | if (crtc->config->pch_pfit.enabled) { |
| 4549 | int id; | 4478 | int id; |
| 4550 | 4479 | ||
| @@ -4584,20 +4513,6 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc) | |||
| 4584 | } | 4513 | } |
| 4585 | } | 4514 | } |
| 4586 | 4515 | ||
| 4587 | static void intel_enable_sprite_planes(struct drm_crtc *crtc) | ||
| 4588 | { | ||
| 4589 | struct drm_device *dev = crtc->dev; | ||
| 4590 | enum pipe pipe = to_intel_crtc(crtc)->pipe; | ||
| 4591 | struct drm_plane *plane; | ||
| 4592 | struct intel_plane *intel_plane; | ||
| 4593 | |||
| 4594 | drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { | ||
| 4595 | intel_plane = to_intel_plane(plane); | ||
| 4596 | if (intel_plane->pipe == pipe) | ||
| 4597 | intel_plane_restore(&intel_plane->base); | ||
| 4598 | } | ||
| 4599 | } | ||
| 4600 | |||
| 4601 | void hsw_enable_ips(struct intel_crtc *crtc) | 4516 | void hsw_enable_ips(struct intel_crtc *crtc) |
| 4602 | { | 4517 | { |
| 4603 | struct drm_device *dev = crtc->base.dev; | 4518 | struct drm_device *dev = crtc->base.dev; |
| @@ -4668,7 +4583,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc) | |||
| 4668 | bool reenable_ips = false; | 4583 | bool reenable_ips = false; |
| 4669 | 4584 | ||
| 4670 | /* The clocks have to be on to load the palette. */ | 4585 | /* The clocks have to be on to load the palette. */ |
| 4671 | if (!crtc->state->enable || !intel_crtc->active) | 4586 | if (!crtc->state->active) |
| 4672 | return; | 4587 | return; |
| 4673 | 4588 | ||
| 4674 | if (HAS_GMCH_DISPLAY(dev_priv->dev)) { | 4589 | if (HAS_GMCH_DISPLAY(dev_priv->dev)) { |
| @@ -4755,10 +4670,6 @@ intel_post_enable_primary(struct drm_crtc *crtc) | |||
| 4755 | */ | 4670 | */ |
| 4756 | hsw_enable_ips(intel_crtc); | 4671 | hsw_enable_ips(intel_crtc); |
| 4757 | 4672 | ||
| 4758 | mutex_lock(&dev->struct_mutex); | ||
| 4759 | intel_fbc_update(dev); | ||
| 4760 | mutex_unlock(&dev->struct_mutex); | ||
| 4761 | |||
| 4762 | /* | 4673 | /* |
| 4763 | * Gen2 reports pipe underruns whenever all planes are disabled. | 4674 | * Gen2 reports pipe underruns whenever all planes are disabled. |
| 4764 | * So don't enable underrun reporting before at least some planes | 4675 | * So don't enable underrun reporting before at least some planes |
| @@ -4810,13 +4721,11 @@ intel_pre_disable_primary(struct drm_crtc *crtc) | |||
| 4810 | * event which is after the vblank start event, so we need to have a | 4721 | * event which is after the vblank start event, so we need to have a |
| 4811 | * wait-for-vblank between disabling the plane and the pipe. | 4722 | * wait-for-vblank between disabling the plane and the pipe. |
| 4812 | */ | 4723 | */ |
| 4813 | if (HAS_GMCH_DISPLAY(dev)) | 4724 | if (HAS_GMCH_DISPLAY(dev)) { |
| 4814 | intel_set_memory_cxsr(dev_priv, false); | 4725 | intel_set_memory_cxsr(dev_priv, false); |
| 4815 | 4726 | dev_priv->wm.vlv.cxsr = false; | |
| 4816 | mutex_lock(&dev->struct_mutex); | 4727 | intel_wait_for_vblank(dev, pipe); |
| 4817 | if (dev_priv->fbc.crtc == intel_crtc) | 4728 | } |
| 4818 | intel_fbc_disable(dev); | ||
| 4819 | mutex_unlock(&dev->struct_mutex); | ||
| 4820 | 4729 | ||
| 4821 | /* | 4730 | /* |
| 4822 | * FIXME IPS should be fine as long as one plane is | 4731 | * FIXME IPS should be fine as long as one plane is |
| @@ -4827,49 +4736,83 @@ intel_pre_disable_primary(struct drm_crtc *crtc) | |||
| 4827 | hsw_disable_ips(intel_crtc); | 4736 | hsw_disable_ips(intel_crtc); |
| 4828 | } | 4737 | } |
| 4829 | 4738 | ||
| 4830 | static void intel_crtc_enable_planes(struct drm_crtc *crtc) | 4739 | static void intel_post_plane_update(struct intel_crtc *crtc) |
| 4831 | { | 4740 | { |
| 4832 | struct drm_device *dev = crtc->dev; | 4741 | struct intel_crtc_atomic_commit *atomic = &crtc->atomic; |
| 4833 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4742 | struct drm_device *dev = crtc->base.dev; |
| 4834 | int pipe = intel_crtc->pipe; | 4743 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 4744 | struct drm_plane *plane; | ||
| 4835 | 4745 | ||
| 4836 | intel_enable_primary_hw_plane(crtc->primary, crtc); | 4746 | if (atomic->wait_vblank) |
| 4837 | intel_enable_sprite_planes(crtc); | 4747 | intel_wait_for_vblank(dev, crtc->pipe); |
| 4838 | intel_crtc_update_cursor(crtc, true); | ||
| 4839 | 4748 | ||
| 4840 | intel_post_enable_primary(crtc); | 4749 | intel_frontbuffer_flip(dev, atomic->fb_bits); |
| 4841 | 4750 | ||
| 4842 | /* | 4751 | if (atomic->disable_cxsr) |
| 4843 | * FIXME: Once we grow proper nuclear flip support out of this we need | 4752 | crtc->wm.cxsr_allowed = true; |
| 4844 | * to compute the mask of flip planes precisely. For the time being | 4753 | |
| 4845 | * consider this a flip to a NULL plane. | 4754 | if (crtc->atomic.update_wm_post) |
| 4846 | */ | 4755 | intel_update_watermarks(&crtc->base); |
| 4847 | intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); | 4756 | |
| 4757 | if (atomic->update_fbc) | ||
| 4758 | intel_fbc_update(dev_priv); | ||
| 4759 | |||
| 4760 | if (atomic->post_enable_primary) | ||
| 4761 | intel_post_enable_primary(&crtc->base); | ||
| 4762 | |||
| 4763 | drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks) | ||
| 4764 | intel_update_sprite_watermarks(plane, &crtc->base, | ||
| 4765 | 0, 0, 0, false, false); | ||
| 4766 | |||
| 4767 | memset(atomic, 0, sizeof(*atomic)); | ||
| 4848 | } | 4768 | } |
| 4849 | 4769 | ||
| 4850 | static void intel_crtc_disable_planes(struct drm_crtc *crtc) | 4770 | static void intel_pre_plane_update(struct intel_crtc *crtc) |
| 4851 | { | 4771 | { |
| 4852 | struct drm_device *dev = crtc->dev; | 4772 | struct drm_device *dev = crtc->base.dev; |
| 4853 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4773 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 4854 | struct intel_plane *intel_plane; | 4774 | struct intel_crtc_atomic_commit *atomic = &crtc->atomic; |
| 4855 | int pipe = intel_crtc->pipe; | 4775 | struct drm_plane *p; |
| 4856 | 4776 | ||
| 4857 | if (!intel_crtc->active) | 4777 | /* Track fb's for any planes being disabled */ |
| 4858 | return; | 4778 | drm_for_each_plane_mask(p, dev, atomic->disabled_planes) { |
| 4779 | struct intel_plane *plane = to_intel_plane(p); | ||
| 4859 | 4780 | ||
| 4860 | intel_crtc_wait_for_pending_flips(crtc); | 4781 | mutex_lock(&dev->struct_mutex); |
| 4782 | i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL, | ||
| 4783 | plane->frontbuffer_bit); | ||
| 4784 | mutex_unlock(&dev->struct_mutex); | ||
| 4785 | } | ||
| 4861 | 4786 | ||
| 4862 | intel_pre_disable_primary(crtc); | 4787 | if (atomic->wait_for_flips) |
| 4788 | intel_crtc_wait_for_pending_flips(&crtc->base); | ||
| 4863 | 4789 | ||
| 4864 | intel_crtc_dpms_overlay_disable(intel_crtc); | 4790 | if (atomic->disable_fbc) |
| 4865 | for_each_intel_plane(dev, intel_plane) { | 4791 | intel_fbc_disable_crtc(crtc); |
| 4866 | if (intel_plane->pipe == pipe) { | ||
| 4867 | struct drm_crtc *from = intel_plane->base.crtc; | ||
| 4868 | 4792 | ||
| 4869 | intel_plane->disable_plane(&intel_plane->base, | 4793 | if (crtc->atomic.disable_ips) |
| 4870 | from ?: crtc, true); | 4794 | hsw_disable_ips(crtc); |
| 4871 | } | 4795 | |
| 4796 | if (atomic->pre_disable_primary) | ||
| 4797 | intel_pre_disable_primary(&crtc->base); | ||
| 4798 | |||
| 4799 | if (atomic->disable_cxsr) { | ||
| 4800 | crtc->wm.cxsr_allowed = false; | ||
| 4801 | intel_set_memory_cxsr(dev_priv, false); | ||
| 4872 | } | 4802 | } |
| 4803 | } | ||
| 4804 | |||
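The new intel_pre_plane_update()/intel_post_plane_update() pair above brackets the actual plane programming with the bookkeeping that used to be scattered through the enable/disable paths. A minimal ordering sketch, assuming the atomic commit path of this series; the drm_atomic_helper_commit_planes() call and its arguments are assumptions, not taken from this hunk:

	/* sketch only: assumed commit-path ordering, simplified */
	intel_pre_plane_update(intel_crtc);		/* wait for flips, FBC/IPS/cxsr off */
	drm_atomic_helper_commit_planes(dev, state);	/* program primary/sprite/cursor */
	intel_post_plane_update(intel_crtc);		/* vblank wait, watermarks, FBC back on */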
| 4805 | static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask) | ||
| 4806 | { | ||
| 4807 | struct drm_device *dev = crtc->dev; | ||
| 4808 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
| 4809 | struct drm_plane *p; | ||
| 4810 | int pipe = intel_crtc->pipe; | ||
| 4811 | |||
| 4812 | intel_crtc_dpms_overlay_disable(intel_crtc); | ||
| 4813 | |||
| 4814 | drm_for_each_plane_mask(p, dev, plane_mask) | ||
| 4815 | to_intel_plane(p)->disable_plane(p, crtc); | ||
| 4873 | 4816 | ||
| 4874 | /* | 4817 | /* |
| 4875 | * FIXME: Once we grow proper nuclear flip support out of this we need | 4818 | * FIXME: Once we grow proper nuclear flip support out of this we need |
| @@ -4887,9 +4830,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
| 4887 | struct intel_encoder *encoder; | 4830 | struct intel_encoder *encoder; |
| 4888 | int pipe = intel_crtc->pipe; | 4831 | int pipe = intel_crtc->pipe; |
| 4889 | 4832 | ||
| 4890 | WARN_ON(!crtc->state->enable); | 4833 | if (WARN_ON(intel_crtc->active)) |
| 4891 | |||
| 4892 | if (intel_crtc->active) | ||
| 4893 | return; | 4834 | return; |
| 4894 | 4835 | ||
| 4895 | if (intel_crtc->config->has_pch_encoder) | 4836 | if (intel_crtc->config->has_pch_encoder) |
| @@ -4956,46 +4897,17 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) | |||
| 4956 | return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A; | 4897 | return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A; |
| 4957 | } | 4898 | } |
| 4958 | 4899 | ||
| 4959 | /* | ||
| 4960 | * This implements the workaround described in the "notes" section of the mode | ||
| 4961 | * set sequence documentation. When going from no pipes or single pipe to | ||
| 4962 | * multiple pipes, and planes are enabled after the pipe, we need to wait at | ||
| 4963 | * least 2 vblanks on the first pipe before enabling planes on the second pipe. | ||
| 4964 | */ | ||
| 4965 | static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc) | ||
| 4966 | { | ||
| 4967 | struct drm_device *dev = crtc->base.dev; | ||
| 4968 | struct intel_crtc *crtc_it, *other_active_crtc = NULL; | ||
| 4969 | |||
| 4970 | /* We want to get the other_active_crtc only if there's only 1 other | ||
| 4971 | * active crtc. */ | ||
| 4972 | for_each_intel_crtc(dev, crtc_it) { | ||
| 4973 | if (!crtc_it->active || crtc_it == crtc) | ||
| 4974 | continue; | ||
| 4975 | |||
| 4976 | if (other_active_crtc) | ||
| 4977 | return; | ||
| 4978 | |||
| 4979 | other_active_crtc = crtc_it; | ||
| 4980 | } | ||
| 4981 | if (!other_active_crtc) | ||
| 4982 | return; | ||
| 4983 | |||
| 4984 | intel_wait_for_vblank(dev, other_active_crtc->pipe); | ||
| 4985 | intel_wait_for_vblank(dev, other_active_crtc->pipe); | ||
| 4986 | } | ||
| 4987 | |||
| 4988 | static void haswell_crtc_enable(struct drm_crtc *crtc) | 4900 | static void haswell_crtc_enable(struct drm_crtc *crtc) |
| 4989 | { | 4901 | { |
| 4990 | struct drm_device *dev = crtc->dev; | 4902 | struct drm_device *dev = crtc->dev; |
| 4991 | struct drm_i915_private *dev_priv = dev->dev_private; | 4903 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 4992 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4904 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 4993 | struct intel_encoder *encoder; | 4905 | struct intel_encoder *encoder; |
| 4994 | int pipe = intel_crtc->pipe; | 4906 | int pipe = intel_crtc->pipe, hsw_workaround_pipe; |
| 4907 | struct intel_crtc_state *pipe_config = | ||
| 4908 | to_intel_crtc_state(crtc->state); | ||
| 4995 | 4909 | ||
| 4996 | WARN_ON(!crtc->state->enable); | 4910 | if (WARN_ON(intel_crtc->active)) |
| 4997 | |||
| 4998 | if (intel_crtc->active) | ||
| 4999 | return; | 4911 | return; |
| 5000 | 4912 | ||
| 5001 | if (intel_crtc_to_shared_dpll(intel_crtc)) | 4913 | if (intel_crtc_to_shared_dpll(intel_crtc)) |
| @@ -5036,7 +4948,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) | |||
| 5036 | intel_ddi_enable_pipe_clock(intel_crtc); | 4948 | intel_ddi_enable_pipe_clock(intel_crtc); |
| 5037 | 4949 | ||
| 5038 | if (INTEL_INFO(dev)->gen == 9) | 4950 | if (INTEL_INFO(dev)->gen == 9) |
| 5039 | skylake_pfit_update(intel_crtc, 1); | 4951 | skylake_pfit_enable(intel_crtc); |
| 5040 | else if (INTEL_INFO(dev)->gen < 9) | 4952 | else if (INTEL_INFO(dev)->gen < 9) |
| 5041 | ironlake_pfit_enable(intel_crtc); | 4953 | ironlake_pfit_enable(intel_crtc); |
| 5042 | else | 4954 | else |
| @@ -5070,7 +4982,11 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) | |||
| 5070 | 4982 | ||
| 5071 | /* If we change the relative order between pipe/planes enabling, we need | 4983 | /* If we change the relative order between pipe/planes enabling, we need |
| 5072 | * to change the workaround. */ | 4984 | * to change the workaround. */ |
| 5073 | haswell_mode_set_planes_workaround(intel_crtc); | 4985 | hsw_workaround_pipe = pipe_config->hsw_workaround_pipe; |
| 4986 | if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) { | ||
| 4987 | intel_wait_for_vblank(dev, hsw_workaround_pipe); | ||
| 4988 | intel_wait_for_vblank(dev, hsw_workaround_pipe); | ||
| 4989 | } | ||
| 5074 | } | 4990 | } |
| 5075 | 4991 | ||
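The open-coded haswell_mode_set_planes_workaround() above is replaced by a precomputed pipe_config->hsw_workaround_pipe, consumed at enable time with two vblank waits. A hedged sketch of how the check phase could pick that pipe, mirroring the removed helper; the function name and its placement are assumptions:

static enum pipe haswell_workaround_pipe(struct drm_device *dev,
					 struct intel_crtc *crtc)
{
	struct intel_crtc *crtc_it, *other_active_crtc = NULL;

	/* only a single other active pipe triggers the two-vblank wait */
	for_each_intel_crtc(dev, crtc_it) {
		if (!crtc_it->active || crtc_it == crtc)
			continue;
		if (other_active_crtc)
			return INVALID_PIPE;
		other_active_crtc = crtc_it;
	}

	return other_active_crtc ? other_active_crtc->pipe : INVALID_PIPE;
}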
| 5076 | static void ironlake_pfit_disable(struct intel_crtc *crtc) | 4992 | static void ironlake_pfit_disable(struct intel_crtc *crtc) |
| @@ -5097,9 +5013,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
| 5097 | int pipe = intel_crtc->pipe; | 5013 | int pipe = intel_crtc->pipe; |
| 5098 | u32 reg, temp; | 5014 | u32 reg, temp; |
| 5099 | 5015 | ||
| 5100 | if (!intel_crtc->active) | ||
| 5101 | return; | ||
| 5102 | |||
| 5103 | for_each_encoder_on_crtc(dev, crtc, encoder) | 5016 | for_each_encoder_on_crtc(dev, crtc, encoder) |
| 5104 | encoder->disable(encoder); | 5017 | encoder->disable(encoder); |
| 5105 | 5018 | ||
| @@ -5138,18 +5051,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
| 5138 | I915_WRITE(PCH_DPLL_SEL, temp); | 5051 | I915_WRITE(PCH_DPLL_SEL, temp); |
| 5139 | } | 5052 | } |
| 5140 | 5053 | ||
| 5141 | /* disable PCH DPLL */ | ||
| 5142 | intel_disable_shared_dpll(intel_crtc); | ||
| 5143 | |||
| 5144 | ironlake_fdi_pll_disable(intel_crtc); | 5054 | ironlake_fdi_pll_disable(intel_crtc); |
| 5145 | } | 5055 | } |
| 5146 | 5056 | ||
| 5147 | intel_crtc->active = false; | 5057 | intel_crtc->active = false; |
| 5148 | intel_update_watermarks(crtc); | 5058 | intel_update_watermarks(crtc); |
| 5149 | |||
| 5150 | mutex_lock(&dev->struct_mutex); | ||
| 5151 | intel_fbc_update(dev); | ||
| 5152 | mutex_unlock(&dev->struct_mutex); | ||
| 5153 | } | 5059 | } |
| 5154 | 5060 | ||
| 5155 | static void haswell_crtc_disable(struct drm_crtc *crtc) | 5061 | static void haswell_crtc_disable(struct drm_crtc *crtc) |
| @@ -5160,9 +5066,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) | |||
| 5160 | struct intel_encoder *encoder; | 5066 | struct intel_encoder *encoder; |
| 5161 | enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; | 5067 | enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; |
| 5162 | 5068 | ||
| 5163 | if (!intel_crtc->active) | ||
| 5164 | return; | ||
| 5165 | |||
| 5166 | for_each_encoder_on_crtc(dev, crtc, encoder) { | 5069 | for_each_encoder_on_crtc(dev, crtc, encoder) { |
| 5167 | intel_opregion_notify_encoder(encoder, false); | 5070 | intel_opregion_notify_encoder(encoder, false); |
| 5168 | encoder->disable(encoder); | 5071 | encoder->disable(encoder); |
| @@ -5182,7 +5085,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) | |||
| 5182 | intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); | 5085 | intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); |
| 5183 | 5086 | ||
| 5184 | if (INTEL_INFO(dev)->gen == 9) | 5087 | if (INTEL_INFO(dev)->gen == 9) |
| 5185 | skylake_pfit_update(intel_crtc, 0); | 5088 | skylake_scaler_disable(intel_crtc); |
| 5186 | else if (INTEL_INFO(dev)->gen < 9) | 5089 | else if (INTEL_INFO(dev)->gen < 9) |
| 5187 | ironlake_pfit_disable(intel_crtc); | 5090 | ironlake_pfit_disable(intel_crtc); |
| 5188 | else | 5091 | else |
| @@ -5201,22 +5104,8 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) | |||
| 5201 | 5104 | ||
| 5202 | intel_crtc->active = false; | 5105 | intel_crtc->active = false; |
| 5203 | intel_update_watermarks(crtc); | 5106 | intel_update_watermarks(crtc); |
| 5204 | |||
| 5205 | mutex_lock(&dev->struct_mutex); | ||
| 5206 | intel_fbc_update(dev); | ||
| 5207 | mutex_unlock(&dev->struct_mutex); | ||
| 5208 | |||
| 5209 | if (intel_crtc_to_shared_dpll(intel_crtc)) | ||
| 5210 | intel_disable_shared_dpll(intel_crtc); | ||
| 5211 | } | ||
| 5212 | |||
| 5213 | static void ironlake_crtc_off(struct drm_crtc *crtc) | ||
| 5214 | { | ||
| 5215 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
| 5216 | intel_put_shared_dpll(intel_crtc); | ||
| 5217 | } | 5107 | } |
| 5218 | 5108 | ||
| 5219 | |||
| 5220 | static void i9xx_pfit_enable(struct intel_crtc *crtc) | 5109 | static void i9xx_pfit_enable(struct intel_crtc *crtc) |
| 5221 | { | 5110 | { |
| 5222 | struct drm_device *dev = crtc->base.dev; | 5111 | struct drm_device *dev = crtc->base.dev; |
| @@ -5298,6 +5187,9 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) | |||
| 5298 | unsigned long mask; | 5187 | unsigned long mask; |
| 5299 | enum transcoder transcoder; | 5188 | enum transcoder transcoder; |
| 5300 | 5189 | ||
| 5190 | if (!crtc->state->active) | ||
| 5191 | return 0; | ||
| 5192 | |||
| 5301 | transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe); | 5193 | transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe); |
| 5302 | 5194 | ||
| 5303 | mask = BIT(POWER_DOMAIN_PIPE(pipe)); | 5195 | mask = BIT(POWER_DOMAIN_PIPE(pipe)); |
| @@ -5312,45 +5204,131 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) | |||
| 5312 | return mask; | 5204 | return mask; |
| 5313 | } | 5205 | } |
| 5314 | 5206 | ||
| 5207 | static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc) | ||
| 5208 | { | ||
| 5209 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; | ||
| 5210 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
| 5211 | enum intel_display_power_domain domain; | ||
| 5212 | unsigned long domains, new_domains, old_domains; | ||
| 5213 | |||
| 5214 | old_domains = intel_crtc->enabled_power_domains; | ||
| 5215 | intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc); | ||
| 5216 | |||
| 5217 | domains = new_domains & ~old_domains; | ||
| 5218 | |||
| 5219 | for_each_power_domain(domain, domains) | ||
| 5220 | intel_display_power_get(dev_priv, domain); | ||
| 5221 | |||
| 5222 | return old_domains & ~new_domains; | ||
| 5223 | } | ||
| 5224 | |||
| 5225 | static void modeset_put_power_domains(struct drm_i915_private *dev_priv, | ||
| 5226 | unsigned long domains) | ||
| 5227 | { | ||
| 5228 | enum intel_display_power_domain domain; | ||
| 5229 | |||
| 5230 | for_each_power_domain(domain, domains) | ||
| 5231 | intel_display_power_put(dev_priv, domain); | ||
| 5232 | } | ||
| 5233 | |||
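modeset_get_crtc_power_domains() grabs only the newly required domains and hands back the set to drop, so domains that stay in use are never toggled. A worked example with assumed before/after sets for one pipe (the particular domains are illustrative only):

	unsigned long old_domains = BIT(POWER_DOMAIN_PIPE(PIPE_A)) |
				    BIT(POWER_DOMAIN_TRANSCODER(TRANSCODER_A));
	unsigned long new_domains = BIT(POWER_DOMAIN_PIPE(PIPE_A)) |
				    BIT(POWER_DOMAIN_TRANSCODER(TRANSCODER_EDP));

	/* get: new & ~old  -> only the EDP transcoder domain is taken */
	/* put: old & ~new  -> only the transcoder A domain is released later */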
| 5315 | static void modeset_update_crtc_power_domains(struct drm_atomic_state *state) | 5234 | static void modeset_update_crtc_power_domains(struct drm_atomic_state *state) |
| 5316 | { | 5235 | { |
| 5317 | struct drm_device *dev = state->dev; | 5236 | struct drm_device *dev = state->dev; |
| 5318 | struct drm_i915_private *dev_priv = dev->dev_private; | 5237 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5319 | unsigned long pipe_domains[I915_MAX_PIPES] = { 0, }; | 5238 | unsigned long put_domains[I915_MAX_PIPES] = {}; |
| 5320 | struct intel_crtc *crtc; | 5239 | struct drm_crtc_state *crtc_state; |
| 5240 | struct drm_crtc *crtc; | ||
| 5241 | int i; | ||
| 5321 | 5242 | ||
| 5322 | /* | 5243 | for_each_crtc_in_state(state, crtc, crtc_state, i) { |
| 5323 | * First get all needed power domains, then put all unneeded, to avoid | 5244 | if (needs_modeset(crtc->state)) |
| 5324 | * any unnecessary toggling of the power wells. | 5245 | put_domains[to_intel_crtc(crtc)->pipe] = |
| 5325 | */ | 5246 | modeset_get_crtc_power_domains(crtc); |
| 5326 | for_each_intel_crtc(dev, crtc) { | 5247 | } |
| 5327 | enum intel_display_power_domain domain; | ||
| 5328 | 5248 | ||
| 5329 | if (!crtc->base.state->enable) | 5249 | if (dev_priv->display.modeset_commit_cdclk) { |
| 5330 | continue; | 5250 | unsigned int cdclk = to_intel_atomic_state(state)->cdclk; |
| 5331 | 5251 | ||
| 5332 | pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base); | 5252 | if (cdclk != dev_priv->cdclk_freq && |
| 5253 | !WARN_ON(!state->allow_modeset)) | ||
| 5254 | dev_priv->display.modeset_commit_cdclk(state); | ||
| 5255 | } | ||
| 5333 | 5256 | ||
| 5334 | for_each_power_domain(domain, pipe_domains[crtc->pipe]) | 5257 | for (i = 0; i < I915_MAX_PIPES; i++) |
| 5335 | intel_display_power_get(dev_priv, domain); | 5258 | if (put_domains[i]) |
| 5259 | modeset_put_power_domains(dev_priv, put_domains[i]); | ||
| 5260 | } | ||
| 5261 | |||
| 5262 | static void intel_update_max_cdclk(struct drm_device *dev) | ||
| 5263 | { | ||
| 5264 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 5265 | |||
| 5266 | if (IS_SKYLAKE(dev)) { | ||
| 5267 | u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; | ||
| 5268 | |||
| 5269 | if (limit == SKL_DFSM_CDCLK_LIMIT_675) | ||
| 5270 | dev_priv->max_cdclk_freq = 675000; | ||
| 5271 | else if (limit == SKL_DFSM_CDCLK_LIMIT_540) | ||
| 5272 | dev_priv->max_cdclk_freq = 540000; | ||
| 5273 | else if (limit == SKL_DFSM_CDCLK_LIMIT_450) | ||
| 5274 | dev_priv->max_cdclk_freq = 450000; | ||
| 5275 | else | ||
| 5276 | dev_priv->max_cdclk_freq = 337500; | ||
| 5277 | } else if (IS_BROADWELL(dev)) { | ||
| 5278 | /* | ||
| 5279 | * FIXME with extra cooling we can allow | ||
| 5280 | * 540 MHz for ULX and 675 MHz for ULT. | ||
| 5281 | * How can we know if extra cooling is | ||
| 5282 | * available? PCI ID, VTB, something else? | ||
| 5283 | */ | ||
| 5284 | if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) | ||
| 5285 | dev_priv->max_cdclk_freq = 450000; | ||
| 5286 | else if (IS_BDW_ULX(dev)) | ||
| 5287 | dev_priv->max_cdclk_freq = 450000; | ||
| 5288 | else if (IS_BDW_ULT(dev)) | ||
| 5289 | dev_priv->max_cdclk_freq = 540000; | ||
| 5290 | else | ||
| 5291 | dev_priv->max_cdclk_freq = 675000; | ||
| 5292 | } else if (IS_CHERRYVIEW(dev)) { | ||
| 5293 | dev_priv->max_cdclk_freq = 320000; | ||
| 5294 | } else if (IS_VALLEYVIEW(dev)) { | ||
| 5295 | dev_priv->max_cdclk_freq = 400000; | ||
| 5296 | } else { | ||
| 5297 | /* otherwise assume cdclk is fixed */ | ||
| 5298 | dev_priv->max_cdclk_freq = dev_priv->cdclk_freq; | ||
| 5336 | } | 5299 | } |
| 5337 | 5300 | ||
| 5338 | if (dev_priv->display.modeset_global_resources) | 5301 | DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n", |
| 5339 | dev_priv->display.modeset_global_resources(state); | 5302 | dev_priv->max_cdclk_freq); |
| 5303 | } | ||
| 5340 | 5304 | ||
| 5341 | for_each_intel_crtc(dev, crtc) { | 5305 | static void intel_update_cdclk(struct drm_device *dev) |
| 5342 | enum intel_display_power_domain domain; | 5306 | { |
| 5307 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 5343 | 5308 | ||
| 5344 | for_each_power_domain(domain, crtc->enabled_power_domains) | 5309 | dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev); |
| 5345 | intel_display_power_put(dev_priv, domain); | 5310 | DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n", |
| 5311 | dev_priv->cdclk_freq); | ||
| 5346 | 5312 | ||
| 5347 | crtc->enabled_power_domains = pipe_domains[crtc->pipe]; | 5313 | /* |
| 5314 | * Program the gmbus_freq based on the cdclk frequency. | ||
| 5315 | * BSpec erroneously claims we should aim for 4MHz, but | ||
| 5316 | * in fact 1MHz is the correct frequency. | ||
| 5317 | */ | ||
| 5318 | if (IS_VALLEYVIEW(dev)) { | ||
| 5319 | /* | ||
| 5320 | * Program the gmbus_freq based on the cdclk frequency. | ||
| 5321 | * BSpec erroneously claims we should aim for 4MHz, but | ||
| 5322 | * in fact 1MHz is the correct frequency. | ||
| 5323 | */ | ||
| 5324 | I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000)); | ||
| 5348 | } | 5325 | } |
| 5349 | 5326 | ||
| 5350 | intel_display_set_init_power(dev_priv, false); | 5327 | if (dev_priv->max_cdclk_freq == 0) |
| 5328 | intel_update_max_cdclk(dev); | ||
| 5351 | } | 5329 | } |
| 5352 | 5330 | ||
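intel_update_cdclk() now also reprograms the VLV gmbus divider from the freshly read cdclk. A worked example with an assumed cdclk value:

	/* assumed: cdclk_freq = 320000 kHz on VLV */
	I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(320000, 1000));	/* = 320, i.e. cdclk in MHz */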
| 5353 | void broxton_set_cdclk(struct drm_device *dev, int frequency) | 5331 | static void broxton_set_cdclk(struct drm_device *dev, int frequency) |
| 5354 | { | 5332 | { |
| 5355 | struct drm_i915_private *dev_priv = dev->dev_private; | 5333 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5356 | uint32_t divider; | 5334 | uint32_t divider; |
| @@ -5466,7 +5444,7 @@ void broxton_set_cdclk(struct drm_device *dev, int frequency) | |||
| 5466 | return; | 5444 | return; |
| 5467 | } | 5445 | } |
| 5468 | 5446 | ||
| 5469 | dev_priv->cdclk_freq = frequency; | 5447 | intel_update_cdclk(dev); |
| 5470 | } | 5448 | } |
| 5471 | 5449 | ||
| 5472 | void broxton_init_cdclk(struct drm_device *dev) | 5450 | void broxton_init_cdclk(struct drm_device *dev) |
| @@ -5641,6 +5619,7 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv) | |||
| 5641 | 5619 | ||
| 5642 | static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) | 5620 | static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) |
| 5643 | { | 5621 | { |
| 5622 | struct drm_device *dev = dev_priv->dev; | ||
| 5644 | u32 freq_select, pcu_ack; | 5623 | u32 freq_select, pcu_ack; |
| 5645 | 5624 | ||
| 5646 | DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq); | 5625 | DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq); |
| @@ -5681,6 +5660,8 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) | |||
| 5681 | mutex_lock(&dev_priv->rps.hw_lock); | 5660 | mutex_lock(&dev_priv->rps.hw_lock); |
| 5682 | sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack); | 5661 | sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack); |
| 5683 | mutex_unlock(&dev_priv->rps.hw_lock); | 5662 | mutex_unlock(&dev_priv->rps.hw_lock); |
| 5663 | |||
| 5664 | intel_update_cdclk(dev); | ||
| 5684 | } | 5665 | } |
| 5685 | 5666 | ||
| 5686 | void skl_uninit_cdclk(struct drm_i915_private *dev_priv) | 5667 | void skl_uninit_cdclk(struct drm_i915_private *dev_priv) |
| @@ -5751,22 +5732,6 @@ static int valleyview_get_vco(struct drm_i915_private *dev_priv) | |||
| 5751 | return vco_freq[hpll_freq] * 1000; | 5732 | return vco_freq[hpll_freq] * 1000; |
| 5752 | } | 5733 | } |
| 5753 | 5734 | ||
| 5754 | static void vlv_update_cdclk(struct drm_device *dev) | ||
| 5755 | { | ||
| 5756 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 5757 | |||
| 5758 | dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev); | ||
| 5759 | DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n", | ||
| 5760 | dev_priv->cdclk_freq); | ||
| 5761 | |||
| 5762 | /* | ||
| 5763 | * Program the gmbus_freq based on the cdclk frequency. | ||
| 5764 | * BSpec erroneously claims we should aim for 4MHz, but | ||
| 5765 | * in fact 1MHz is the correct frequency. | ||
| 5766 | */ | ||
| 5767 | I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000)); | ||
| 5768 | } | ||
| 5769 | |||
| 5770 | /* Adjust CDclk dividers to allow high res or save power if possible */ | 5735 | /* Adjust CDclk dividers to allow high res or save power if possible */ |
| 5771 | static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) | 5736 | static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) |
| 5772 | { | 5737 | { |
| @@ -5830,7 +5795,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) | |||
| 5830 | 5795 | ||
| 5831 | mutex_unlock(&dev_priv->sb_lock); | 5796 | mutex_unlock(&dev_priv->sb_lock); |
| 5832 | 5797 | ||
| 5833 | vlv_update_cdclk(dev); | 5798 | intel_update_cdclk(dev); |
| 5834 | } | 5799 | } |
| 5835 | 5800 | ||
| 5836 | static void cherryview_set_cdclk(struct drm_device *dev, int cdclk) | 5801 | static void cherryview_set_cdclk(struct drm_device *dev, int cdclk) |
| @@ -5871,7 +5836,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk) | |||
| 5871 | } | 5836 | } |
| 5872 | mutex_unlock(&dev_priv->rps.hw_lock); | 5837 | mutex_unlock(&dev_priv->rps.hw_lock); |
| 5873 | 5838 | ||
| 5874 | vlv_update_cdclk(dev); | 5839 | intel_update_cdclk(dev); |
| 5875 | } | 5840 | } |
| 5876 | 5841 | ||
| 5877 | static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, | 5842 | static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, |
| @@ -5934,11 +5899,7 @@ static int intel_mode_max_pixclk(struct drm_device *dev, | |||
| 5934 | int max_pixclk = 0; | 5899 | int max_pixclk = 0; |
| 5935 | 5900 | ||
| 5936 | for_each_intel_crtc(dev, intel_crtc) { | 5901 | for_each_intel_crtc(dev, intel_crtc) { |
| 5937 | if (state) | 5902 | crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); |
| 5938 | crtc_state = | ||
| 5939 | intel_atomic_get_crtc_state(state, intel_crtc); | ||
| 5940 | else | ||
| 5941 | crtc_state = intel_crtc->config; | ||
| 5942 | if (IS_ERR(crtc_state)) | 5903 | if (IS_ERR(crtc_state)) |
| 5943 | return PTR_ERR(crtc_state); | 5904 | return PTR_ERR(crtc_state); |
| 5944 | 5905 | ||
| @@ -5952,39 +5913,32 @@ static int intel_mode_max_pixclk(struct drm_device *dev, | |||
| 5952 | return max_pixclk; | 5913 | return max_pixclk; |
| 5953 | } | 5914 | } |
| 5954 | 5915 | ||
| 5955 | static int valleyview_modeset_global_pipes(struct drm_atomic_state *state) | 5916 | static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state) |
| 5956 | { | 5917 | { |
| 5957 | struct drm_i915_private *dev_priv = to_i915(state->dev); | 5918 | struct drm_device *dev = state->dev; |
| 5958 | struct drm_crtc *crtc; | 5919 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5959 | struct drm_crtc_state *crtc_state; | 5920 | int max_pixclk = intel_mode_max_pixclk(dev, state); |
| 5960 | int max_pixclk = intel_mode_max_pixclk(state->dev, state); | ||
| 5961 | int cdclk, i; | ||
| 5962 | 5921 | ||
| 5963 | if (max_pixclk < 0) | 5922 | if (max_pixclk < 0) |
| 5964 | return max_pixclk; | 5923 | return max_pixclk; |
| 5965 | 5924 | ||
| 5966 | if (IS_VALLEYVIEW(dev_priv)) | 5925 | to_intel_atomic_state(state)->cdclk = |
| 5967 | cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); | 5926 | valleyview_calc_cdclk(dev_priv, max_pixclk); |
| 5968 | else | ||
| 5969 | cdclk = broxton_calc_cdclk(dev_priv, max_pixclk); | ||
| 5970 | 5927 | ||
| 5971 | if (cdclk == dev_priv->cdclk_freq) | 5928 | return 0; |
| 5972 | return 0; | 5929 | } |
| 5973 | 5930 | ||
| 5974 | /* add all active pipes to the state */ | 5931 | static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state) |
| 5975 | for_each_crtc(state->dev, crtc) { | 5932 | { |
| 5976 | if (!crtc->state->enable) | 5933 | struct drm_device *dev = state->dev; |
| 5977 | continue; | 5934 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5935 | int max_pixclk = intel_mode_max_pixclk(dev, state); | ||
| 5978 | 5936 | ||
| 5979 | crtc_state = drm_atomic_get_crtc_state(state, crtc); | 5937 | if (max_pixclk < 0) |
| 5980 | if (IS_ERR(crtc_state)) | 5938 | return max_pixclk; |
| 5981 | return PTR_ERR(crtc_state); | ||
| 5982 | } | ||
| 5983 | 5939 | ||
| 5984 | /* disable/enable all currently active pipes while we change cdclk */ | 5940 | to_intel_atomic_state(state)->cdclk = |
| 5985 | for_each_crtc_in_state(state, crtc, crtc_state, i) | 5941 | broxton_calc_cdclk(dev_priv, max_pixclk); |
| 5986 | if (crtc_state->enable) | ||
| 5987 | crtc_state->mode_changed = true; | ||
| 5988 | 5942 | ||
| 5989 | return 0; | 5943 | return 0; |
| 5990 | } | 5944 | } |
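The cdclk decision is now split in two: the *_modeset_calc_cdclk() helpers above only stash the target frequency in the atomic state during the check phase, and modeset_update_crtc_power_domains() invokes display.modeset_commit_cdclk() to apply it at commit time. A rough sketch of that flow; the modeset_calc_cdclk hook name is assumed from the helper naming, only modeset_commit_cdclk appears in this diff:

	/* check phase (sketch): compute and stash the target cdclk */
	if (dev_priv->display.modeset_calc_cdclk) {
		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret)
			return ret;
	}

	/* commit phase (sketch): reprogram only if it actually changed */
	if (dev_priv->display.modeset_commit_cdclk &&
	    to_intel_atomic_state(state)->cdclk != dev_priv->cdclk_freq)
		dev_priv->display.modeset_commit_cdclk(state);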
| @@ -6001,7 +5955,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) | |||
| 6001 | if (DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 1000) >= dev_priv->rps.cz_freq) { | 5955 | if (DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 1000) >= dev_priv->rps.cz_freq) { |
| 6002 | /* CHV suggested value is 31 or 63 */ | 5956 | /* CHV suggested value is 31 or 63 */ |
| 6003 | if (IS_CHERRYVIEW(dev_priv)) | 5957 | if (IS_CHERRYVIEW(dev_priv)) |
| 6004 | credits = PFI_CREDIT_31; | 5958 | credits = PFI_CREDIT_63; |
| 6005 | else | 5959 | else |
| 6006 | credits = PFI_CREDIT(15); | 5960 | credits = PFI_CREDIT(15); |
| 6007 | } else { | 5961 | } else { |
| @@ -6025,41 +5979,31 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) | |||
| 6025 | WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND); | 5979 | WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND); |
| 6026 | } | 5980 | } |
| 6027 | 5981 | ||
| 6028 | static void valleyview_modeset_global_resources(struct drm_atomic_state *old_state) | 5982 | static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state) |
| 6029 | { | 5983 | { |
| 6030 | struct drm_device *dev = old_state->dev; | 5984 | struct drm_device *dev = old_state->dev; |
| 5985 | unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk; | ||
| 6031 | struct drm_i915_private *dev_priv = dev->dev_private; | 5986 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 6032 | int max_pixclk = intel_mode_max_pixclk(dev, NULL); | ||
| 6033 | int req_cdclk; | ||
| 6034 | |||
| 6035 | /* The path in intel_mode_max_pixclk() with a NULL atomic state should | ||
| 6036 | * never fail. */ | ||
| 6037 | if (WARN_ON(max_pixclk < 0)) | ||
| 6038 | return; | ||
| 6039 | |||
| 6040 | req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); | ||
| 6041 | 5987 | ||
| 6042 | if (req_cdclk != dev_priv->cdclk_freq) { | 5988 | /* |
| 6043 | /* | 5989 | * FIXME: We can end up here with all power domains off, yet |
| 6044 | * FIXME: We can end up here with all power domains off, yet | 5990 | * with a CDCLK frequency other than the minimum. To account |
| 6045 | * with a CDCLK frequency other than the minimum. To account | 5991 | * for this take the PIPE-A power domain, which covers the HW |
| 6046 | * for this take the PIPE-A power domain, which covers the HW | 5992 | * blocks needed for the following programming. This can be |
| 6047 | * blocks needed for the following programming. This can be | 5993 | * removed once it's guaranteed that we get here either with |
| 6048 | * removed once it's guaranteed that we get here either with | 5994 | * the minimum CDCLK set, or the required power domains |
| 6049 | * the minimum CDCLK set, or the required power domains | 5995 | * enabled. |
| 6050 | * enabled. | 5996 | */ |
| 6051 | */ | 5997 | intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); |
| 6052 | intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); | ||
| 6053 | 5998 | ||
| 6054 | if (IS_CHERRYVIEW(dev)) | 5999 | if (IS_CHERRYVIEW(dev)) |
| 6055 | cherryview_set_cdclk(dev, req_cdclk); | 6000 | cherryview_set_cdclk(dev, req_cdclk); |
| 6056 | else | 6001 | else |
| 6057 | valleyview_set_cdclk(dev, req_cdclk); | 6002 | valleyview_set_cdclk(dev, req_cdclk); |
| 6058 | 6003 | ||
| 6059 | vlv_program_pfi_credits(dev_priv); | 6004 | vlv_program_pfi_credits(dev_priv); |
| 6060 | 6005 | ||
| 6061 | intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); | 6006 | intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); |
| 6062 | } | ||
| 6063 | } | 6007 | } |
| 6064 | 6008 | ||
| 6065 | static void valleyview_crtc_enable(struct drm_crtc *crtc) | 6009 | static void valleyview_crtc_enable(struct drm_crtc *crtc) |
| @@ -6071,9 +6015,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) | |||
| 6071 | int pipe = intel_crtc->pipe; | 6015 | int pipe = intel_crtc->pipe; |
| 6072 | bool is_dsi; | 6016 | bool is_dsi; |
| 6073 | 6017 | ||
| 6074 | WARN_ON(!crtc->state->enable); | 6018 | if (WARN_ON(intel_crtc->active)) |
| 6075 | |||
| 6076 | if (intel_crtc->active) | ||
| 6077 | return; | 6019 | return; |
| 6078 | 6020 | ||
| 6079 | is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI); | 6021 | is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI); |
| @@ -6122,7 +6064,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) | |||
| 6122 | 6064 | ||
| 6123 | intel_crtc_load_lut(crtc); | 6065 | intel_crtc_load_lut(crtc); |
| 6124 | 6066 | ||
| 6125 | intel_update_watermarks(crtc); | ||
| 6126 | intel_enable_pipe(intel_crtc); | 6067 | intel_enable_pipe(intel_crtc); |
| 6127 | 6068 | ||
| 6128 | assert_vblank_disabled(crtc); | 6069 | assert_vblank_disabled(crtc); |
| @@ -6149,9 +6090,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) | |||
| 6149 | struct intel_encoder *encoder; | 6090 | struct intel_encoder *encoder; |
| 6150 | int pipe = intel_crtc->pipe; | 6091 | int pipe = intel_crtc->pipe; |
| 6151 | 6092 | ||
| 6152 | WARN_ON(!crtc->state->enable); | 6093 | if (WARN_ON(intel_crtc->active)) |
| 6153 | |||
| 6154 | if (intel_crtc->active) | ||
| 6155 | return; | 6094 | return; |
| 6156 | 6095 | ||
| 6157 | i9xx_set_pll_dividers(intel_crtc); | 6096 | i9xx_set_pll_dividers(intel_crtc); |
| @@ -6211,9 +6150,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) | |||
| 6211 | struct intel_encoder *encoder; | 6150 | struct intel_encoder *encoder; |
| 6212 | int pipe = intel_crtc->pipe; | 6151 | int pipe = intel_crtc->pipe; |
| 6213 | 6152 | ||
| 6214 | if (!intel_crtc->active) | ||
| 6215 | return; | ||
| 6216 | |||
| 6217 | /* | 6153 | /* |
| 6218 | * On gen2 planes are double buffered but the pipe isn't, so we must | 6154 | * On gen2 planes are double buffered but the pipe isn't, so we must |
| 6219 | * wait for planes to fully turn off before disabling the pipe. | 6155 | * wait for planes to fully turn off before disabling the pipe. |
| @@ -6250,46 +6186,135 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) | |||
| 6250 | 6186 | ||
| 6251 | intel_crtc->active = false; | 6187 | intel_crtc->active = false; |
| 6252 | intel_update_watermarks(crtc); | 6188 | intel_update_watermarks(crtc); |
| 6189 | } | ||
| 6253 | 6190 | ||
| 6254 | mutex_lock(&dev->struct_mutex); | 6191 | static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) |
| 6255 | intel_fbc_update(dev); | 6192 | { |
| 6256 | mutex_unlock(&dev->struct_mutex); | 6193 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 6194 | struct drm_i915_private *dev_priv = to_i915(crtc->dev); | ||
| 6195 | enum intel_display_power_domain domain; | ||
| 6196 | unsigned long domains; | ||
| 6197 | |||
| 6198 | if (!intel_crtc->active) | ||
| 6199 | return; | ||
| 6200 | |||
| 6201 | if (to_intel_plane_state(crtc->primary->state)->visible) { | ||
| 6202 | intel_crtc_wait_for_pending_flips(crtc); | ||
| 6203 | intel_pre_disable_primary(crtc); | ||
| 6204 | } | ||
| 6205 | |||
| 6206 | intel_crtc_disable_planes(crtc, crtc->state->plane_mask); | ||
| 6207 | dev_priv->display.crtc_disable(crtc); | ||
| 6208 | intel_disable_shared_dpll(intel_crtc); | ||
| 6209 | |||
| 6210 | domains = intel_crtc->enabled_power_domains; | ||
| 6211 | for_each_power_domain(domain, domains) | ||
| 6212 | intel_display_power_put(dev_priv, domain); | ||
| 6213 | intel_crtc->enabled_power_domains = 0; | ||
| 6257 | } | 6214 | } |
| 6258 | 6215 | ||
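intel_crtc_disable_noatomic() tears a pipe down without going through an atomic commit, which is only useful when software state is being forced to match the hardware. A hypothetical caller sketch; the sanitize-time condition is an assumption, not shown in this diff:

	/* sketch: during HW state takeover, park a pipe that is on
	 * in hardware but not wanted by the recorded state */
	if (intel_crtc->active && !crtc->state->active)
		intel_crtc_disable_noatomic(crtc);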
| 6259 | static void i9xx_crtc_off(struct drm_crtc *crtc) | 6216 | /* |
| 6217 | * Turn all CRTCs off, but do not adjust state. | ||
| 6218 | * This has to be paired with a call to intel_modeset_setup_hw_state. | ||
| 6219 | */ | ||
| 6220 | int intel_display_suspend(struct drm_device *dev) | ||
| 6260 | { | 6221 | { |
| 6222 | struct drm_mode_config *config = &dev->mode_config; | ||
| 6223 | struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx; | ||
| 6224 | struct drm_atomic_state *state; | ||
| 6225 | struct drm_crtc *crtc; | ||
| 6226 | unsigned crtc_mask = 0; | ||
| 6227 | int ret = 0; | ||
| 6228 | |||
| 6229 | if (WARN_ON(!ctx)) | ||
| 6230 | return 0; | ||
| 6231 | |||
| 6232 | lockdep_assert_held(&ctx->ww_ctx); | ||
| 6233 | state = drm_atomic_state_alloc(dev); | ||
| 6234 | if (WARN_ON(!state)) | ||
| 6235 | return -ENOMEM; | ||
| 6236 | |||
| 6237 | state->acquire_ctx = ctx; | ||
| 6238 | state->allow_modeset = true; | ||
| 6239 | |||
| 6240 | for_each_crtc(dev, crtc) { | ||
| 6241 | struct drm_crtc_state *crtc_state = | ||
| 6242 | drm_atomic_get_crtc_state(state, crtc); | ||
| 6243 | |||
| 6244 | ret = PTR_ERR_OR_ZERO(crtc_state); | ||
| 6245 | if (ret) | ||
| 6246 | goto free; | ||
| 6247 | |||
| 6248 | if (!crtc_state->active) | ||
| 6249 | continue; | ||
| 6250 | |||
| 6251 | crtc_state->active = false; | ||
| 6252 | crtc_mask |= 1 << drm_crtc_index(crtc); | ||
| 6253 | } | ||
| 6254 | |||
| 6255 | if (crtc_mask) { | ||
| 6256 | ret = drm_atomic_commit(state); | ||
| 6257 | |||
| 6258 | if (!ret) { | ||
| 6259 | for_each_crtc(dev, crtc) | ||
| 6260 | if (crtc_mask & (1 << drm_crtc_index(crtc))) | ||
| 6261 | crtc->state->active = true; | ||
| 6262 | |||
| 6263 | return ret; | ||
| 6264 | } | ||
| 6265 | } | ||
| 6266 | |||
| 6267 | free: | ||
| 6268 | if (ret) | ||
| 6269 | DRM_ERROR("Suspending crtc's failed with %i\n", ret); | ||
| 6270 | drm_atomic_state_free(state); | ||
| 6271 | return ret; | ||
| 6261 | } | 6272 | } |
| 6262 | 6273 | ||
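intel_display_suspend() commits one atomic state with every pipe's active flag cleared, then restores the flags in software so resume can re-enable the same configuration. A minimal caller sketch with an assumed wrapper name; the lock handling shown is illustrative, the function only requires that an acquire context is already set up:

static int example_suspend_display(struct drm_device *dev)
{
	int ret;

	drm_modeset_lock_all(dev);		/* provides mode_config.acquire_ctx */
	ret = intel_display_suspend(dev);
	drm_modeset_unlock_all(dev);

	return ret;
}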
| 6263 | /* Master function to enable/disable CRTC and corresponding power wells */ | 6274 | /* Master function to enable/disable CRTC and corresponding power wells */ |
| 6264 | void intel_crtc_control(struct drm_crtc *crtc, bool enable) | 6275 | int intel_crtc_control(struct drm_crtc *crtc, bool enable) |
| 6265 | { | 6276 | { |
| 6266 | struct drm_device *dev = crtc->dev; | 6277 | struct drm_device *dev = crtc->dev; |
| 6267 | struct drm_i915_private *dev_priv = dev->dev_private; | 6278 | struct drm_mode_config *config = &dev->mode_config; |
| 6279 | struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx; | ||
| 6268 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 6280 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 6269 | enum intel_display_power_domain domain; | 6281 | struct intel_crtc_state *pipe_config; |
| 6270 | unsigned long domains; | 6282 | struct drm_atomic_state *state; |
| 6283 | int ret; | ||
| 6271 | 6284 | ||
| 6272 | if (enable) { | 6285 | if (enable == intel_crtc->active) |
| 6273 | if (!intel_crtc->active) { | 6286 | return 0; |
| 6274 | domains = get_crtc_power_domains(crtc); | ||
| 6275 | for_each_power_domain(domain, domains) | ||
| 6276 | intel_display_power_get(dev_priv, domain); | ||
| 6277 | intel_crtc->enabled_power_domains = domains; | ||
| 6278 | 6287 | ||
| 6279 | dev_priv->display.crtc_enable(crtc); | 6288 | if (enable && !crtc->state->enable) |
| 6280 | intel_crtc_enable_planes(crtc); | 6289 | return 0; |
| 6281 | } | ||
| 6282 | } else { | ||
| 6283 | if (intel_crtc->active) { | ||
| 6284 | intel_crtc_disable_planes(crtc); | ||
| 6285 | dev_priv->display.crtc_disable(crtc); | ||
| 6286 | 6290 | ||
| 6287 | domains = intel_crtc->enabled_power_domains; | 6291 | /* this function should be called with drm_modeset_lock_all for now */ |
| 6288 | for_each_power_domain(domain, domains) | 6292 | if (WARN_ON(!ctx)) |
| 6289 | intel_display_power_put(dev_priv, domain); | 6293 | return -EIO; |
| 6290 | intel_crtc->enabled_power_domains = 0; | 6294 | lockdep_assert_held(&ctx->ww_ctx); |
| 6291 | } | 6295 | |
| 6296 | state = drm_atomic_state_alloc(dev); | ||
| 6297 | if (WARN_ON(!state)) | ||
| 6298 | return -ENOMEM; | ||
| 6299 | |||
| 6300 | state->acquire_ctx = ctx; | ||
| 6301 | state->allow_modeset = true; | ||
| 6302 | |||
| 6303 | pipe_config = intel_atomic_get_crtc_state(state, intel_crtc); | ||
| 6304 | if (IS_ERR(pipe_config)) { | ||
| 6305 | ret = PTR_ERR(pipe_config); | ||
| 6306 | goto err; | ||
| 6292 | } | 6307 | } |
| 6308 | pipe_config->base.active = enable; | ||
| 6309 | |||
| 6310 | ret = drm_atomic_commit(state); | ||
| 6311 | if (!ret) | ||
| 6312 | return ret; | ||
| 6313 | |||
| 6314 | err: | ||
| 6315 | DRM_ERROR("Updating crtc active failed with %i\n", ret); | ||
| 6316 | drm_atomic_state_free(state); | ||
| 6317 | return ret; | ||
| 6293 | } | 6318 | } |
| 6294 | 6319 | ||
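intel_crtc_control() now returns the error code from the underlying atomic commit instead of being void. Callers such as the DPMS helper below still ignore it; a hedged sketch of propagating it instead, with the caller shape assumed:

	ret = intel_crtc_control(crtc, enable);
	if (ret)
		DRM_DEBUG_KMS("crtc %s failed: %d\n",
			      enable ? "enable" : "disable", ret);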
| 6295 | /** | 6320 | /** |
| @@ -6305,33 +6330,6 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc) | |||
| 6305 | enable |= intel_encoder->connectors_active; | 6330 | enable |= intel_encoder->connectors_active; |
| 6306 | 6331 | ||
| 6307 | intel_crtc_control(crtc, enable); | 6332 | intel_crtc_control(crtc, enable); |
| 6308 | |||
| 6309 | crtc->state->active = enable; | ||
| 6310 | } | ||
| 6311 | |||
| 6312 | static void intel_crtc_disable(struct drm_crtc *crtc) | ||
| 6313 | { | ||
| 6314 | struct drm_device *dev = crtc->dev; | ||
| 6315 | struct drm_connector *connector; | ||
| 6316 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 6317 | |||
| 6318 | intel_crtc_disable_planes(crtc); | ||
| 6319 | dev_priv->display.crtc_disable(crtc); | ||
| 6320 | dev_priv->display.off(crtc); | ||
| 6321 | |||
| 6322 | drm_plane_helper_disable(crtc->primary); | ||
| 6323 | |||
| 6324 | /* Update computed state. */ | ||
| 6325 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
| 6326 | if (!connector->encoder || !connector->encoder->crtc) | ||
| 6327 | continue; | ||
| 6328 | |||
| 6329 | if (connector->encoder->crtc != crtc) | ||
| 6330 | continue; | ||
| 6331 | |||
| 6332 | connector->dpms = DRM_MODE_DPMS_OFF; | ||
| 6333 | to_intel_encoder(connector->encoder)->connectors_active = false; | ||
| 6334 | } | ||
| 6335 | } | 6333 | } |
| 6336 | 6334 | ||
| 6337 | void intel_encoder_destroy(struct drm_encoder *encoder) | 6335 | void intel_encoder_destroy(struct drm_encoder *encoder) |
| @@ -6586,12 +6584,36 @@ retry: | |||
| 6586 | return ret; | 6584 | return ret; |
| 6587 | } | 6585 | } |
| 6588 | 6586 | ||
| 6587 | static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv, | ||
| 6588 | struct intel_crtc_state *pipe_config) | ||
| 6589 | { | ||
| 6590 | if (pipe_config->pipe_bpp > 24) | ||
| 6591 | return false; | ||
| 6592 | |||
| 6593 | /* HSW can handle pixel rate up to cdclk? */ | ||
| 6594 | if (IS_HASWELL(dev_priv->dev)) | ||
| 6595 | return true; | ||
| 6596 | |||
| 6597 | /* | ||
| 6598 | * We compare against max which means we must take | ||
| 6599 | * the increased cdclk requirement into account when | ||
| 6600 | * calculating the new cdclk. | ||
| 6601 | * | ||
| 6602 | * Should measure whether using a lower cdclk w/o IPS would be preferable. | ||
| 6603 | */ | ||
| 6604 | return ilk_pipe_pixel_rate(pipe_config) <= | ||
| 6605 | dev_priv->max_cdclk_freq * 95 / 100; | ||
| 6606 | } | ||
| 6607 | |||
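pipe_config_supports_ips() keeps IPS only while the pipe pixel rate leaves 5% of headroom below the maximum cdclk. A worked bound with an assumed max cdclk value:

	/* assumed: max_cdclk_freq = 450000 kHz (a 450 MHz BDW part) */
	unsigned int ips_limit = 450000 * 95 / 100;	/* = 427500 kHz */
	/* a pipe pixel rate above 427.5 MHz therefore disables IPS */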
| 6589 | static void hsw_compute_ips_config(struct intel_crtc *crtc, | 6608 | static void hsw_compute_ips_config(struct intel_crtc *crtc, |
| 6590 | struct intel_crtc_state *pipe_config) | 6609 | struct intel_crtc_state *pipe_config) |
| 6591 | { | 6610 | { |
| 6611 | struct drm_device *dev = crtc->base.dev; | ||
| 6612 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 6613 | |||
| 6592 | pipe_config->ips_enabled = i915.enable_ips && | 6614 | pipe_config->ips_enabled = i915.enable_ips && |
| 6593 | hsw_crtc_supports_ips(crtc) && | 6615 | hsw_crtc_supports_ips(crtc) && |
| 6594 | pipe_config->pipe_bpp <= 24; | 6616 | pipe_config_supports_ips(dev_priv, pipe_config); |
| 6595 | } | 6617 | } |
| 6596 | 6618 | ||
| 6597 | static int intel_crtc_compute_config(struct intel_crtc *crtc, | 6619 | static int intel_crtc_compute_config(struct intel_crtc *crtc, |
| @@ -6600,12 +6622,10 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, | |||
| 6600 | struct drm_device *dev = crtc->base.dev; | 6622 | struct drm_device *dev = crtc->base.dev; |
| 6601 | struct drm_i915_private *dev_priv = dev->dev_private; | 6623 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 6602 | struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; | 6624 | struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; |
| 6603 | int ret; | ||
| 6604 | 6625 | ||
| 6605 | /* FIXME should check pixel clock limits on all platforms */ | 6626 | /* FIXME should check pixel clock limits on all platforms */ |
| 6606 | if (INTEL_INFO(dev)->gen < 4) { | 6627 | if (INTEL_INFO(dev)->gen < 4) { |
| 6607 | int clock_limit = | 6628 | int clock_limit = dev_priv->max_cdclk_freq; |
| 6608 | dev_priv->display.get_display_clock_speed(dev); | ||
| 6609 | 6629 | ||
| 6610 | /* | 6630 | /* |
| 6611 | * Enable pixel doubling when the dot clock | 6631 | * Enable pixel doubling when the dot clock |
| @@ -6647,14 +6667,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, | |||
| 6647 | if (pipe_config->has_pch_encoder) | 6667 | if (pipe_config->has_pch_encoder) |
| 6648 | return ironlake_fdi_compute_config(crtc, pipe_config); | 6668 | return ironlake_fdi_compute_config(crtc, pipe_config); |
| 6649 | 6669 | ||
| 6650 | /* FIXME: remove below call once atomic mode set is place and all crtc | 6670 | return 0; |
| 6651 | * related checks called from atomic_crtc_check function */ | ||
| 6652 | ret = 0; | ||
| 6653 | DRM_DEBUG_KMS("intel_crtc = %p drm_state (pipe_config->base.state) = %p\n", | ||
| 6654 | crtc, pipe_config->base.state); | ||
| 6655 | ret = intel_atomic_setup_scalers(dev, crtc, pipe_config); | ||
| 6656 | |||
| 6657 | return ret; | ||
| 6658 | } | 6671 | } |
| 6659 | 6672 | ||
| 6660 | static int skylake_get_display_clock_speed(struct drm_device *dev) | 6673 | static int skylake_get_display_clock_speed(struct drm_device *dev) |
| @@ -6664,10 +6677,8 @@ static int skylake_get_display_clock_speed(struct drm_device *dev) | |||
| 6664 | uint32_t cdctl = I915_READ(CDCLK_CTL); | 6677 | uint32_t cdctl = I915_READ(CDCLK_CTL); |
| 6665 | uint32_t linkrate; | 6678 | uint32_t linkrate; |
| 6666 | 6679 | ||
| 6667 | if (!(lcpll1 & LCPLL_PLL_ENABLE)) { | 6680 | if (!(lcpll1 & LCPLL_PLL_ENABLE)) |
| 6668 | WARN(1, "LCPLL1 not enabled\n"); | ||
| 6669 | return 24000; /* 24MHz is the cd freq with NSSC ref */ | 6681 | return 24000; /* 24MHz is the cd freq with NSSC ref */ |
| 6670 | } | ||
| 6671 | 6682 | ||
| 6672 | if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540) | 6683 | if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540) |
| 6673 | return 540000; | 6684 | return 540000; |
| @@ -6706,6 +6717,34 @@ static int skylake_get_display_clock_speed(struct drm_device *dev) | |||
| 6706 | return 24000; | 6717 | return 24000; |
| 6707 | } | 6718 | } |
| 6708 | 6719 | ||
| 6720 | static int broxton_get_display_clock_speed(struct drm_device *dev) | ||
| 6721 | { | ||
| 6722 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
| 6723 | uint32_t cdctl = I915_READ(CDCLK_CTL); | ||
| 6724 | uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; | ||
| 6725 | uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE); | ||
| 6726 | int cdclk; | ||
| 6727 | |||
| 6728 | if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE)) | ||
| 6729 | return 19200; | ||
| 6730 | |||
| 6731 | cdclk = 19200 * pll_ratio / 2; | ||
| 6732 | |||
| 6733 | switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) { | ||
| 6734 | case BXT_CDCLK_CD2X_DIV_SEL_1: | ||
| 6735 | return cdclk; /* 576MHz or 624MHz */ | ||
| 6736 | case BXT_CDCLK_CD2X_DIV_SEL_1_5: | ||
| 6737 | return cdclk * 2 / 3; /* 384MHz */ | ||
| 6738 | case BXT_CDCLK_CD2X_DIV_SEL_2: | ||
| 6739 | return cdclk / 2; /* 288MHz */ | ||
| 6740 | case BXT_CDCLK_CD2X_DIV_SEL_4: | ||
| 6741 | return cdclk / 4; /* 144MHz */ | ||
| 6742 | } | ||
| 6743 | |||
| 6744 | /* error case, do as if DE PLL isn't enabled */ | ||
| 6745 | return 19200; | ||
| 6746 | } | ||
| 6747 | |||
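broxton_get_display_clock_speed() derives cdclk from the DE PLL ratio against the 19.2 MHz reference and then applies the CD2X divider. Worked numbers for one assumed ratio readback:

	/* assumed: BXT_DE_PLL_RATIO field reads back 60 */
	int cdclk = 19200 * 60 / 2;		/* 576000 kHz with CD2X div 1 */
	/* CD2X div 1.5 -> 576000 * 2 / 3 = 384000 kHz
	 * CD2X div 2   -> 576000 / 2     = 288000 kHz
	 * CD2X div 4   -> 576000 / 4     = 144000 kHz */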
| 6709 | static int broadwell_get_display_clock_speed(struct drm_device *dev) | 6748 | static int broadwell_get_display_clock_speed(struct drm_device *dev) |
| 6710 | { | 6749 | { |
| 6711 | struct drm_i915_private *dev_priv = dev->dev_private; | 6750 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -6834,20 +6873,37 @@ static int i865_get_display_clock_speed(struct drm_device *dev) | |||
| 6834 | return 266667; | 6873 | return 266667; |
| 6835 | } | 6874 | } |
| 6836 | 6875 | ||
| 6837 | static int i855_get_display_clock_speed(struct drm_device *dev) | 6876 | static int i85x_get_display_clock_speed(struct drm_device *dev) |
| 6838 | { | 6877 | { |
| 6839 | u16 hpllcc = 0; | 6878 | u16 hpllcc = 0; |
| 6879 | |||
| 6880 | /* | ||
| 6881 | * 852GM/852GMV only supports 133 MHz and the HPLLCC | ||
| 6882 | * encoding is different :( | ||
| 6883 | * FIXME is this the right way to detect 852GM/852GMV? | ||
| 6884 | */ | ||
| 6885 | if (dev->pdev->revision == 0x1) | ||
| 6886 | return 133333; | ||
| 6887 | |||
| 6888 | pci_bus_read_config_word(dev->pdev->bus, | ||
| 6889 | PCI_DEVFN(0, 3), HPLLCC, &hpllcc); | ||
| 6890 | |||
| 6840 | /* Assume that the hardware is in the high speed state. This | 6891 | /* Assume that the hardware is in the high speed state. This |
| 6841 | * should be the default. | 6892 | * should be the default. |
| 6842 | */ | 6893 | */ |
| 6843 | switch (hpllcc & GC_CLOCK_CONTROL_MASK) { | 6894 | switch (hpllcc & GC_CLOCK_CONTROL_MASK) { |
| 6844 | case GC_CLOCK_133_200: | 6895 | case GC_CLOCK_133_200: |
| 6896 | case GC_CLOCK_133_200_2: | ||
| 6845 | case GC_CLOCK_100_200: | 6897 | case GC_CLOCK_100_200: |
| 6846 | return 200000; | 6898 | return 200000; |
| 6847 | case GC_CLOCK_166_250: | 6899 | case GC_CLOCK_166_250: |
| 6848 | return 250000; | 6900 | return 250000; |
| 6849 | case GC_CLOCK_100_133: | 6901 | case GC_CLOCK_100_133: |
| 6850 | return 133333; | 6902 | return 133333; |
| 6903 | case GC_CLOCK_133_266: | ||
| 6904 | case GC_CLOCK_133_266_2: | ||
| 6905 | case GC_CLOCK_166_266: | ||
| 6906 | return 266667; | ||
| 6851 | } | 6907 | } |
| 6852 | 6908 | ||
| 6853 | /* Shouldn't happen */ | 6909 | /* Shouldn't happen */ |
| @@ -6859,6 +6915,175 @@ static int i830_get_display_clock_speed(struct drm_device *dev) | |||
| 6859 | return 133333; | 6915 | return 133333; |
| 6860 | } | 6916 | } |
| 6861 | 6917 | ||
| 6918 | static unsigned int intel_hpll_vco(struct drm_device *dev) | ||
| 6919 | { | ||
| 6920 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 6921 | static const unsigned int blb_vco[8] = { | ||
| 6922 | [0] = 3200000, | ||
| 6923 | [1] = 4000000, | ||
| 6924 | [2] = 5333333, | ||
| 6925 | [3] = 4800000, | ||
| 6926 | [4] = 6400000, | ||
| 6927 | }; | ||
| 6928 | static const unsigned int pnv_vco[8] = { | ||
| 6929 | [0] = 3200000, | ||
| 6930 | [1] = 4000000, | ||
| 6931 | [2] = 5333333, | ||
| 6932 | [3] = 4800000, | ||
| 6933 | [4] = 2666667, | ||
| 6934 | }; | ||
| 6935 | static const unsigned int cl_vco[8] = { | ||
| 6936 | [0] = 3200000, | ||
| 6937 | [1] = 4000000, | ||
| 6938 | [2] = 5333333, | ||
| 6939 | [3] = 6400000, | ||
| 6940 | [4] = 3333333, | ||
| 6941 | [5] = 3566667, | ||
| 6942 | [6] = 4266667, | ||
| 6943 | }; | ||
| 6944 | static const unsigned int elk_vco[8] = { | ||
| 6945 | [0] = 3200000, | ||
| 6946 | [1] = 4000000, | ||
| 6947 | [2] = 5333333, | ||
| 6948 | [3] = 4800000, | ||
| 6949 | }; | ||
| 6950 | static const unsigned int ctg_vco[8] = { | ||
| 6951 | [0] = 3200000, | ||
| 6952 | [1] = 4000000, | ||
| 6953 | [2] = 5333333, | ||
| 6954 | [3] = 6400000, | ||
| 6955 | [4] = 2666667, | ||
| 6956 | [5] = 4266667, | ||
| 6957 | }; | ||
| 6958 | const unsigned int *vco_table; | ||
| 6959 | unsigned int vco; | ||
| 6960 | uint8_t tmp = 0; | ||
| 6961 | |||
| 6962 | /* FIXME other chipsets? */ | ||
| 6963 | if (IS_GM45(dev)) | ||
| 6964 | vco_table = ctg_vco; | ||
| 6965 | else if (IS_G4X(dev)) | ||
| 6966 | vco_table = elk_vco; | ||
| 6967 | else if (IS_CRESTLINE(dev)) | ||
| 6968 | vco_table = cl_vco; | ||
| 6969 | else if (IS_PINEVIEW(dev)) | ||
| 6970 | vco_table = pnv_vco; | ||
| 6971 | else if (IS_G33(dev)) | ||
| 6972 | vco_table = blb_vco; | ||
| 6973 | else | ||
| 6974 | return 0; | ||
| 6975 | |||
| 6976 | tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO); | ||
| 6977 | |||
| 6978 | vco = vco_table[tmp & 0x7]; | ||
| 6979 | if (vco == 0) | ||
| 6980 | DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp); | ||
| 6981 | else | ||
| 6982 | DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco); | ||
| 6983 | |||
| 6984 | return vco; | ||
| 6985 | } | ||
| 6986 | |||
| 6987 | static int gm45_get_display_clock_speed(struct drm_device *dev) | ||
| 6988 | { | ||
| 6989 | unsigned int cdclk_sel, vco = intel_hpll_vco(dev); | ||
| 6990 | uint16_t tmp = 0; | ||
| 6991 | |||
| 6992 | pci_read_config_word(dev->pdev, GCFGC, &tmp); | ||
| 6993 | |||
| 6994 | cdclk_sel = (tmp >> 12) & 0x1; | ||
| 6995 | |||
| 6996 | switch (vco) { | ||
| 6997 | case 2666667: | ||
| 6998 | case 4000000: | ||
| 6999 | case 5333333: | ||
| 7000 | return cdclk_sel ? 333333 : 222222; | ||
| 7001 | case 3200000: | ||
| 7002 | return cdclk_sel ? 320000 : 228571; | ||
| 7003 | default: | ||
| 7004 | DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp); | ||
| 7005 | return 222222; | ||
| 7006 | } | ||
| 7007 | } | ||
| 7008 | |||
| 7009 | static int i965gm_get_display_clock_speed(struct drm_device *dev) | ||
| 7010 | { | ||
| 7011 | static const uint8_t div_3200[] = { 16, 10, 8 }; | ||
| 7012 | static const uint8_t div_4000[] = { 20, 12, 10 }; | ||
| 7013 | static const uint8_t div_5333[] = { 24, 16, 14 }; | ||
| 7014 | const uint8_t *div_table; | ||
| 7015 | unsigned int cdclk_sel, vco = intel_hpll_vco(dev); | ||
| 7016 | uint16_t tmp = 0; | ||
| 7017 | |||
| 7018 | pci_read_config_word(dev->pdev, GCFGC, &tmp); | ||
| 7019 | |||
| 7020 | cdclk_sel = ((tmp >> 8) & 0x1f) - 1; | ||
| 7021 | |||
| 7022 | if (cdclk_sel >= ARRAY_SIZE(div_3200)) | ||
| 7023 | goto fail; | ||
| 7024 | |||
| 7025 | switch (vco) { | ||
| 7026 | case 3200000: | ||
| 7027 | div_table = div_3200; | ||
| 7028 | break; | ||
| 7029 | case 4000000: | ||
| 7030 | div_table = div_4000; | ||
| 7031 | break; | ||
| 7032 | case 5333333: | ||
| 7033 | div_table = div_5333; | ||
| 7034 | break; | ||
| 7035 | default: | ||
| 7036 | goto fail; | ||
| 7037 | } | ||
| 7038 | |||
| 7039 | return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]); | ||
| 7040 | |||
| 7041 | fail: | ||
| 7042 | DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp); | ||
| 7043 | return 200000; | ||
| 7044 | } | ||
| 7045 | |||
| 7046 | static int g33_get_display_clock_speed(struct drm_device *dev) | ||
| 7047 | { | ||
| 7048 | static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 }; | ||
| 7049 | static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 }; | ||
| 7050 | static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 }; | ||
| 7051 | static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 }; | ||
| 7052 | const uint8_t *div_table; | ||
| 7053 | unsigned int cdclk_sel, vco = intel_hpll_vco(dev); | ||
| 7054 | uint16_t tmp = 0; | ||
| 7055 | |||
| 7056 | pci_read_config_word(dev->pdev, GCFGC, &tmp); | ||
| 7057 | |||
| 7058 | cdclk_sel = (tmp >> 4) & 0x7; | ||
| 7059 | |||
| 7060 | if (cdclk_sel >= ARRAY_SIZE(div_3200)) | ||
| 7061 | goto fail; | ||
| 7062 | |||
| 7063 | switch (vco) { | ||
| 7064 | case 3200000: | ||
| 7065 | div_table = div_3200; | ||
| 7066 | break; | ||
| 7067 | case 4000000: | ||
| 7068 | div_table = div_4000; | ||
| 7069 | break; | ||
| 7070 | case 4800000: | ||
| 7071 | div_table = div_4800; | ||
| 7072 | break; | ||
| 7073 | case 5333333: | ||
| 7074 | div_table = div_5333; | ||
| 7075 | break; | ||
| 7076 | default: | ||
| 7077 | goto fail; | ||
| 7078 | } | ||
| 7079 | |||
| 7080 | return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]); | ||
| 7081 | |||
| 7082 | fail: | ||
| 7083 | DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp); | ||
| 7084 | return 190476; | ||
| 7085 | } | ||
| 7086 | |||
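The gen3/4 cdclk readout added above is a two-step lookup: intel_hpll_vco() decodes the HPLL VCO from HPLLVCO, and a per-platform divider table indexed by the GCFGC select bits turns that VCO into cdclk. A worked example with assumed register values for a 965GM:

	/* assumed: HPLLVCO decodes to 3200000 kHz and GCFGC's cdclk_sel is 1 */
	static const uint8_t div_3200[] = { 16, 10, 8 };
	int cdclk = DIV_ROUND_CLOSEST(3200000, div_3200[1]);	/* = 320000 kHz */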
| 6862 | static void | 7087 | static void |
| 6863 | intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) | 7088 | intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) |
| 6864 | { | 7089 | { |
| @@ -7064,8 +7289,8 @@ void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n) | |||
| 7064 | intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2); | 7289 | intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2); |
| 7065 | } | 7290 | } |
| 7066 | 7291 | ||
| 7067 | static void vlv_update_pll(struct intel_crtc *crtc, | 7292 | static void vlv_compute_dpll(struct intel_crtc *crtc, |
| 7068 | struct intel_crtc_state *pipe_config) | 7293 | struct intel_crtc_state *pipe_config) |
| 7069 | { | 7294 | { |
| 7070 | u32 dpll, dpll_md; | 7295 | u32 dpll, dpll_md; |
| 7071 | 7296 | ||
| @@ -7074,8 +7299,8 @@ static void vlv_update_pll(struct intel_crtc *crtc, | |||
| 7074 | * clock for pipe B, since VGA hotplug / manual detection depends | 7299 | * clock for pipe B, since VGA hotplug / manual detection depends |
| 7075 | * on it. | 7300 | * on it. |
| 7076 | */ | 7301 | */ |
| 7077 | dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | | 7302 | dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV | |
| 7078 | DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; | 7303 | DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV; |
| 7079 | /* We should never disable this, set it here for state tracking */ | 7304 | /* We should never disable this, set it here for state tracking */ |
| 7080 | if (crtc->pipe == PIPE_B) | 7305 | if (crtc->pipe == PIPE_B) |
| 7081 | dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; | 7306 | dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; |
| @@ -7178,11 +7403,11 @@ static void vlv_prepare_pll(struct intel_crtc *crtc, | |||
| 7178 | mutex_unlock(&dev_priv->sb_lock); | 7403 | mutex_unlock(&dev_priv->sb_lock); |
| 7179 | } | 7404 | } |
| 7180 | 7405 | ||
| 7181 | static void chv_update_pll(struct intel_crtc *crtc, | 7406 | static void chv_compute_dpll(struct intel_crtc *crtc, |
| 7182 | struct intel_crtc_state *pipe_config) | 7407 | struct intel_crtc_state *pipe_config) |
| 7183 | { | 7408 | { |
| 7184 | pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV | | 7409 | pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | |
| 7185 | DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS | | 7410 | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS | |
| 7186 | DPLL_VCO_ENABLE; | 7411 | DPLL_VCO_ENABLE; |
| 7187 | if (crtc->pipe != PIPE_A) | 7412 | if (crtc->pipe != PIPE_A) |
| 7188 | pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; | 7413 | pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; |
| @@ -7318,11 +7543,11 @@ void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe, | |||
| 7318 | }; | 7543 | }; |
| 7319 | 7544 | ||
| 7320 | if (IS_CHERRYVIEW(dev)) { | 7545 | if (IS_CHERRYVIEW(dev)) { |
| 7321 | chv_update_pll(crtc, &pipe_config); | 7546 | chv_compute_dpll(crtc, &pipe_config); |
| 7322 | chv_prepare_pll(crtc, &pipe_config); | 7547 | chv_prepare_pll(crtc, &pipe_config); |
| 7323 | chv_enable_pll(crtc, &pipe_config); | 7548 | chv_enable_pll(crtc, &pipe_config); |
| 7324 | } else { | 7549 | } else { |
| 7325 | vlv_update_pll(crtc, &pipe_config); | 7550 | vlv_compute_dpll(crtc, &pipe_config); |
| 7326 | vlv_prepare_pll(crtc, &pipe_config); | 7551 | vlv_prepare_pll(crtc, &pipe_config); |
| 7327 | vlv_enable_pll(crtc, &pipe_config); | 7552 | vlv_enable_pll(crtc, &pipe_config); |
| 7328 | } | 7553 | } |
| @@ -7344,10 +7569,10 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe) | |||
| 7344 | vlv_disable_pll(to_i915(dev), pipe); | 7569 | vlv_disable_pll(to_i915(dev), pipe); |
| 7345 | } | 7570 | } |
| 7346 | 7571 | ||
| 7347 | static void i9xx_update_pll(struct intel_crtc *crtc, | 7572 | static void i9xx_compute_dpll(struct intel_crtc *crtc, |
| 7348 | struct intel_crtc_state *crtc_state, | 7573 | struct intel_crtc_state *crtc_state, |
| 7349 | intel_clock_t *reduced_clock, | 7574 | intel_clock_t *reduced_clock, |
| 7350 | int num_connectors) | 7575 | int num_connectors) |
| 7351 | { | 7576 | { |
| 7352 | struct drm_device *dev = crtc->base.dev; | 7577 | struct drm_device *dev = crtc->base.dev; |
| 7353 | struct drm_i915_private *dev_priv = dev->dev_private; | 7578 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -7421,10 +7646,10 @@ static void i9xx_update_pll(struct intel_crtc *crtc, | |||
| 7421 | } | 7646 | } |
| 7422 | } | 7647 | } |
| 7423 | 7648 | ||
| 7424 | static void i8xx_update_pll(struct intel_crtc *crtc, | 7649 | static void i8xx_compute_dpll(struct intel_crtc *crtc, |
| 7425 | struct intel_crtc_state *crtc_state, | 7650 | struct intel_crtc_state *crtc_state, |
| 7426 | intel_clock_t *reduced_clock, | 7651 | intel_clock_t *reduced_clock, |
| 7427 | int num_connectors) | 7652 | int num_connectors) |
| 7428 | { | 7653 | { |
| 7429 | struct drm_device *dev = crtc->base.dev; | 7654 | struct drm_device *dev = crtc->base.dev; |
| 7430 | struct drm_i915_private *dev_priv = dev->dev_private; | 7655 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -7584,9 +7809,14 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, | |||
| 7584 | mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end; | 7809 | mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end; |
| 7585 | 7810 | ||
| 7586 | mode->flags = pipe_config->base.adjusted_mode.flags; | 7811 | mode->flags = pipe_config->base.adjusted_mode.flags; |
| 7812 | mode->type = DRM_MODE_TYPE_DRIVER; | ||
| 7587 | 7813 | ||
| 7588 | mode->clock = pipe_config->base.adjusted_mode.crtc_clock; | 7814 | mode->clock = pipe_config->base.adjusted_mode.crtc_clock; |
| 7589 | mode->flags |= pipe_config->base.adjusted_mode.flags; | 7815 | mode->flags |= pipe_config->base.adjusted_mode.flags; |
| 7816 | |||
| 7817 | mode->hsync = drm_mode_hsync(mode); | ||
| 7818 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
| 7819 | drm_mode_set_name(mode); | ||
| 7590 | } | 7820 | } |
| 7591 | 7821 | ||
| 7592 | static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) | 7822 | static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) |
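Editor's note: the new assignments above fill in the derived fields a userspace-visible mode is expected to carry. A worked example with illustrative 1080p numbers (not taken from this patch):

	struct drm_display_mode m = {
		.hdisplay = 1920, .vdisplay = 1080,
		.htotal   = 2200, .vtotal   = 1125,
		.clock    = 148500,			/* kHz */
	};
	/* drm_mode_hsync(&m)    -> 148500 / 2200 = 67.5, reported as 68 kHz  */
	/* drm_mode_vrefresh(&m) -> 148500000 / (2200 * 1125) = 60 Hz         */
	/* drm_mode_set_name(&m) -> m.name = "1920x1080"                      */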
| @@ -7658,9 +7888,9 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, | |||
| 7658 | struct drm_device *dev = crtc->base.dev; | 7888 | struct drm_device *dev = crtc->base.dev; |
| 7659 | struct drm_i915_private *dev_priv = dev->dev_private; | 7889 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 7660 | int refclk, num_connectors = 0; | 7890 | int refclk, num_connectors = 0; |
| 7661 | intel_clock_t clock, reduced_clock; | 7891 | intel_clock_t clock; |
| 7662 | bool ok, has_reduced_clock = false; | 7892 | bool ok; |
| 7663 | bool is_lvds = false, is_dsi = false; | 7893 | bool is_dsi = false; |
| 7664 | struct intel_encoder *encoder; | 7894 | struct intel_encoder *encoder; |
| 7665 | const intel_limit_t *limit; | 7895 | const intel_limit_t *limit; |
| 7666 | struct drm_atomic_state *state = crtc_state->base.state; | 7896 | struct drm_atomic_state *state = crtc_state->base.state; |
| @@ -7678,9 +7908,6 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, | |||
| 7678 | encoder = to_intel_encoder(connector_state->best_encoder); | 7908 | encoder = to_intel_encoder(connector_state->best_encoder); |
| 7679 | 7909 | ||
| 7680 | switch (encoder->type) { | 7910 | switch (encoder->type) { |
| 7681 | case INTEL_OUTPUT_LVDS: | ||
| 7682 | is_lvds = true; | ||
| 7683 | break; | ||
| 7684 | case INTEL_OUTPUT_DSI: | 7911 | case INTEL_OUTPUT_DSI: |
| 7685 | is_dsi = true; | 7912 | is_dsi = true; |
| 7686 | break; | 7913 | break; |
| @@ -7712,19 +7939,6 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, | |||
| 7712 | return -EINVAL; | 7939 | return -EINVAL; |
| 7713 | } | 7940 | } |
| 7714 | 7941 | ||
| 7715 | if (is_lvds && dev_priv->lvds_downclock_avail) { | ||
| 7716 | /* | ||
| 7717 | * Ensure we match the reduced clock's P to the target | ||
| 7718 | * clock. If the clocks don't match, we can't switch | ||
| 7719 | * the display clock by using the FP0/FP1. In such case | ||
| 7720 | * we will disable the LVDS downclock feature. | ||
| 7721 | */ | ||
| 7722 | has_reduced_clock = | ||
| 7723 | dev_priv->display.find_dpll(limit, crtc_state, | ||
| 7724 | dev_priv->lvds_downclock, | ||
| 7725 | refclk, &clock, | ||
| 7726 | &reduced_clock); | ||
| 7727 | } | ||
| 7728 | /* Compat-code for transition, will disappear. */ | 7942 | /* Compat-code for transition, will disappear. */ |
| 7729 | crtc_state->dpll.n = clock.n; | 7943 | crtc_state->dpll.n = clock.n; |
| 7730 | crtc_state->dpll.m1 = clock.m1; | 7944 | crtc_state->dpll.m1 = clock.m1; |
| @@ -7734,17 +7948,15 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, | |||
| 7734 | } | 7948 | } |
| 7735 | 7949 | ||
| 7736 | if (IS_GEN2(dev)) { | 7950 | if (IS_GEN2(dev)) { |
| 7737 | i8xx_update_pll(crtc, crtc_state, | 7951 | i8xx_compute_dpll(crtc, crtc_state, NULL, |
| 7738 | has_reduced_clock ? &reduced_clock : NULL, | 7952 | num_connectors); |
| 7739 | num_connectors); | ||
| 7740 | } else if (IS_CHERRYVIEW(dev)) { | 7953 | } else if (IS_CHERRYVIEW(dev)) { |
| 7741 | chv_update_pll(crtc, crtc_state); | 7954 | chv_compute_dpll(crtc, crtc_state); |
| 7742 | } else if (IS_VALLEYVIEW(dev)) { | 7955 | } else if (IS_VALLEYVIEW(dev)) { |
| 7743 | vlv_update_pll(crtc, crtc_state); | 7956 | vlv_compute_dpll(crtc, crtc_state); |
| 7744 | } else { | 7957 | } else { |
| 7745 | i9xx_update_pll(crtc, crtc_state, | 7958 | i9xx_compute_dpll(crtc, crtc_state, NULL, |
| 7746 | has_reduced_clock ? &reduced_clock : NULL, | 7959 | num_connectors); |
| 7747 | num_connectors); | ||
| 7748 | } | 7960 | } |
| 7749 | 7961 | ||
| 7750 | return 0; | 7962 | return 0; |
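Editor's note: with the LVDS downclock path removed, reduced_clock is now always NULL here, so the i8xx/i9xx compute helpers presumably end up mirroring FP0 into FP1 instead of programming a second divider set. A hedged sketch of that assumption (fp is the value already derived from crtc_state->dpll):

	crtc_state->dpll_hw_state.fp0 = fp;
	/* no reduced clock any more, so the FP1 register just repeats FP0 */
	crtc_state->dpll_hw_state.fp1 = fp;	/* previously a separate fp2 when downclocking */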
| @@ -7804,10 +8016,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc, | |||
| 7804 | clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; | 8016 | clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; |
| 7805 | clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; | 8017 | clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; |
| 7806 | 8018 | ||
| 7807 | vlv_clock(refclk, &clock); | 8019 | pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); |
| 7808 | |||
| 7809 | /* clock.dot is the fast clock */ | ||
| 7810 | pipe_config->port_clock = clock.dot / 5; | ||
| 7811 | } | 8020 | } |
| 7812 | 8021 | ||
| 7813 | static void | 8022 | static void |
| @@ -7906,10 +8115,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, | |||
| 7906 | clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; | 8115 | clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; |
| 7907 | clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; | 8116 | clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; |
| 7908 | 8117 | ||
| 7909 | chv_clock(refclk, &clock); | 8118 | pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); |
| 7910 | |||
| 7911 | /* clock.dot is the fast clock */ | ||
| 7912 | pipe_config->port_clock = clock.dot / 5; | ||
| 7913 | } | 8119 | } |
| 7914 | 8120 | ||
| 7915 | static bool i9xx_get_pipe_config(struct intel_crtc *crtc, | 8121 | static bool i9xx_get_pipe_config(struct intel_crtc *crtc, |
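Editor's note: the removed open-coded "clock.dot / 5" moves into the vlv/chv *_calc_dpll_params() helpers; they are defined elsewhere in this file and not shown in this excerpt. A hedged sketch of the assumed math:

	static int sketch_vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
	{
		clock->m = clock->m1 * clock->m2;
		clock->p = clock->p1 * clock->p2;
		if (WARN_ON(clock->n == 0 || clock->p == 0))
			return 0;
		clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
		clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
		/* clock->dot is the fast (5x) clock; the port clock is a fifth of it */
		return clock->dot / 5;
	}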
| @@ -8558,9 +8764,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc, | |||
| 8558 | struct drm_i915_private *dev_priv = dev->dev_private; | 8764 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 8559 | int refclk; | 8765 | int refclk; |
| 8560 | const intel_limit_t *limit; | 8766 | const intel_limit_t *limit; |
| 8561 | bool ret, is_lvds = false; | 8767 | bool ret; |
| 8562 | |||
| 8563 | is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS); | ||
| 8564 | 8768 | ||
| 8565 | refclk = ironlake_get_refclk(crtc_state); | 8769 | refclk = ironlake_get_refclk(crtc_state); |
| 8566 | 8770 | ||
| @@ -8576,20 +8780,6 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc, | |||
| 8576 | if (!ret) | 8780 | if (!ret) |
| 8577 | return false; | 8781 | return false; |
| 8578 | 8782 | ||
| 8579 | if (is_lvds && dev_priv->lvds_downclock_avail) { | ||
| 8580 | /* | ||
| 8581 | * Ensure we match the reduced clock's P to the target clock. | ||
| 8582 | * If the clocks don't match, we can't switch the display clock | ||
| 8583 | * by using the FP0/FP1. In such case we will disable the LVDS | ||
| 8584 | * downclock feature. | ||
| 8585 | */ | ||
| 8586 | *has_reduced_clock = | ||
| 8587 | dev_priv->display.find_dpll(limit, crtc_state, | ||
| 8588 | dev_priv->lvds_downclock, | ||
| 8589 | refclk, clock, | ||
| 8590 | reduced_clock); | ||
| 8591 | } | ||
| 8592 | |||
| 8593 | return true; | 8783 | return true; |
| 8594 | } | 8784 | } |
| 8595 | 8785 | ||
| @@ -9297,6 +9487,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) | |||
| 9297 | } | 9487 | } |
| 9298 | 9488 | ||
| 9299 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | 9489 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
| 9490 | intel_update_cdclk(dev_priv->dev); | ||
| 9300 | } | 9491 | } |
| 9301 | 9492 | ||
| 9302 | /* | 9493 | /* |
| @@ -9358,21 +9549,160 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv) | |||
| 9358 | intel_prepare_ddi(dev); | 9549 | intel_prepare_ddi(dev); |
| 9359 | } | 9550 | } |
| 9360 | 9551 | ||
| 9361 | static void broxton_modeset_global_resources(struct drm_atomic_state *old_state) | 9552 | static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state) |
| 9362 | { | 9553 | { |
| 9363 | struct drm_device *dev = old_state->dev; | 9554 | struct drm_device *dev = old_state->dev; |
| 9555 | unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk; | ||
| 9556 | |||
| 9557 | broxton_set_cdclk(dev, req_cdclk); | ||
| 9558 | } | ||
| 9559 | |||
| 9560 | /* compute the max rate for the new configuration */ | ||
| 9561 | static int ilk_max_pixel_rate(struct drm_atomic_state *state) | ||
| 9562 | { | ||
| 9563 | struct intel_crtc *intel_crtc; | ||
| 9564 | struct intel_crtc_state *crtc_state; | ||
| 9565 | int max_pixel_rate = 0; | ||
| 9566 | |||
| 9567 | for_each_intel_crtc(state->dev, intel_crtc) { | ||
| 9568 | int pixel_rate; | ||
| 9569 | |||
| 9570 | crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); | ||
| 9571 | if (IS_ERR(crtc_state)) | ||
| 9572 | return PTR_ERR(crtc_state); | ||
| 9573 | |||
| 9574 | if (!crtc_state->base.enable) | ||
| 9575 | continue; | ||
| 9576 | |||
| 9577 | pixel_rate = ilk_pipe_pixel_rate(crtc_state); | ||
| 9578 | |||
| 9579 | /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ | ||
| 9580 | if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled) | ||
| 9581 | pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); | ||
| 9582 | |||
| 9583 | max_pixel_rate = max(max_pixel_rate, pixel_rate); | ||
| 9584 | } | ||
| 9585 | |||
| 9586 | return max_pixel_rate; | ||
| 9587 | } | ||
| 9588 | |||
| 9589 | static void broadwell_set_cdclk(struct drm_device *dev, int cdclk) | ||
| 9590 | { | ||
| 9364 | struct drm_i915_private *dev_priv = dev->dev_private; | 9591 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 9365 | int max_pixclk = intel_mode_max_pixclk(dev, NULL); | 9592 | uint32_t val, data; |
| 9366 | int req_cdclk; | 9593 | int ret; |
| 9367 | 9594 | ||
| 9368 | /* see the comment in valleyview_modeset_global_resources */ | 9595 | if (WARN((I915_READ(LCPLL_CTL) & |
| 9369 | if (WARN_ON(max_pixclk < 0)) | 9596 | (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK | |
| 9597 | LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE | | ||
| 9598 | LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW | | ||
| 9599 | LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK, | ||
| 9600 | "trying to change cdclk frequency with cdclk not enabled\n")) | ||
| 9370 | return; | 9601 | return; |
| 9371 | 9602 | ||
| 9372 | req_cdclk = broxton_calc_cdclk(dev_priv, max_pixclk); | 9603 | mutex_lock(&dev_priv->rps.hw_lock); |
| 9604 | ret = sandybridge_pcode_write(dev_priv, | ||
| 9605 | BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0); | ||
| 9606 | mutex_unlock(&dev_priv->rps.hw_lock); | ||
| 9607 | if (ret) { | ||
| 9608 | DRM_ERROR("failed to inform pcode about cdclk change\n"); | ||
| 9609 | return; | ||
| 9610 | } | ||
| 9373 | 9611 | ||
| 9374 | if (req_cdclk != dev_priv->cdclk_freq) | 9612 | val = I915_READ(LCPLL_CTL); |
| 9375 | broxton_set_cdclk(dev, req_cdclk); | 9613 | val |= LCPLL_CD_SOURCE_FCLK; |
| 9614 | I915_WRITE(LCPLL_CTL, val); | ||
| 9615 | |||
| 9616 | if (wait_for_atomic_us(I915_READ(LCPLL_CTL) & | ||
| 9617 | LCPLL_CD_SOURCE_FCLK_DONE, 1)) | ||
| 9618 | DRM_ERROR("Switching to FCLK failed\n"); | ||
| 9619 | |||
| 9620 | val = I915_READ(LCPLL_CTL); | ||
| 9621 | val &= ~LCPLL_CLK_FREQ_MASK; | ||
| 9622 | |||
| 9623 | switch (cdclk) { | ||
| 9624 | case 450000: | ||
| 9625 | val |= LCPLL_CLK_FREQ_450; | ||
| 9626 | data = 0; | ||
| 9627 | break; | ||
| 9628 | case 540000: | ||
| 9629 | val |= LCPLL_CLK_FREQ_54O_BDW; | ||
| 9630 | data = 1; | ||
| 9631 | break; | ||
| 9632 | case 337500: | ||
| 9633 | val |= LCPLL_CLK_FREQ_337_5_BDW; | ||
| 9634 | data = 2; | ||
| 9635 | break; | ||
| 9636 | case 675000: | ||
| 9637 | val |= LCPLL_CLK_FREQ_675_BDW; | ||
| 9638 | data = 3; | ||
| 9639 | break; | ||
| 9640 | default: | ||
| 9641 | WARN(1, "invalid cdclk frequency\n"); | ||
| 9642 | return; | ||
| 9643 | } | ||
| 9644 | |||
| 9645 | I915_WRITE(LCPLL_CTL, val); | ||
| 9646 | |||
| 9647 | val = I915_READ(LCPLL_CTL); | ||
| 9648 | val &= ~LCPLL_CD_SOURCE_FCLK; | ||
| 9649 | I915_WRITE(LCPLL_CTL, val); | ||
| 9650 | |||
| 9651 | if (wait_for_atomic_us((I915_READ(LCPLL_CTL) & | ||
| 9652 | LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) | ||
| 9653 | DRM_ERROR("Switching back to LCPLL failed\n"); | ||
| 9654 | |||
| 9655 | mutex_lock(&dev_priv->rps.hw_lock); | ||
| 9656 | sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data); | ||
| 9657 | mutex_unlock(&dev_priv->rps.hw_lock); | ||
| 9658 | |||
| 9659 | intel_update_cdclk(dev); | ||
| 9660 | |||
| 9661 | WARN(cdclk != dev_priv->cdclk_freq, | ||
| 9662 | "cdclk requested %d kHz but got %d kHz\n", | ||
| 9663 | cdclk, dev_priv->cdclk_freq); | ||
| 9664 | } | ||
| 9665 | |||
| 9666 | static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) | ||
| 9667 | { | ||
| 9668 | struct drm_i915_private *dev_priv = to_i915(state->dev); | ||
| 9669 | int max_pixclk = ilk_max_pixel_rate(state); | ||
| 9670 | int cdclk; | ||
| 9671 | |||
| 9672 | /* | ||
| 9673 | * FIXME should also account for plane ratio | ||
| 9674 | * once 64bpp pixel formats are supported. | ||
| 9675 | */ | ||
| 9676 | if (max_pixclk > 540000) | ||
| 9677 | cdclk = 675000; | ||
| 9678 | else if (max_pixclk > 450000) | ||
| 9679 | cdclk = 540000; | ||
| 9680 | else if (max_pixclk > 337500) | ||
| 9681 | cdclk = 450000; | ||
| 9682 | else | ||
| 9683 | cdclk = 337500; | ||
| 9684 | |||
| 9685 | /* | ||
| 9686 | * FIXME move the cdclk calculation to | ||
| 9687 | * compute_config() so we can fail gracefully. | ||
| 9688 | */ | ||
| 9689 | if (cdclk > dev_priv->max_cdclk_freq) { | ||
| 9690 | DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n", | ||
| 9691 | cdclk, dev_priv->max_cdclk_freq); | ||
| 9692 | cdclk = dev_priv->max_cdclk_freq; | ||
| 9693 | } | ||
| 9694 | |||
| 9695 | to_intel_atomic_state(state)->cdclk = cdclk; | ||
| 9696 | |||
| 9697 | return 0; | ||
| 9698 | } | ||
| 9699 | |||
| 9700 | static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state) | ||
| 9701 | { | ||
| 9702 | struct drm_device *dev = old_state->dev; | ||
| 9703 | unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk; | ||
| 9704 | |||
| 9705 | broadwell_set_cdclk(dev, req_cdclk); | ||
| 9376 | } | 9706 | } |
| 9377 | 9707 | ||
| 9378 | static int haswell_crtc_compute_clock(struct intel_crtc *crtc, | 9708 | static int haswell_crtc_compute_clock(struct intel_crtc *crtc, |
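Editor's note: a worked example of the new Broadwell cdclk selection above, using an illustrative single-pipe 3840x2160@60 configuration with a pixel rate of roughly 533 MHz (not a value from this patch):

	int pixel_rate = 533000;				/* kHz, ~4k@60 */
	/* without IPS: 450000 < 533000 <= 540000  ->  cdclk = 540000 kHz        */
	pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);	/* IPS on BDW: 561053 kHz */
	/* with IPS:    561053 > 540000            ->  cdclk = 675000 kHz        */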
| @@ -9978,7 +10308,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, | |||
| 9978 | retry: | 10308 | retry: |
| 9979 | ret = drm_modeset_lock(&config->connection_mutex, ctx); | 10309 | ret = drm_modeset_lock(&config->connection_mutex, ctx); |
| 9980 | if (ret) | 10310 | if (ret) |
| 9981 | goto fail_unlock; | 10311 | goto fail; |
| 9982 | 10312 | ||
| 9983 | /* | 10313 | /* |
| 9984 | * Algorithm gets a little messy: | 10314 | * Algorithm gets a little messy: |
| @@ -9996,10 +10326,10 @@ retry: | |||
| 9996 | 10326 | ||
| 9997 | ret = drm_modeset_lock(&crtc->mutex, ctx); | 10327 | ret = drm_modeset_lock(&crtc->mutex, ctx); |
| 9998 | if (ret) | 10328 | if (ret) |
| 9999 | goto fail_unlock; | 10329 | goto fail; |
| 10000 | ret = drm_modeset_lock(&crtc->primary->mutex, ctx); | 10330 | ret = drm_modeset_lock(&crtc->primary->mutex, ctx); |
| 10001 | if (ret) | 10331 | if (ret) |
| 10002 | goto fail_unlock; | 10332 | goto fail; |
| 10003 | 10333 | ||
| 10004 | old->dpms_mode = connector->dpms; | 10334 | old->dpms_mode = connector->dpms; |
| 10005 | old->load_detect_temp = false; | 10335 | old->load_detect_temp = false; |
| @@ -10018,9 +10348,6 @@ retry: | |||
| 10018 | continue; | 10348 | continue; |
| 10019 | if (possible_crtc->state->enable) | 10349 | if (possible_crtc->state->enable) |
| 10020 | continue; | 10350 | continue; |
| 10021 | /* This can occur when applying the pipe A quirk on resume. */ | ||
| 10022 | if (to_intel_crtc(possible_crtc)->new_enabled) | ||
| 10023 | continue; | ||
| 10024 | 10351 | ||
| 10025 | crtc = possible_crtc; | 10352 | crtc = possible_crtc; |
| 10026 | break; | 10353 | break; |
| @@ -10031,20 +10358,17 @@ retry: | |||
| 10031 | */ | 10358 | */ |
| 10032 | if (!crtc) { | 10359 | if (!crtc) { |
| 10033 | DRM_DEBUG_KMS("no pipe available for load-detect\n"); | 10360 | DRM_DEBUG_KMS("no pipe available for load-detect\n"); |
| 10034 | goto fail_unlock; | 10361 | goto fail; |
| 10035 | } | 10362 | } |
| 10036 | 10363 | ||
| 10037 | ret = drm_modeset_lock(&crtc->mutex, ctx); | 10364 | ret = drm_modeset_lock(&crtc->mutex, ctx); |
| 10038 | if (ret) | 10365 | if (ret) |
| 10039 | goto fail_unlock; | 10366 | goto fail; |
| 10040 | ret = drm_modeset_lock(&crtc->primary->mutex, ctx); | 10367 | ret = drm_modeset_lock(&crtc->primary->mutex, ctx); |
| 10041 | if (ret) | 10368 | if (ret) |
| 10042 | goto fail_unlock; | 10369 | goto fail; |
| 10043 | intel_encoder->new_crtc = to_intel_crtc(crtc); | ||
| 10044 | to_intel_connector(connector)->new_encoder = intel_encoder; | ||
| 10045 | 10370 | ||
| 10046 | intel_crtc = to_intel_crtc(crtc); | 10371 | intel_crtc = to_intel_crtc(crtc); |
| 10047 | intel_crtc->new_enabled = true; | ||
| 10048 | old->dpms_mode = connector->dpms; | 10372 | old->dpms_mode = connector->dpms; |
| 10049 | old->load_detect_temp = true; | 10373 | old->load_detect_temp = true; |
| 10050 | old->release_fb = NULL; | 10374 | old->release_fb = NULL; |
| @@ -10100,7 +10424,7 @@ retry: | |||
| 10100 | 10424 | ||
| 10101 | drm_mode_copy(&crtc_state->base.mode, mode); | 10425 | drm_mode_copy(&crtc_state->base.mode, mode); |
| 10102 | 10426 | ||
| 10103 | if (intel_set_mode(crtc, state, true)) { | 10427 | if (drm_atomic_commit(state)) { |
| 10104 | DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); | 10428 | DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); |
| 10105 | if (old->release_fb) | 10429 | if (old->release_fb) |
| 10106 | old->release_fb->funcs->destroy(old->release_fb); | 10430 | old->release_fb->funcs->destroy(old->release_fb); |
| @@ -10112,9 +10436,7 @@ retry: | |||
| 10112 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 10436 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
| 10113 | return true; | 10437 | return true; |
| 10114 | 10438 | ||
| 10115 | fail: | 10439 | fail: |
| 10116 | intel_crtc->new_enabled = crtc->state->enable; | ||
| 10117 | fail_unlock: | ||
| 10118 | drm_atomic_state_free(state); | 10440 | drm_atomic_state_free(state); |
| 10119 | state = NULL; | 10441 | state = NULL; |
| 10120 | 10442 | ||
| @@ -10160,10 +10482,6 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, | |||
| 10160 | if (IS_ERR(crtc_state)) | 10482 | if (IS_ERR(crtc_state)) |
| 10161 | goto fail; | 10483 | goto fail; |
| 10162 | 10484 | ||
| 10163 | to_intel_connector(connector)->new_encoder = NULL; | ||
| 10164 | intel_encoder->new_crtc = NULL; | ||
| 10165 | intel_crtc->new_enabled = false; | ||
| 10166 | |||
| 10167 | connector_state->best_encoder = NULL; | 10485 | connector_state->best_encoder = NULL; |
| 10168 | connector_state->crtc = NULL; | 10486 | connector_state->crtc = NULL; |
| 10169 | 10487 | ||
| @@ -10174,7 +10492,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, | |||
| 10174 | if (ret) | 10492 | if (ret) |
| 10175 | goto fail; | 10493 | goto fail; |
| 10176 | 10494 | ||
| 10177 | ret = intel_set_mode(crtc, state, true); | 10495 | ret = drm_atomic_commit(state); |
| 10178 | if (ret) | 10496 | if (ret) |
| 10179 | goto fail; | 10497 | goto fail; |
| 10180 | 10498 | ||
| @@ -10222,6 +10540,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, | |||
| 10222 | u32 dpll = pipe_config->dpll_hw_state.dpll; | 10540 | u32 dpll = pipe_config->dpll_hw_state.dpll; |
| 10223 | u32 fp; | 10541 | u32 fp; |
| 10224 | intel_clock_t clock; | 10542 | intel_clock_t clock; |
| 10543 | int port_clock; | ||
| 10225 | int refclk = i9xx_pll_refclk(dev, pipe_config); | 10544 | int refclk = i9xx_pll_refclk(dev, pipe_config); |
| 10226 | 10545 | ||
| 10227 | if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) | 10546 | if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) |
| @@ -10262,9 +10581,9 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, | |||
| 10262 | } | 10581 | } |
| 10263 | 10582 | ||
| 10264 | if (IS_PINEVIEW(dev)) | 10583 | if (IS_PINEVIEW(dev)) |
| 10265 | pineview_clock(refclk, &clock); | 10584 | port_clock = pnv_calc_dpll_params(refclk, &clock); |
| 10266 | else | 10585 | else |
| 10267 | i9xx_clock(refclk, &clock); | 10586 | port_clock = i9xx_calc_dpll_params(refclk, &clock); |
| 10268 | } else { | 10587 | } else { |
| 10269 | u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS); | 10588 | u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS); |
| 10270 | bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); | 10589 | bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); |
| @@ -10290,7 +10609,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, | |||
| 10290 | clock.p2 = 2; | 10609 | clock.p2 = 2; |
| 10291 | } | 10610 | } |
| 10292 | 10611 | ||
| 10293 | i9xx_clock(refclk, &clock); | 10612 | port_clock = i9xx_calc_dpll_params(refclk, &clock); |
| 10294 | } | 10613 | } |
| 10295 | 10614 | ||
| 10296 | /* | 10615 | /* |
| @@ -10298,7 +10617,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, | |||
| 10298 | * port_clock to compute adjusted_mode.crtc_clock in the | 10617 | * port_clock to compute adjusted_mode.crtc_clock in the |
| 10299 | * encoder's get_config() function. | 10618 | * encoder's get_config() function. |
| 10300 | */ | 10619 | */ |
| 10301 | pipe_config->port_clock = clock.dot; | 10620 | pipe_config->port_clock = port_clock; |
| 10302 | } | 10621 | } |
| 10303 | 10622 | ||
| 10304 | int intel_dotclock_calculate(int link_freq, | 10623 | int intel_dotclock_calculate(int link_freq, |
| @@ -10387,42 +10706,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
| 10387 | return mode; | 10706 | return mode; |
| 10388 | } | 10707 | } |
| 10389 | 10708 | ||
| 10390 | static void intel_decrease_pllclock(struct drm_crtc *crtc) | ||
| 10391 | { | ||
| 10392 | struct drm_device *dev = crtc->dev; | ||
| 10393 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 10394 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
| 10395 | |||
| 10396 | if (!HAS_GMCH_DISPLAY(dev)) | ||
| 10397 | return; | ||
| 10398 | |||
| 10399 | if (!dev_priv->lvds_downclock_avail) | ||
| 10400 | return; | ||
| 10401 | |||
| 10402 | /* | ||
| 10403 | * Since this is called by a timer, we should never get here in | ||
| 10404 | * the manual case. | ||
| 10405 | */ | ||
| 10406 | if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { | ||
| 10407 | int pipe = intel_crtc->pipe; | ||
| 10408 | int dpll_reg = DPLL(pipe); | ||
| 10409 | int dpll; | ||
| 10410 | |||
| 10411 | DRM_DEBUG_DRIVER("downclocking LVDS\n"); | ||
| 10412 | |||
| 10413 | assert_panel_unlocked(dev_priv, pipe); | ||
| 10414 | |||
| 10415 | dpll = I915_READ(dpll_reg); | ||
| 10416 | dpll |= DISPLAY_RATE_SELECT_FPA1; | ||
| 10417 | I915_WRITE(dpll_reg, dpll); | ||
| 10418 | intel_wait_for_vblank(dev, pipe); | ||
| 10419 | dpll = I915_READ(dpll_reg); | ||
| 10420 | if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) | ||
| 10421 | DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); | ||
| 10422 | } | ||
| 10423 | |||
| 10424 | } | ||
| 10425 | |||
| 10426 | void intel_mark_busy(struct drm_device *dev) | 10709 | void intel_mark_busy(struct drm_device *dev) |
| 10427 | { | 10710 | { |
| 10428 | struct drm_i915_private *dev_priv = dev->dev_private; | 10711 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -10440,20 +10723,12 @@ void intel_mark_busy(struct drm_device *dev) | |||
| 10440 | void intel_mark_idle(struct drm_device *dev) | 10723 | void intel_mark_idle(struct drm_device *dev) |
| 10441 | { | 10724 | { |
| 10442 | struct drm_i915_private *dev_priv = dev->dev_private; | 10725 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 10443 | struct drm_crtc *crtc; | ||
| 10444 | 10726 | ||
| 10445 | if (!dev_priv->mm.busy) | 10727 | if (!dev_priv->mm.busy) |
| 10446 | return; | 10728 | return; |
| 10447 | 10729 | ||
| 10448 | dev_priv->mm.busy = false; | 10730 | dev_priv->mm.busy = false; |
| 10449 | 10731 | ||
| 10450 | for_each_crtc(dev, crtc) { | ||
| 10451 | if (!crtc->primary->fb) | ||
| 10452 | continue; | ||
| 10453 | |||
| 10454 | intel_decrease_pllclock(crtc); | ||
| 10455 | } | ||
| 10456 | |||
| 10457 | if (INTEL_INFO(dev)->gen >= 6) | 10732 | if (INTEL_INFO(dev)->gen >= 6) |
| 10458 | gen6_rps_idle(dev->dev_private); | 10733 | gen6_rps_idle(dev->dev_private); |
| 10459 | 10734 | ||
| @@ -10485,24 +10760,26 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
| 10485 | { | 10760 | { |
| 10486 | struct intel_unpin_work *work = | 10761 | struct intel_unpin_work *work = |
| 10487 | container_of(__work, struct intel_unpin_work, work); | 10762 | container_of(__work, struct intel_unpin_work, work); |
| 10488 | struct drm_device *dev = work->crtc->dev; | 10763 | struct intel_crtc *crtc = to_intel_crtc(work->crtc); |
| 10489 | enum pipe pipe = to_intel_crtc(work->crtc)->pipe; | 10764 | struct drm_device *dev = crtc->base.dev; |
| 10765 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 10766 | struct drm_plane *primary = crtc->base.primary; | ||
| 10490 | 10767 | ||
| 10491 | mutex_lock(&dev->struct_mutex); | 10768 | mutex_lock(&dev->struct_mutex); |
| 10492 | intel_unpin_fb_obj(work->old_fb, work->crtc->primary->state); | 10769 | intel_unpin_fb_obj(work->old_fb, primary->state); |
| 10493 | drm_gem_object_unreference(&work->pending_flip_obj->base); | 10770 | drm_gem_object_unreference(&work->pending_flip_obj->base); |
| 10494 | 10771 | ||
| 10495 | intel_fbc_update(dev); | 10772 | intel_fbc_update(dev_priv); |
| 10496 | 10773 | ||
| 10497 | if (work->flip_queued_req) | 10774 | if (work->flip_queued_req) |
| 10498 | i915_gem_request_assign(&work->flip_queued_req, NULL); | 10775 | i915_gem_request_assign(&work->flip_queued_req, NULL); |
| 10499 | mutex_unlock(&dev->struct_mutex); | 10776 | mutex_unlock(&dev->struct_mutex); |
| 10500 | 10777 | ||
| 10501 | intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); | 10778 | intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit); |
| 10502 | drm_framebuffer_unreference(work->old_fb); | 10779 | drm_framebuffer_unreference(work->old_fb); |
| 10503 | 10780 | ||
| 10504 | BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0); | 10781 | BUG_ON(atomic_read(&crtc->unpin_work_count) == 0); |
| 10505 | atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count); | 10782 | atomic_dec(&crtc->unpin_work_count); |
| 10506 | 10783 | ||
| 10507 | kfree(work); | 10784 | kfree(work); |
| 10508 | } | 10785 | } |
| @@ -10635,14 +10912,15 @@ static int intel_gen2_queue_flip(struct drm_device *dev, | |||
| 10635 | struct drm_crtc *crtc, | 10912 | struct drm_crtc *crtc, |
| 10636 | struct drm_framebuffer *fb, | 10913 | struct drm_framebuffer *fb, |
| 10637 | struct drm_i915_gem_object *obj, | 10914 | struct drm_i915_gem_object *obj, |
| 10638 | struct intel_engine_cs *ring, | 10915 | struct drm_i915_gem_request *req, |
| 10639 | uint32_t flags) | 10916 | uint32_t flags) |
| 10640 | { | 10917 | { |
| 10918 | struct intel_engine_cs *ring = req->ring; | ||
| 10641 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 10919 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 10642 | u32 flip_mask; | 10920 | u32 flip_mask; |
| 10643 | int ret; | 10921 | int ret; |
| 10644 | 10922 | ||
| 10645 | ret = intel_ring_begin(ring, 6); | 10923 | ret = intel_ring_begin(req, 6); |
| 10646 | if (ret) | 10924 | if (ret) |
| 10647 | return ret; | 10925 | return ret; |
| 10648 | 10926 | ||
| @@ -10662,7 +10940,6 @@ static int intel_gen2_queue_flip(struct drm_device *dev, | |||
| 10662 | intel_ring_emit(ring, 0); /* aux display base address, unused */ | 10940 | intel_ring_emit(ring, 0); /* aux display base address, unused */ |
| 10663 | 10941 | ||
| 10664 | intel_mark_page_flip_active(intel_crtc); | 10942 | intel_mark_page_flip_active(intel_crtc); |
| 10665 | __intel_ring_advance(ring); | ||
| 10666 | return 0; | 10943 | return 0; |
| 10667 | } | 10944 | } |
| 10668 | 10945 | ||
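Editor's note: every queue_flip hook now takes the request instead of the ring and drops the trailing __intel_ring_advance(); submission is deferred to i915_add_request_no_flush() in the caller. A minimal sketch of the assumed shape (command payload elided):

	static int sketch_queue_flip(struct drm_i915_gem_request *req)
	{
		struct intel_engine_cs *ring = req->ring;	/* ring now comes from the request */
		int ret;

		ret = intel_ring_begin(req, 4);		/* ring space accounted against the request */
		if (ret)
			return ret;

		/* intel_ring_emit(ring, ...) the MI_DISPLAY_FLIP payload here */

		return 0;	/* no __intel_ring_advance(); the caller adds the request */
	}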
| @@ -10670,14 +10947,15 @@ static int intel_gen3_queue_flip(struct drm_device *dev, | |||
| 10670 | struct drm_crtc *crtc, | 10947 | struct drm_crtc *crtc, |
| 10671 | struct drm_framebuffer *fb, | 10948 | struct drm_framebuffer *fb, |
| 10672 | struct drm_i915_gem_object *obj, | 10949 | struct drm_i915_gem_object *obj, |
| 10673 | struct intel_engine_cs *ring, | 10950 | struct drm_i915_gem_request *req, |
| 10674 | uint32_t flags) | 10951 | uint32_t flags) |
| 10675 | { | 10952 | { |
| 10953 | struct intel_engine_cs *ring = req->ring; | ||
| 10676 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 10954 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 10677 | u32 flip_mask; | 10955 | u32 flip_mask; |
| 10678 | int ret; | 10956 | int ret; |
| 10679 | 10957 | ||
| 10680 | ret = intel_ring_begin(ring, 6); | 10958 | ret = intel_ring_begin(req, 6); |
| 10681 | if (ret) | 10959 | if (ret) |
| 10682 | return ret; | 10960 | return ret; |
| 10683 | 10961 | ||
| @@ -10694,7 +10972,6 @@ static int intel_gen3_queue_flip(struct drm_device *dev, | |||
| 10694 | intel_ring_emit(ring, MI_NOOP); | 10972 | intel_ring_emit(ring, MI_NOOP); |
| 10695 | 10973 | ||
| 10696 | intel_mark_page_flip_active(intel_crtc); | 10974 | intel_mark_page_flip_active(intel_crtc); |
| 10697 | __intel_ring_advance(ring); | ||
| 10698 | return 0; | 10975 | return 0; |
| 10699 | } | 10976 | } |
| 10700 | 10977 | ||
| @@ -10702,15 +10979,16 @@ static int intel_gen4_queue_flip(struct drm_device *dev, | |||
| 10702 | struct drm_crtc *crtc, | 10979 | struct drm_crtc *crtc, |
| 10703 | struct drm_framebuffer *fb, | 10980 | struct drm_framebuffer *fb, |
| 10704 | struct drm_i915_gem_object *obj, | 10981 | struct drm_i915_gem_object *obj, |
| 10705 | struct intel_engine_cs *ring, | 10982 | struct drm_i915_gem_request *req, |
| 10706 | uint32_t flags) | 10983 | uint32_t flags) |
| 10707 | { | 10984 | { |
| 10985 | struct intel_engine_cs *ring = req->ring; | ||
| 10708 | struct drm_i915_private *dev_priv = dev->dev_private; | 10986 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 10709 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 10987 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 10710 | uint32_t pf, pipesrc; | 10988 | uint32_t pf, pipesrc; |
| 10711 | int ret; | 10989 | int ret; |
| 10712 | 10990 | ||
| 10713 | ret = intel_ring_begin(ring, 4); | 10991 | ret = intel_ring_begin(req, 4); |
| 10714 | if (ret) | 10992 | if (ret) |
| 10715 | return ret; | 10993 | return ret; |
| 10716 | 10994 | ||
| @@ -10733,7 +11011,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev, | |||
| 10733 | intel_ring_emit(ring, pf | pipesrc); | 11011 | intel_ring_emit(ring, pf | pipesrc); |
| 10734 | 11012 | ||
| 10735 | intel_mark_page_flip_active(intel_crtc); | 11013 | intel_mark_page_flip_active(intel_crtc); |
| 10736 | __intel_ring_advance(ring); | ||
| 10737 | return 0; | 11014 | return 0; |
| 10738 | } | 11015 | } |
| 10739 | 11016 | ||
| @@ -10741,15 +11018,16 @@ static int intel_gen6_queue_flip(struct drm_device *dev, | |||
| 10741 | struct drm_crtc *crtc, | 11018 | struct drm_crtc *crtc, |
| 10742 | struct drm_framebuffer *fb, | 11019 | struct drm_framebuffer *fb, |
| 10743 | struct drm_i915_gem_object *obj, | 11020 | struct drm_i915_gem_object *obj, |
| 10744 | struct intel_engine_cs *ring, | 11021 | struct drm_i915_gem_request *req, |
| 10745 | uint32_t flags) | 11022 | uint32_t flags) |
| 10746 | { | 11023 | { |
| 11024 | struct intel_engine_cs *ring = req->ring; | ||
| 10747 | struct drm_i915_private *dev_priv = dev->dev_private; | 11025 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 10748 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 11026 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 10749 | uint32_t pf, pipesrc; | 11027 | uint32_t pf, pipesrc; |
| 10750 | int ret; | 11028 | int ret; |
| 10751 | 11029 | ||
| 10752 | ret = intel_ring_begin(ring, 4); | 11030 | ret = intel_ring_begin(req, 4); |
| 10753 | if (ret) | 11031 | if (ret) |
| 10754 | return ret; | 11032 | return ret; |
| 10755 | 11033 | ||
| @@ -10769,7 +11047,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev, | |||
| 10769 | intel_ring_emit(ring, pf | pipesrc); | 11047 | intel_ring_emit(ring, pf | pipesrc); |
| 10770 | 11048 | ||
| 10771 | intel_mark_page_flip_active(intel_crtc); | 11049 | intel_mark_page_flip_active(intel_crtc); |
| 10772 | __intel_ring_advance(ring); | ||
| 10773 | return 0; | 11050 | return 0; |
| 10774 | } | 11051 | } |
| 10775 | 11052 | ||
| @@ -10777,9 +11054,10 @@ static int intel_gen7_queue_flip(struct drm_device *dev, | |||
| 10777 | struct drm_crtc *crtc, | 11054 | struct drm_crtc *crtc, |
| 10778 | struct drm_framebuffer *fb, | 11055 | struct drm_framebuffer *fb, |
| 10779 | struct drm_i915_gem_object *obj, | 11056 | struct drm_i915_gem_object *obj, |
| 10780 | struct intel_engine_cs *ring, | 11057 | struct drm_i915_gem_request *req, |
| 10781 | uint32_t flags) | 11058 | uint32_t flags) |
| 10782 | { | 11059 | { |
| 11060 | struct intel_engine_cs *ring = req->ring; | ||
| 10783 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 11061 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 10784 | uint32_t plane_bit = 0; | 11062 | uint32_t plane_bit = 0; |
| 10785 | int len, ret; | 11063 | int len, ret; |
| @@ -10821,11 +11099,11 @@ static int intel_gen7_queue_flip(struct drm_device *dev, | |||
| 10821 | * then do the cacheline alignment, and finally emit the | 11099 | * then do the cacheline alignment, and finally emit the |
| 10822 | * MI_DISPLAY_FLIP. | 11100 | * MI_DISPLAY_FLIP. |
| 10823 | */ | 11101 | */ |
| 10824 | ret = intel_ring_cacheline_align(ring); | 11102 | ret = intel_ring_cacheline_align(req); |
| 10825 | if (ret) | 11103 | if (ret) |
| 10826 | return ret; | 11104 | return ret; |
| 10827 | 11105 | ||
| 10828 | ret = intel_ring_begin(ring, len); | 11106 | ret = intel_ring_begin(req, len); |
| 10829 | if (ret) | 11107 | if (ret) |
| 10830 | return ret; | 11108 | return ret; |
| 10831 | 11109 | ||
| @@ -10864,7 +11142,6 @@ static int intel_gen7_queue_flip(struct drm_device *dev, | |||
| 10864 | intel_ring_emit(ring, (MI_NOOP)); | 11142 | intel_ring_emit(ring, (MI_NOOP)); |
| 10865 | 11143 | ||
| 10866 | intel_mark_page_flip_active(intel_crtc); | 11144 | intel_mark_page_flip_active(intel_crtc); |
| 10867 | __intel_ring_advance(ring); | ||
| 10868 | return 0; | 11145 | return 0; |
| 10869 | } | 11146 | } |
| 10870 | 11147 | ||
| @@ -10973,12 +11250,11 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc) | |||
| 10973 | static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) | 11250 | static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) |
| 10974 | { | 11251 | { |
| 10975 | struct drm_device *dev = intel_crtc->base.dev; | 11252 | struct drm_device *dev = intel_crtc->base.dev; |
| 10976 | bool atomic_update; | ||
| 10977 | u32 start_vbl_count; | 11253 | u32 start_vbl_count; |
| 10978 | 11254 | ||
| 10979 | intel_mark_page_flip_active(intel_crtc); | 11255 | intel_mark_page_flip_active(intel_crtc); |
| 10980 | 11256 | ||
| 10981 | atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count); | 11257 | intel_pipe_update_start(intel_crtc, &start_vbl_count); |
| 10982 | 11258 | ||
| 10983 | if (INTEL_INFO(dev)->gen >= 9) | 11259 | if (INTEL_INFO(dev)->gen >= 9) |
| 10984 | skl_do_mmio_flip(intel_crtc); | 11260 | skl_do_mmio_flip(intel_crtc); |
| @@ -10986,8 +11262,7 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) | |||
| 10986 | /* use_mmio_flip() restricts MMIO flips to ilk+ */ | 11262 | /* use_mmio_flip() restricts MMIO flips to ilk+ */ |
| 10987 | ilk_do_mmio_flip(intel_crtc); | 11263 | ilk_do_mmio_flip(intel_crtc); |
| 10988 | 11264 | ||
| 10989 | if (atomic_update) | 11265 | intel_pipe_update_end(intel_crtc, start_vbl_count); |
| 10990 | intel_pipe_update_end(intel_crtc, start_vbl_count); | ||
| 10991 | } | 11266 | } |
| 10992 | 11267 | ||
| 10993 | static void intel_mmio_flip_work_func(struct work_struct *work) | 11268 | static void intel_mmio_flip_work_func(struct work_struct *work) |
| @@ -11034,7 +11309,7 @@ static int intel_default_queue_flip(struct drm_device *dev, | |||
| 11034 | struct drm_crtc *crtc, | 11309 | struct drm_crtc *crtc, |
| 11035 | struct drm_framebuffer *fb, | 11310 | struct drm_framebuffer *fb, |
| 11036 | struct drm_i915_gem_object *obj, | 11311 | struct drm_i915_gem_object *obj, |
| 11037 | struct intel_engine_cs *ring, | 11312 | struct drm_i915_gem_request *req, |
| 11038 | uint32_t flags) | 11313 | uint32_t flags) |
| 11039 | { | 11314 | { |
| 11040 | return -ENODEV; | 11315 | return -ENODEV; |
| @@ -11120,6 +11395,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
| 11120 | struct intel_unpin_work *work; | 11395 | struct intel_unpin_work *work; |
| 11121 | struct intel_engine_cs *ring; | 11396 | struct intel_engine_cs *ring; |
| 11122 | bool mmio_flip; | 11397 | bool mmio_flip; |
| 11398 | struct drm_i915_gem_request *request = NULL; | ||
| 11123 | int ret; | 11399 | int ret; |
| 11124 | 11400 | ||
| 11125 | /* | 11401 | /* |
| @@ -11226,7 +11502,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
| 11226 | */ | 11502 | */ |
| 11227 | ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, | 11503 | ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, |
| 11228 | crtc->primary->state, | 11504 | crtc->primary->state, |
| 11229 | mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring); | 11505 | mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request); |
| 11230 | if (ret) | 11506 | if (ret) |
| 11231 | goto cleanup_pending; | 11507 | goto cleanup_pending; |
| 11232 | 11508 | ||
| @@ -11242,31 +11518,34 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
| 11242 | i915_gem_request_assign(&work->flip_queued_req, | 11518 | i915_gem_request_assign(&work->flip_queued_req, |
| 11243 | obj->last_write_req); | 11519 | obj->last_write_req); |
| 11244 | } else { | 11520 | } else { |
| 11245 | if (obj->last_write_req) { | 11521 | if (!request) { |
| 11246 | ret = i915_gem_check_olr(obj->last_write_req); | 11522 | ret = i915_gem_request_alloc(ring, ring->default_context, &request); |
| 11247 | if (ret) | 11523 | if (ret) |
| 11248 | goto cleanup_unpin; | 11524 | goto cleanup_unpin; |
| 11249 | } | 11525 | } |
| 11250 | 11526 | ||
| 11251 | ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, | 11527 | ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request, |
| 11252 | page_flip_flags); | 11528 | page_flip_flags); |
| 11253 | if (ret) | 11529 | if (ret) |
| 11254 | goto cleanup_unpin; | 11530 | goto cleanup_unpin; |
| 11255 | 11531 | ||
| 11256 | i915_gem_request_assign(&work->flip_queued_req, | 11532 | i915_gem_request_assign(&work->flip_queued_req, request); |
| 11257 | intel_ring_get_request(ring)); | ||
| 11258 | } | 11533 | } |
| 11259 | 11534 | ||
| 11535 | if (request) | ||
| 11536 | i915_add_request_no_flush(request); | ||
| 11537 | |||
| 11260 | work->flip_queued_vblank = drm_crtc_vblank_count(crtc); | 11538 | work->flip_queued_vblank = drm_crtc_vblank_count(crtc); |
| 11261 | work->enable_stall_check = true; | 11539 | work->enable_stall_check = true; |
| 11262 | 11540 | ||
| 11263 | i915_gem_track_fb(intel_fb_obj(work->old_fb), obj, | 11541 | i915_gem_track_fb(intel_fb_obj(work->old_fb), obj, |
| 11264 | INTEL_FRONTBUFFER_PRIMARY(pipe)); | 11542 | to_intel_plane(primary)->frontbuffer_bit); |
| 11265 | |||
| 11266 | intel_fbc_disable(dev); | ||
| 11267 | intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); | ||
| 11268 | mutex_unlock(&dev->struct_mutex); | 11543 | mutex_unlock(&dev->struct_mutex); |
| 11269 | 11544 | ||
| 11545 | intel_fbc_disable(dev_priv); | ||
| 11546 | intel_frontbuffer_flip_prepare(dev, | ||
| 11547 | to_intel_plane(primary)->frontbuffer_bit); | ||
| 11548 | |||
| 11270 | trace_i915_flip_request(intel_crtc->plane, obj); | 11549 | trace_i915_flip_request(intel_crtc->plane, obj); |
| 11271 | 11550 | ||
| 11272 | return 0; | 11551 | return 0; |
| @@ -11274,6 +11553,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
| 11274 | cleanup_unpin: | 11553 | cleanup_unpin: |
| 11275 | intel_unpin_fb_obj(fb, crtc->primary->state); | 11554 | intel_unpin_fb_obj(fb, crtc->primary->state); |
| 11276 | cleanup_pending: | 11555 | cleanup_pending: |
| 11556 | if (request) | ||
| 11557 | i915_gem_request_cancel(request); | ||
| 11277 | atomic_dec(&intel_crtc->unpin_work_count); | 11558 | atomic_dec(&intel_crtc->unpin_work_count); |
| 11278 | mutex_unlock(&dev->struct_mutex); | 11559 | mutex_unlock(&dev->struct_mutex); |
| 11279 | cleanup: | 11560 | cleanup: |
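Editor's note: the caller-side lifecycle of the flip request introduced above, condensed into one hedged sketch (error paths trimmed to the essentials; cleanup_pending cancels the request on failure):

	struct drm_i915_gem_request *request = NULL;

	ret = i915_gem_request_alloc(ring, ring->default_context, &request);
	if (ret)
		goto cleanup_unpin;

	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
					   page_flip_flags);
	if (ret)
		goto cleanup_unpin;

	i915_gem_request_assign(&work->flip_queued_req, request);
	i915_add_request_no_flush(request);	/* submit; no extra flush needed for a flip */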
| @@ -11292,8 +11573,35 @@ free_work: | |||
| 11292 | kfree(work); | 11573 | kfree(work); |
| 11293 | 11574 | ||
| 11294 | if (ret == -EIO) { | 11575 | if (ret == -EIO) { |
| 11576 | struct drm_atomic_state *state; | ||
| 11577 | struct drm_plane_state *plane_state; | ||
| 11578 | |||
| 11295 | out_hang: | 11579 | out_hang: |
| 11296 | ret = intel_plane_restore(primary); | 11580 | state = drm_atomic_state_alloc(dev); |
| 11581 | if (!state) | ||
| 11582 | return -ENOMEM; | ||
| 11583 | state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc); | ||
| 11584 | |||
| 11585 | retry: | ||
| 11586 | plane_state = drm_atomic_get_plane_state(state, primary); | ||
| 11587 | ret = PTR_ERR_OR_ZERO(plane_state); | ||
| 11588 | if (!ret) { | ||
| 11589 | drm_atomic_set_fb_for_plane(plane_state, fb); | ||
| 11590 | |||
| 11591 | ret = drm_atomic_set_crtc_for_plane(plane_state, crtc); | ||
| 11592 | if (!ret) | ||
| 11593 | ret = drm_atomic_commit(state); | ||
| 11594 | } | ||
| 11595 | |||
| 11596 | if (ret == -EDEADLK) { | ||
| 11597 | drm_modeset_backoff(state->acquire_ctx); | ||
| 11598 | drm_atomic_state_clear(state); | ||
| 11599 | goto retry; | ||
| 11600 | } | ||
| 11601 | |||
| 11602 | if (ret) | ||
| 11603 | drm_atomic_state_free(state); | ||
| 11604 | |||
| 11297 | if (ret == 0 && event) { | 11605 | if (ret == 0 && event) { |
| 11298 | spin_lock_irq(&dev->event_lock); | 11606 | spin_lock_irq(&dev->event_lock); |
| 11299 | drm_send_vblank_event(dev, pipe, event); | 11607 | drm_send_vblank_event(dev, pipe, event); |
| @@ -11303,44 +11611,278 @@ out_hang: | |||
| 11303 | return ret; | 11611 | return ret; |
| 11304 | } | 11612 | } |
| 11305 | 11613 | ||
| 11306 | static const struct drm_crtc_helper_funcs intel_helper_funcs = { | ||
| 11307 | .mode_set_base_atomic = intel_pipe_set_base_atomic, | ||
| 11308 | .load_lut = intel_crtc_load_lut, | ||
| 11309 | .atomic_begin = intel_begin_crtc_commit, | ||
| 11310 | .atomic_flush = intel_finish_crtc_commit, | ||
| 11311 | }; | ||
| 11312 | 11614 | ||
| 11313 | /** | 11615 | /** |
| 11314 | * intel_modeset_update_staged_output_state | 11616 | * intel_wm_need_update - Check whether watermarks need updating |
| 11617 | * @plane: drm plane | ||
| 11618 | * @state: new plane state | ||
| 11315 | * | 11619 | * |
| 11316 | * Updates the staged output configuration state, e.g. after we've read out the | 11620 | * Check current plane state versus the new one to determine whether |
| 11317 | * current hw state. | 11621 | * watermarks need to be recalculated. |
| 11622 | * | ||
| 11623 | * Returns true or false. | ||
| 11318 | */ | 11624 | */ |
| 11319 | static void intel_modeset_update_staged_output_state(struct drm_device *dev) | 11625 | static bool intel_wm_need_update(struct drm_plane *plane, |
| 11626 | struct drm_plane_state *state) | ||
| 11627 | { | ||
| 11628 | /* Update watermarks on tiling changes. */ | ||
| 11629 | if (!plane->state->fb || !state->fb || | ||
| 11630 | plane->state->fb->modifier[0] != state->fb->modifier[0] || | ||
| 11631 | plane->state->rotation != state->rotation) | ||
| 11632 | return true; | ||
| 11633 | |||
| 11634 | if (plane->state->crtc_w != state->crtc_w) | ||
| 11635 | return true; | ||
| 11636 | |||
| 11637 | return false; | ||
| 11638 | } | ||
| 11639 | |||
| 11640 | int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, | ||
| 11641 | struct drm_plane_state *plane_state) | ||
| 11642 | { | ||
| 11643 | struct drm_crtc *crtc = crtc_state->crtc; | ||
| 11644 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
| 11645 | struct drm_plane *plane = plane_state->plane; | ||
| 11646 | struct drm_device *dev = crtc->dev; | ||
| 11647 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 11648 | struct intel_plane_state *old_plane_state = | ||
| 11649 | to_intel_plane_state(plane->state); | ||
| 11650 | int idx = intel_crtc->base.base.id, ret; | ||
| 11651 | int i = drm_plane_index(plane); | ||
| 11652 | bool mode_changed = needs_modeset(crtc_state); | ||
| 11653 | bool was_crtc_enabled = crtc->state->active; | ||
| 11654 | bool is_crtc_enabled = crtc_state->active; | ||
| 11655 | |||
| 11656 | bool turn_off, turn_on, visible, was_visible; | ||
| 11657 | struct drm_framebuffer *fb = plane_state->fb; | ||
| 11658 | |||
| 11659 | if (crtc_state && INTEL_INFO(dev)->gen >= 9 && | ||
| 11660 | plane->type != DRM_PLANE_TYPE_CURSOR) { | ||
| 11661 | ret = skl_update_scaler_plane( | ||
| 11662 | to_intel_crtc_state(crtc_state), | ||
| 11663 | to_intel_plane_state(plane_state)); | ||
| 11664 | if (ret) | ||
| 11665 | return ret; | ||
| 11666 | } | ||
| 11667 | |||
| 11668 | /* | ||
| 11669 | * Disabling a plane is always okay; we just need to update | ||
| 11670 | * fb tracking in a special way since cleanup_fb() won't | ||
| 11671 | * get called by the plane helpers. | ||
| 11672 | */ | ||
| 11673 | if (old_plane_state->base.fb && !fb) | ||
| 11674 | intel_crtc->atomic.disabled_planes |= 1 << i; | ||
| 11675 | |||
| 11676 | was_visible = old_plane_state->visible; | ||
| 11677 | visible = to_intel_plane_state(plane_state)->visible; | ||
| 11678 | |||
| 11679 | if (!was_crtc_enabled && WARN_ON(was_visible)) | ||
| 11680 | was_visible = false; | ||
| 11681 | |||
| 11682 | if (!is_crtc_enabled && WARN_ON(visible)) | ||
| 11683 | visible = false; | ||
| 11684 | |||
| 11685 | if (!was_visible && !visible) | ||
| 11686 | return 0; | ||
| 11687 | |||
| 11688 | turn_off = was_visible && (!visible || mode_changed); | ||
| 11689 | turn_on = visible && (!was_visible || mode_changed); | ||
| 11690 | |||
| 11691 | DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx, | ||
| 11692 | plane->base.id, fb ? fb->base.id : -1); | ||
| 11693 | |||
| 11694 | DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n", | ||
| 11695 | plane->base.id, was_visible, visible, | ||
| 11696 | turn_off, turn_on, mode_changed); | ||
| 11697 | |||
| 11698 | if (turn_on) { | ||
| 11699 | intel_crtc->atomic.update_wm_pre = true; | ||
| 11700 | /* must disable cxsr around plane enable/disable */ | ||
| 11701 | if (plane->type != DRM_PLANE_TYPE_CURSOR) { | ||
| 11702 | intel_crtc->atomic.disable_cxsr = true; | ||
| 11703 | /* to potentially re-enable cxsr */ | ||
| 11704 | intel_crtc->atomic.wait_vblank = true; | ||
| 11705 | intel_crtc->atomic.update_wm_post = true; | ||
| 11706 | } | ||
| 11707 | } else if (turn_off) { | ||
| 11708 | intel_crtc->atomic.update_wm_post = true; | ||
| 11709 | /* must disable cxsr around plane enable/disable */ | ||
| 11710 | if (plane->type != DRM_PLANE_TYPE_CURSOR) { | ||
| 11711 | if (is_crtc_enabled) | ||
| 11712 | intel_crtc->atomic.wait_vblank = true; | ||
| 11713 | intel_crtc->atomic.disable_cxsr = true; | ||
| 11714 | } | ||
| 11715 | } else if (intel_wm_need_update(plane, plane_state)) { | ||
| 11716 | intel_crtc->atomic.update_wm_pre = true; | ||
| 11717 | } | ||
| 11718 | |||
| 11719 | if (visible) | ||
| 11720 | intel_crtc->atomic.fb_bits |= | ||
| 11721 | to_intel_plane(plane)->frontbuffer_bit; | ||
| 11722 | |||
| 11723 | switch (plane->type) { | ||
| 11724 | case DRM_PLANE_TYPE_PRIMARY: | ||
| 11725 | intel_crtc->atomic.wait_for_flips = true; | ||
| 11726 | intel_crtc->atomic.pre_disable_primary = turn_off; | ||
| 11727 | intel_crtc->atomic.post_enable_primary = turn_on; | ||
| 11728 | |||
| 11729 | if (turn_off) { | ||
| 11730 | /* | ||
| 11731 | * FIXME: If any other plane is still enabled on the | ||
| 11732 | * pipe we could leave IPS enabled, but for now | ||
| 11733 | * assume that when the primary plane is made | ||
| 11734 | * invisible by setting DSPCNTR to 0 in the | ||
| 11735 | * update_primary_plane() function, IPS needs to | ||
| 11736 | * be disabled. | ||
| 11737 | */ | ||
| 11738 | intel_crtc->atomic.disable_ips = true; | ||
| 11739 | |||
| 11740 | intel_crtc->atomic.disable_fbc = true; | ||
| 11741 | } | ||
| 11742 | |||
| 11743 | /* | ||
| 11744 | * FBC does not work on some platforms for rotated | ||
| 11745 | * planes, so disable it when rotation is not 0 and | ||
| 11746 | * update it when rotation is set back to 0. | ||
| 11747 | * | ||
| 11748 | * FIXME: This is redundant with the fbc update done in | ||
| 11749 | * the primary plane enable function except that that | ||
| 11750 | * one is done too late. We eventually need to unify | ||
| 11751 | * this. | ||
| 11752 | */ | ||
| 11753 | |||
| 11754 | if (visible && | ||
| 11755 | INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) && | ||
| 11756 | dev_priv->fbc.crtc == intel_crtc && | ||
| 11757 | plane_state->rotation != BIT(DRM_ROTATE_0)) | ||
| 11758 | intel_crtc->atomic.disable_fbc = true; | ||
| 11759 | |||
| 11760 | /* | ||
| 11761 | * BDW signals flip done immediately if the plane | ||
| 11762 | * is disabled, even if the plane enable is already | ||
| 11763 | * armed to occur at the next vblank :( | ||
| 11764 | */ | ||
| 11765 | if (turn_on && IS_BROADWELL(dev)) | ||
| 11766 | intel_crtc->atomic.wait_vblank = true; | ||
| 11767 | |||
| 11768 | intel_crtc->atomic.update_fbc |= visible || mode_changed; | ||
| 11769 | break; | ||
| 11770 | case DRM_PLANE_TYPE_CURSOR: | ||
| 11771 | break; | ||
| 11772 | case DRM_PLANE_TYPE_OVERLAY: | ||
| 11773 | if (turn_off && !mode_changed) { | ||
| 11774 | intel_crtc->atomic.wait_vblank = true; | ||
| 11775 | intel_crtc->atomic.update_sprite_watermarks |= | ||
| 11776 | 1 << i; | ||
| 11777 | } | ||
| 11778 | } | ||
| 11779 | return 0; | ||
| 11780 | } | ||
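Editor's note: a quick truth table for the visibility transitions computed above, assuming mode_changed is false:

	/*
	 *  was_visible  visible | turn_off  turn_on | effect
	 *       0          0    |    -         -    | early return, nothing to do
	 *       0          1    |    0         1    | update_wm_pre, cxsr disabled around the enable
	 *       1          0    |    1         0    | update_wm_post, cxsr disabled around the disable
	 *       1          1    |    0         0    | wm update only if fb tiling/rotation/width changed
	 */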
| 11781 | |||
| 11782 | static bool encoders_cloneable(const struct intel_encoder *a, | ||
| 11783 | const struct intel_encoder *b) | ||
| 11784 | { | ||
| 11785 | /* masks could be asymmetric, so check both ways */ | ||
| 11786 | return a == b || (a->cloneable & (1 << b->type) && | ||
| 11787 | b->cloneable & (1 << a->type)); | ||
| 11788 | } | ||
| 11789 | |||
| 11790 | static bool check_single_encoder_cloning(struct drm_atomic_state *state, | ||
| 11791 | struct intel_crtc *crtc, | ||
| 11792 | struct intel_encoder *encoder) | ||
| 11793 | { | ||
| 11794 | struct intel_encoder *source_encoder; | ||
| 11795 | struct drm_connector *connector; | ||
| 11796 | struct drm_connector_state *connector_state; | ||
| 11797 | int i; | ||
| 11798 | |||
| 11799 | for_each_connector_in_state(state, connector, connector_state, i) { | ||
| 11800 | if (connector_state->crtc != &crtc->base) | ||
| 11801 | continue; | ||
| 11802 | |||
| 11803 | source_encoder = | ||
| 11804 | to_intel_encoder(connector_state->best_encoder); | ||
| 11805 | if (!encoders_cloneable(encoder, source_encoder)) | ||
| 11806 | return false; | ||
| 11807 | } | ||
| 11808 | |||
| 11809 | return true; | ||
| 11810 | } | ||
| 11811 | |||
| 11812 | static bool check_encoder_cloning(struct drm_atomic_state *state, | ||
| 11813 | struct intel_crtc *crtc) | ||
| 11320 | { | 11814 | { |
| 11321 | struct intel_crtc *crtc; | ||
| 11322 | struct intel_encoder *encoder; | 11815 | struct intel_encoder *encoder; |
| 11323 | struct intel_connector *connector; | 11816 | struct drm_connector *connector; |
| 11817 | struct drm_connector_state *connector_state; | ||
| 11818 | int i; | ||
| 11324 | 11819 | ||
| 11325 | for_each_intel_connector(dev, connector) { | 11820 | for_each_connector_in_state(state, connector, connector_state, i) { |
| 11326 | connector->new_encoder = | 11821 | if (connector_state->crtc != &crtc->base) |
| 11327 | to_intel_encoder(connector->base.encoder); | 11822 | continue; |
| 11823 | |||
| 11824 | encoder = to_intel_encoder(connector_state->best_encoder); | ||
| 11825 | if (!check_single_encoder_cloning(state, crtc, encoder)) | ||
| 11826 | return false; | ||
| 11328 | } | 11827 | } |
| 11329 | 11828 | ||
| 11330 | for_each_intel_encoder(dev, encoder) { | 11829 | return true; |
| 11331 | encoder->new_crtc = | 11830 | } |
| 11332 | to_intel_crtc(encoder->base.crtc); | 11831 | |
| 11832 | static int intel_crtc_atomic_check(struct drm_crtc *crtc, | ||
| 11833 | struct drm_crtc_state *crtc_state) | ||
| 11834 | { | ||
| 11835 | struct drm_device *dev = crtc->dev; | ||
| 11836 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 11837 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
| 11838 | struct intel_crtc_state *pipe_config = | ||
| 11839 | to_intel_crtc_state(crtc_state); | ||
| 11840 | struct drm_atomic_state *state = crtc_state->state; | ||
| 11841 | int ret, idx = crtc->base.id; | ||
| 11842 | bool mode_changed = needs_modeset(crtc_state); | ||
| 11843 | |||
| 11844 | if (mode_changed && !check_encoder_cloning(state, intel_crtc)) { | ||
| 11845 | DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); | ||
| 11846 | return -EINVAL; | ||
| 11333 | } | 11847 | } |
| 11334 | 11848 | ||
| 11335 | for_each_intel_crtc(dev, crtc) { | 11849 | I915_STATE_WARN(crtc->state->active != intel_crtc->active, |
| 11336 | crtc->new_enabled = crtc->base.state->enable; | 11850 | "[CRTC:%i] mismatch between state->active(%i) and crtc->active(%i)\n", |
| 11851 | idx, crtc->state->active, intel_crtc->active); | ||
| 11852 | |||
| 11853 | if (mode_changed && !crtc_state->active) | ||
| 11854 | intel_crtc->atomic.update_wm_post = true; | ||
| 11855 | |||
| 11856 | if (mode_changed && crtc_state->enable && | ||
| 11857 | dev_priv->display.crtc_compute_clock && | ||
| 11858 | !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) { | ||
| 11859 | ret = dev_priv->display.crtc_compute_clock(intel_crtc, | ||
| 11860 | pipe_config); | ||
| 11861 | if (ret) | ||
| 11862 | return ret; | ||
| 11863 | } | ||
| 11864 | |||
| 11865 | ret = 0; | ||
| 11866 | if (INTEL_INFO(dev)->gen >= 9) { | ||
| 11867 | if (mode_changed) | ||
| 11868 | ret = skl_update_scaler_crtc(pipe_config); | ||
| 11869 | |||
| 11870 | if (!ret) | ||
| 11871 | ret = intel_atomic_setup_scalers(dev, intel_crtc, | ||
| 11872 | pipe_config); | ||
| 11337 | } | 11873 | } |
| 11874 | |||
| 11875 | return ret; | ||
| 11338 | } | 11876 | } |
| 11339 | 11877 | ||
| 11340 | /* Transitional helper to copy current connector/encoder state to | 11878 | static const struct drm_crtc_helper_funcs intel_helper_funcs = { |
| 11341 | * connector->state. This is needed so that code that is partially | 11879 | .mode_set_base_atomic = intel_pipe_set_base_atomic, |
| 11342 | * converted to atomic does the right thing. | 11880 | .load_lut = intel_crtc_load_lut, |
| 11343 | */ | 11881 | .atomic_begin = intel_begin_crtc_commit, |
| 11882 | .atomic_flush = intel_finish_crtc_commit, | ||
| 11883 | .atomic_check = intel_crtc_atomic_check, | ||
| 11884 | }; | ||
| 11885 | |||
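The intel_helper_funcs table above is what exposes the new atomic_check/atomic_begin/atomic_flush hooks to the DRM atomic helpers, and it only takes effect once it is attached to each CRTC, which the driver does when the CRTC is created. A sketch of that wiring (illustrative fragment, not a self-contained unit):

    /* Illustrative fragment: attaching the helper vtable so
     * drm_atomic_helper_check() and friends can reach the .atomic_check,
     * .atomic_begin and .atomic_flush hooks on this CRTC. The driver does
     * this at CRTC init time. */
    static void example_wire_crtc_helpers(struct intel_crtc *intel_crtc)
    {
            drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
    }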
| 11344 | static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) | 11886 | static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) |
| 11345 | { | 11887 | { |
| 11346 | struct intel_connector *connector; | 11888 | struct intel_connector *connector; |
| @@ -11358,39 +11900,6 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) | |||
| 11358 | } | 11900 | } |
| 11359 | } | 11901 | } |
| 11360 | 11902 | ||
| 11361 | /* Fixup legacy state after an atomic state swap. | ||
| 11362 | */ | ||
| 11363 | static void intel_modeset_fixup_state(struct drm_atomic_state *state) | ||
| 11364 | { | ||
| 11365 | struct intel_crtc *crtc; | ||
| 11366 | struct intel_encoder *encoder; | ||
| 11367 | struct intel_connector *connector; | ||
| 11368 | |||
| 11369 | for_each_intel_connector(state->dev, connector) { | ||
| 11370 | connector->base.encoder = connector->base.state->best_encoder; | ||
| 11371 | if (connector->base.encoder) | ||
| 11372 | connector->base.encoder->crtc = | ||
| 11373 | connector->base.state->crtc; | ||
| 11374 | } | ||
| 11375 | |||
| 11376 | /* Update crtc of disabled encoders */ | ||
| 11377 | for_each_intel_encoder(state->dev, encoder) { | ||
| 11378 | int num_connectors = 0; | ||
| 11379 | |||
| 11380 | for_each_intel_connector(state->dev, connector) | ||
| 11381 | if (connector->base.encoder == &encoder->base) | ||
| 11382 | num_connectors++; | ||
| 11383 | |||
| 11384 | if (num_connectors == 0) | ||
| 11385 | encoder->base.crtc = NULL; | ||
| 11386 | } | ||
| 11387 | |||
| 11388 | for_each_intel_crtc(state->dev, crtc) { | ||
| 11389 | crtc->base.enabled = crtc->base.state->enable; | ||
| 11390 | crtc->config = to_intel_crtc_state(crtc->base.state); | ||
| 11391 | } | ||
| 11392 | } | ||
| 11393 | |||
| 11394 | static void | 11903 | static void |
| 11395 | connected_sink_compute_bpp(struct intel_connector *connector, | 11904 | connected_sink_compute_bpp(struct intel_connector *connector, |
| 11396 | struct intel_crtc_state *pipe_config) | 11905 | struct intel_crtc_state *pipe_config) |
| @@ -11526,17 +12035,20 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, | |||
| 11526 | DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide); | 12035 | DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide); |
| 11527 | 12036 | ||
| 11528 | if (IS_BROXTON(dev)) { | 12037 | if (IS_BROXTON(dev)) { |
| 11529 | DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, " | 12038 | DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x," |
| 11530 | "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, " | 12039 | "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, " |
| 11531 | "pll6: 0x%x, pll8: 0x%x, pcsdw12: 0x%x\n", | 12040 | "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n", |
| 11532 | pipe_config->ddi_pll_sel, | 12041 | pipe_config->ddi_pll_sel, |
| 11533 | pipe_config->dpll_hw_state.ebb0, | 12042 | pipe_config->dpll_hw_state.ebb0, |
| 12043 | pipe_config->dpll_hw_state.ebb4, | ||
| 11534 | pipe_config->dpll_hw_state.pll0, | 12044 | pipe_config->dpll_hw_state.pll0, |
| 11535 | pipe_config->dpll_hw_state.pll1, | 12045 | pipe_config->dpll_hw_state.pll1, |
| 11536 | pipe_config->dpll_hw_state.pll2, | 12046 | pipe_config->dpll_hw_state.pll2, |
| 11537 | pipe_config->dpll_hw_state.pll3, | 12047 | pipe_config->dpll_hw_state.pll3, |
| 11538 | pipe_config->dpll_hw_state.pll6, | 12048 | pipe_config->dpll_hw_state.pll6, |
| 11539 | pipe_config->dpll_hw_state.pll8, | 12049 | pipe_config->dpll_hw_state.pll8, |
| 12050 | pipe_config->dpll_hw_state.pll9, | ||
| 12051 | pipe_config->dpll_hw_state.pll10, | ||
| 11540 | pipe_config->dpll_hw_state.pcsdw12); | 12052 | pipe_config->dpll_hw_state.pcsdw12); |
| 11541 | } else if (IS_SKYLAKE(dev)) { | 12053 | } else if (IS_SKYLAKE(dev)) { |
| 11542 | DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: " | 12054 | DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: " |
| @@ -11593,56 +12105,6 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, | |||
| 11593 | } | 12105 | } |
| 11594 | } | 12106 | } |
| 11595 | 12107 | ||
| 11596 | static bool encoders_cloneable(const struct intel_encoder *a, | ||
| 11597 | const struct intel_encoder *b) | ||
| 11598 | { | ||
| 11599 | /* masks could be asymmetric, so check both ways */ | ||
| 11600 | return a == b || (a->cloneable & (1 << b->type) && | ||
| 11601 | b->cloneable & (1 << a->type)); | ||
| 11602 | } | ||
| 11603 | |||
| 11604 | static bool check_single_encoder_cloning(struct drm_atomic_state *state, | ||
| 11605 | struct intel_crtc *crtc, | ||
| 11606 | struct intel_encoder *encoder) | ||
| 11607 | { | ||
| 11608 | struct intel_encoder *source_encoder; | ||
| 11609 | struct drm_connector *connector; | ||
| 11610 | struct drm_connector_state *connector_state; | ||
| 11611 | int i; | ||
| 11612 | |||
| 11613 | for_each_connector_in_state(state, connector, connector_state, i) { | ||
| 11614 | if (connector_state->crtc != &crtc->base) | ||
| 11615 | continue; | ||
| 11616 | |||
| 11617 | source_encoder = | ||
| 11618 | to_intel_encoder(connector_state->best_encoder); | ||
| 11619 | if (!encoders_cloneable(encoder, source_encoder)) | ||
| 11620 | return false; | ||
| 11621 | } | ||
| 11622 | |||
| 11623 | return true; | ||
| 11624 | } | ||
| 11625 | |||
| 11626 | static bool check_encoder_cloning(struct drm_atomic_state *state, | ||
| 11627 | struct intel_crtc *crtc) | ||
| 11628 | { | ||
| 11629 | struct intel_encoder *encoder; | ||
| 11630 | struct drm_connector *connector; | ||
| 11631 | struct drm_connector_state *connector_state; | ||
| 11632 | int i; | ||
| 11633 | |||
| 11634 | for_each_connector_in_state(state, connector, connector_state, i) { | ||
| 11635 | if (connector_state->crtc != &crtc->base) | ||
| 11636 | continue; | ||
| 11637 | |||
| 11638 | encoder = to_intel_encoder(connector_state->best_encoder); | ||
| 11639 | if (!check_single_encoder_cloning(state, crtc, encoder)) | ||
| 11640 | return false; | ||
| 11641 | } | ||
| 11642 | |||
| 11643 | return true; | ||
| 11644 | } | ||
| 11645 | |||
| 11646 | static bool check_digital_port_conflicts(struct drm_atomic_state *state) | 12108 | static bool check_digital_port_conflicts(struct drm_atomic_state *state) |
| 11647 | { | 12109 | { |
| 11648 | struct drm_device *dev = state->dev; | 12110 | struct drm_device *dev = state->dev; |
| @@ -11719,9 +12181,9 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state) | |||
| 11719 | 12181 | ||
| 11720 | static int | 12182 | static int |
| 11721 | intel_modeset_pipe_config(struct drm_crtc *crtc, | 12183 | intel_modeset_pipe_config(struct drm_crtc *crtc, |
| 11722 | struct drm_atomic_state *state, | ||
| 11723 | struct intel_crtc_state *pipe_config) | 12184 | struct intel_crtc_state *pipe_config) |
| 11724 | { | 12185 | { |
| 12186 | struct drm_atomic_state *state = pipe_config->base.state; | ||
| 11725 | struct intel_encoder *encoder; | 12187 | struct intel_encoder *encoder; |
| 11726 | struct drm_connector *connector; | 12188 | struct drm_connector *connector; |
| 11727 | struct drm_connector_state *connector_state; | 12189 | struct drm_connector_state *connector_state; |
| @@ -11729,16 +12191,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, | |||
| 11729 | int i; | 12191 | int i; |
| 11730 | bool retry = true; | 12192 | bool retry = true; |
| 11731 | 12193 | ||
| 11732 | if (!check_encoder_cloning(state, to_intel_crtc(crtc))) { | ||
| 11733 | DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); | ||
| 11734 | return -EINVAL; | ||
| 11735 | } | ||
| 11736 | |||
| 11737 | if (!check_digital_port_conflicts(state)) { | ||
| 11738 | DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); | ||
| 11739 | return -EINVAL; | ||
| 11740 | } | ||
| 11741 | |||
| 11742 | clear_intel_crtc_state(pipe_config); | 12194 | clear_intel_crtc_state(pipe_config); |
| 11743 | 12195 | ||
| 11744 | pipe_config->cpu_transcoder = | 12196 | pipe_config->cpu_transcoder = |
| @@ -11830,7 +12282,6 @@ encoder_retry: | |||
| 11830 | DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n", | 12282 | DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n", |
| 11831 | base_bpp, pipe_config->pipe_bpp, pipe_config->dither); | 12283 | base_bpp, pipe_config->pipe_bpp, pipe_config->dither); |
| 11832 | 12284 | ||
| 11833 | return 0; | ||
| 11834 | fail: | 12285 | fail: |
| 11835 | return ret; | 12286 | return ret; |
| 11836 | } | 12287 | } |
| @@ -11847,73 +12298,66 @@ static bool intel_crtc_in_use(struct drm_crtc *crtc) | |||
| 11847 | return false; | 12298 | return false; |
| 11848 | } | 12299 | } |
| 11849 | 12300 | ||
| 11850 | static bool | ||
| 11851 | needs_modeset(struct drm_crtc_state *state) | ||
| 11852 | { | ||
| 11853 | return state->mode_changed || state->active_changed; | ||
| 11854 | } | ||
| 11855 | |||
| 11856 | static void | 12301 | static void |
| 11857 | intel_modeset_update_state(struct drm_atomic_state *state) | 12302 | intel_modeset_update_state(struct drm_atomic_state *state) |
| 11858 | { | 12303 | { |
| 11859 | struct drm_device *dev = state->dev; | 12304 | struct drm_device *dev = state->dev; |
| 11860 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 11861 | struct intel_encoder *intel_encoder; | 12305 | struct intel_encoder *intel_encoder; |
| 11862 | struct drm_crtc *crtc; | 12306 | struct drm_crtc *crtc; |
| 11863 | struct drm_crtc_state *crtc_state; | 12307 | struct drm_crtc_state *crtc_state; |
| 11864 | struct drm_connector *connector; | 12308 | struct drm_connector *connector; |
| 11865 | int i; | 12309 | int i; |
| 11866 | 12310 | ||
| 11867 | intel_shared_dpll_commit(dev_priv); | 12311 | intel_shared_dpll_commit(state); |
| 11868 | 12312 | ||
| 11869 | for_each_intel_encoder(dev, intel_encoder) { | 12313 | for_each_intel_encoder(dev, intel_encoder) { |
| 11870 | if (!intel_encoder->base.crtc) | 12314 | if (!intel_encoder->base.crtc) |
| 11871 | continue; | 12315 | continue; |
| 11872 | 12316 | ||
| 11873 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | 12317 | crtc = intel_encoder->base.crtc; |
| 11874 | if (crtc != intel_encoder->base.crtc) | 12318 | crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); |
| 11875 | continue; | 12319 | if (!crtc_state || !needs_modeset(crtc->state)) |
| 11876 | 12320 | continue; | |
| 11877 | if (crtc_state->enable && needs_modeset(crtc_state)) | ||
| 11878 | intel_encoder->connectors_active = false; | ||
| 11879 | 12321 | ||
| 11880 | break; | 12322 | intel_encoder->connectors_active = false; |
| 11881 | } | ||
| 11882 | } | 12323 | } |
| 11883 | 12324 | ||
| 11884 | drm_atomic_helper_swap_state(state->dev, state); | 12325 | drm_atomic_helper_update_legacy_modeset_state(state->dev, state); |
| 11885 | intel_modeset_fixup_state(state); | ||
| 11886 | 12326 | ||
| 11887 | /* Double check state. */ | 12327 | /* Double check state. */ |
| 11888 | for_each_crtc(dev, crtc) { | 12328 | for_each_crtc_in_state(state, crtc, crtc_state, i) { |
| 11889 | WARN_ON(crtc->state->enable != intel_crtc_in_use(crtc)); | 12329 | WARN_ON(crtc->state->enable != intel_crtc_in_use(crtc)); |
| 12330 | |||
| 12331 | to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state); | ||
| 12332 | |||
| 12333 | /* Update hwmode for vblank functions */ | ||
| 12334 | if (crtc->state->active) | ||
| 12335 | crtc->hwmode = crtc->state->adjusted_mode; | ||
| 12336 | else | ||
| 12337 | crtc->hwmode.crtc_clock = 0; | ||
| 11890 | } | 12338 | } |
| 11891 | 12339 | ||
| 11892 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 12340 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
| 11893 | if (!connector->encoder || !connector->encoder->crtc) | 12341 | if (!connector->encoder || !connector->encoder->crtc) |
| 11894 | continue; | 12342 | continue; |
| 11895 | 12343 | ||
| 11896 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | 12344 | crtc = connector->encoder->crtc; |
| 11897 | if (crtc != connector->encoder->crtc) | 12345 | crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); |
| 11898 | continue; | 12346 | if (!crtc_state || !needs_modeset(crtc->state)) |
| 11899 | 12347 | continue; | |
| 11900 | if (crtc->state->enable && needs_modeset(crtc->state)) { | ||
| 11901 | struct drm_property *dpms_property = | ||
| 11902 | dev->mode_config.dpms_property; | ||
| 11903 | 12348 | ||
| 11904 | connector->dpms = DRM_MODE_DPMS_ON; | 12349 | if (crtc->state->active) { |
| 11905 | drm_object_property_set_value(&connector->base, | 12350 | struct drm_property *dpms_property = |
| 11906 | dpms_property, | 12351 | dev->mode_config.dpms_property; |
| 11907 | DRM_MODE_DPMS_ON); | ||
| 11908 | 12352 | ||
| 11909 | intel_encoder = to_intel_encoder(connector->encoder); | 12353 | connector->dpms = DRM_MODE_DPMS_ON; |
| 11910 | intel_encoder->connectors_active = true; | 12354 | drm_object_property_set_value(&connector->base, dpms_property, DRM_MODE_DPMS_ON); |
| 11911 | } | ||
| 11912 | 12355 | ||
| 11913 | break; | 12356 | intel_encoder = to_intel_encoder(connector->encoder); |
| 11914 | } | 12357 | intel_encoder->connectors_active = true; |
| 12358 | } else | ||
| 12359 | connector->dpms = DRM_MODE_DPMS_OFF; | ||
| 11915 | } | 12360 | } |
| 11916 | |||
| 11917 | } | 12361 | } |
| 11918 | 12362 | ||
| 11919 | static bool intel_fuzzy_clock_check(int clock1, int clock2) | 12363 | static bool intel_fuzzy_clock_check(int clock1, int clock2) |
| @@ -11940,27 +12384,133 @@ static bool intel_fuzzy_clock_check(int clock1, int clock2) | |||
| 11940 | base.head) \ | 12384 | base.head) \ |
| 11941 | if (mask & (1 <<(intel_crtc)->pipe)) | 12385 | if (mask & (1 <<(intel_crtc)->pipe)) |
| 11942 | 12386 | ||
| 12387 | |||
| 12388 | static bool | ||
| 12389 | intel_compare_m_n(unsigned int m, unsigned int n, | ||
| 12390 | unsigned int m2, unsigned int n2, | ||
| 12391 | bool exact) | ||
| 12392 | { | ||
| 12393 | if (m == m2 && n == n2) | ||
| 12394 | return true; | ||
| 12395 | |||
| 12396 | if (exact || !m || !n || !m2 || !n2) | ||
| 12397 | return false; | ||
| 12398 | |||
| 12399 | BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX); | ||
| 12400 | |||
| 12401 | if (m > m2) { | ||
| 12402 | while (m > m2) { | ||
| 12403 | m2 <<= 1; | ||
| 12404 | n2 <<= 1; | ||
| 12405 | } | ||
| 12406 | } else if (m < m2) { | ||
| 12407 | while (m < m2) { | ||
| 12408 | m <<= 1; | ||
| 12409 | n <<= 1; | ||
| 12410 | } | ||
| 12411 | } | ||
| 12412 | |||
| 12413 | return m == m2 && n == n2; | ||
| 12414 | } | ||
| 12415 | |||
| 12416 | static bool | ||
| 12417 | intel_compare_link_m_n(const struct intel_link_m_n *m_n, | ||
| 12418 | struct intel_link_m_n *m2_n2, | ||
| 12419 | bool adjust) | ||
| 12420 | { | ||
| 12421 | if (m_n->tu == m2_n2->tu && | ||
| 12422 | intel_compare_m_n(m_n->gmch_m, m_n->gmch_n, | ||
| 12423 | m2_n2->gmch_m, m2_n2->gmch_n, !adjust) && | ||
| 12424 | intel_compare_m_n(m_n->link_m, m_n->link_n, | ||
| 12425 | m2_n2->link_m, m2_n2->link_n, !adjust)) { | ||
| 12426 | if (adjust) | ||
| 12427 | *m2_n2 = *m_n; | ||
| 12428 | |||
| 12429 | return true; | ||
| 12430 | } | ||
| 12431 | |||
| 12432 | return false; | ||
| 12433 | } | ||
| 12434 | |||
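intel_compare_m_n() above treats two link M/N pairs as equal when they encode the same ratio a power-of-two scale apart: if the raw values differ, the smaller pair is doubled until the M values can be compared directly, so 3/5 matches 6/10 but not 5/8, and exact mode skips the scaling entirely. A standalone user-space sketch of the same idea (overflow guarding omitted; the driver bounds its inputs separately):

    #include <stdbool.h>
    #include <stdio.h>

    /* Same shape as the ratio comparison above: exact mode demands identical
     * values; otherwise the smaller pair is doubled until the M values line
     * up, and the result must then match exactly. */
    static bool ratio_equal(unsigned int m, unsigned int n,
                            unsigned int m2, unsigned int n2, bool exact)
    {
            if (m == m2 && n == n2)
                    return true;

            if (exact || !m || !n || !m2 || !n2)
                    return false;

            if (m > m2) {
                    while (m > m2) {
                            m2 <<= 1;
                            n2 <<= 1;
                    }
            } else {
                    while (m < m2) {
                            m <<= 1;
                            n <<= 1;
                    }
            }

            return m == m2 && n == n2;
    }

    int main(void)
    {
            printf("%d\n", ratio_equal(3, 5, 6, 10, false)); /* 1: same ratio */
            printf("%d\n", ratio_equal(3, 5, 5, 8, false));  /* 0: different ratio */
            printf("%d\n", ratio_equal(3, 5, 6, 10, true));  /* 0: exact match required */
            return 0;
    }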
| 11943 | static bool | 12435 | static bool |
| 11944 | intel_pipe_config_compare(struct drm_device *dev, | 12436 | intel_pipe_config_compare(struct drm_device *dev, |
| 11945 | struct intel_crtc_state *current_config, | 12437 | struct intel_crtc_state *current_config, |
| 11946 | struct intel_crtc_state *pipe_config) | 12438 | struct intel_crtc_state *pipe_config, |
| 12439 | bool adjust) | ||
| 11947 | { | 12440 | { |
| 12441 | bool ret = true; | ||
| 12442 | |||
| 12443 | #define INTEL_ERR_OR_DBG_KMS(fmt, ...) \ | ||
| 12444 | do { \ | ||
| 12445 | if (!adjust) \ | ||
| 12446 | DRM_ERROR(fmt, ##__VA_ARGS__); \ | ||
| 12447 | else \ | ||
| 12448 | DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \ | ||
| 12449 | } while (0) | ||
| 12450 | |||
| 11948 | #define PIPE_CONF_CHECK_X(name) \ | 12451 | #define PIPE_CONF_CHECK_X(name) \ |
| 11949 | if (current_config->name != pipe_config->name) { \ | 12452 | if (current_config->name != pipe_config->name) { \ |
| 11950 | DRM_ERROR("mismatch in " #name " " \ | 12453 | INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ |
| 11951 | "(expected 0x%08x, found 0x%08x)\n", \ | 12454 | "(expected 0x%08x, found 0x%08x)\n", \ |
| 11952 | current_config->name, \ | 12455 | current_config->name, \ |
| 11953 | pipe_config->name); \ | 12456 | pipe_config->name); \ |
| 11954 | return false; \ | 12457 | ret = false; \ |
| 11955 | } | 12458 | } |
| 11956 | 12459 | ||
| 11957 | #define PIPE_CONF_CHECK_I(name) \ | 12460 | #define PIPE_CONF_CHECK_I(name) \ |
| 11958 | if (current_config->name != pipe_config->name) { \ | 12461 | if (current_config->name != pipe_config->name) { \ |
| 11959 | DRM_ERROR("mismatch in " #name " " \ | 12462 | INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ |
| 11960 | "(expected %i, found %i)\n", \ | 12463 | "(expected %i, found %i)\n", \ |
| 11961 | current_config->name, \ | 12464 | current_config->name, \ |
| 11962 | pipe_config->name); \ | 12465 | pipe_config->name); \ |
| 11963 | return false; \ | 12466 | ret = false; \ |
| 12467 | } | ||
| 12468 | |||
| 12469 | #define PIPE_CONF_CHECK_M_N(name) \ | ||
| 12470 | if (!intel_compare_link_m_n(¤t_config->name, \ | ||
| 12471 | &pipe_config->name,\ | ||
| 12472 | adjust)) { \ | ||
| 12473 | INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ | ||
| 12474 | "(expected tu %i gmch %i/%i link %i/%i, " \ | ||
| 12475 | "found tu %i, gmch %i/%i link %i/%i)\n", \ | ||
| 12476 | current_config->name.tu, \ | ||
| 12477 | current_config->name.gmch_m, \ | ||
| 12478 | current_config->name.gmch_n, \ | ||
| 12479 | current_config->name.link_m, \ | ||
| 12480 | current_config->name.link_n, \ | ||
| 12481 | pipe_config->name.tu, \ | ||
| 12482 | pipe_config->name.gmch_m, \ | ||
| 12483 | pipe_config->name.gmch_n, \ | ||
| 12484 | pipe_config->name.link_m, \ | ||
| 12485 | pipe_config->name.link_n); \ | ||
| 12486 | ret = false; \ | ||
| 12487 | } | ||
| 12488 | |||
| 12489 | #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \ | ||
| 12490 | if (!intel_compare_link_m_n(¤t_config->name, \ | ||
| 12491 | &pipe_config->name, adjust) && \ | ||
| 12492 | !intel_compare_link_m_n(¤t_config->alt_name, \ | ||
| 12493 | &pipe_config->name, adjust)) { \ | ||
| 12494 | INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ | ||
| 12495 | "(expected tu %i gmch %i/%i link %i/%i, " \ | ||
| 12496 | "or tu %i gmch %i/%i link %i/%i, " \ | ||
| 12497 | "found tu %i, gmch %i/%i link %i/%i)\n", \ | ||
| 12498 | current_config->name.tu, \ | ||
| 12499 | current_config->name.gmch_m, \ | ||
| 12500 | current_config->name.gmch_n, \ | ||
| 12501 | current_config->name.link_m, \ | ||
| 12502 | current_config->name.link_n, \ | ||
| 12503 | current_config->alt_name.tu, \ | ||
| 12504 | current_config->alt_name.gmch_m, \ | ||
| 12505 | current_config->alt_name.gmch_n, \ | ||
| 12506 | current_config->alt_name.link_m, \ | ||
| 12507 | current_config->alt_name.link_n, \ | ||
| 12508 | pipe_config->name.tu, \ | ||
| 12509 | pipe_config->name.gmch_m, \ | ||
| 12510 | pipe_config->name.gmch_n, \ | ||
| 12511 | pipe_config->name.link_m, \ | ||
| 12512 | pipe_config->name.link_n); \ | ||
| 12513 | ret = false; \ | ||
| 11964 | } | 12514 | } |
| 11965 | 12515 | ||
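The reworked PIPE_CONF_CHECK_* macros above no longer return on the first mismatch: each one logs through INTEL_ERR_OR_DBG_KMS() (a DRM error for the real state checker, only a debug message when adjust is set for the fastboot comparison) and clears a shared ret flag, so a single pass reports every field that differs. A distilled standalone illustration of that accumulate-and-report pattern, with made-up field names:

    #include <stdbool.h>
    #include <stdio.h>

    struct cfg {
            int bpp;
            int lanes;
    };

    /* Log every mismatching field and only clear a shared flag, instead of
     * returning at the first difference; the error-vs-debug severity choice
     * is left out of this distilled version. */
    #define CHECK_FIELD(name)                                              \
            do {                                                           \
                    if (a->name != b->name) {                              \
                            printf("mismatch in " #name " (%d vs %d)\n",   \
                                   a->name, b->name);                      \
                            ok = false;                                    \
                    }                                                      \
            } while (0)

    static bool cfg_equal(const struct cfg *a, const struct cfg *b)
    {
            bool ok = true;

            CHECK_FIELD(bpp);
            CHECK_FIELD(lanes);

            return ok;
    }

    int main(void)
    {
            struct cfg x = { 24, 4 }, y = { 30, 2 };

            /* Prints both mismatches before reporting overall failure. */
            return cfg_equal(&x, &y) ? 0 : 1;
    }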
| 11966 | /* This is required for BDW+ where there is only one set of registers for | 12516 | /* This is required for BDW+ where there is only one set of registers for |
| @@ -11971,30 +12521,30 @@ intel_pipe_config_compare(struct drm_device *dev, | |||
| 11971 | #define PIPE_CONF_CHECK_I_ALT(name, alt_name) \ | 12521 | #define PIPE_CONF_CHECK_I_ALT(name, alt_name) \ |
| 11972 | if ((current_config->name != pipe_config->name) && \ | 12522 | if ((current_config->name != pipe_config->name) && \ |
| 11973 | (current_config->alt_name != pipe_config->name)) { \ | 12523 | (current_config->alt_name != pipe_config->name)) { \ |
| 11974 | DRM_ERROR("mismatch in " #name " " \ | 12524 | INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ |
| 11975 | "(expected %i or %i, found %i)\n", \ | 12525 | "(expected %i or %i, found %i)\n", \ |
| 11976 | current_config->name, \ | 12526 | current_config->name, \ |
| 11977 | current_config->alt_name, \ | 12527 | current_config->alt_name, \ |
| 11978 | pipe_config->name); \ | 12528 | pipe_config->name); \ |
| 11979 | return false; \ | 12529 | ret = false; \ |
| 11980 | } | 12530 | } |
| 11981 | 12531 | ||
| 11982 | #define PIPE_CONF_CHECK_FLAGS(name, mask) \ | 12532 | #define PIPE_CONF_CHECK_FLAGS(name, mask) \ |
| 11983 | if ((current_config->name ^ pipe_config->name) & (mask)) { \ | 12533 | if ((current_config->name ^ pipe_config->name) & (mask)) { \ |
| 11984 | DRM_ERROR("mismatch in " #name "(" #mask ") " \ | 12534 | INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \ |
| 11985 | "(expected %i, found %i)\n", \ | 12535 | "(expected %i, found %i)\n", \ |
| 11986 | current_config->name & (mask), \ | 12536 | current_config->name & (mask), \ |
| 11987 | pipe_config->name & (mask)); \ | 12537 | pipe_config->name & (mask)); \ |
| 11988 | return false; \ | 12538 | ret = false; \ |
| 11989 | } | 12539 | } |
| 11990 | 12540 | ||
| 11991 | #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \ | 12541 | #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \ |
| 11992 | if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ | 12542 | if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ |
| 11993 | DRM_ERROR("mismatch in " #name " " \ | 12543 | INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ |
| 11994 | "(expected %i, found %i)\n", \ | 12544 | "(expected %i, found %i)\n", \ |
| 11995 | current_config->name, \ | 12545 | current_config->name, \ |
| 11996 | pipe_config->name); \ | 12546 | pipe_config->name); \ |
| 11997 | return false; \ | 12547 | ret = false; \ |
| 11998 | } | 12548 | } |
| 11999 | 12549 | ||
| 12000 | #define PIPE_CONF_QUIRK(quirk) \ | 12550 | #define PIPE_CONF_QUIRK(quirk) \ |
| @@ -12004,35 +12554,18 @@ intel_pipe_config_compare(struct drm_device *dev, | |||
| 12004 | 12554 | ||
| 12005 | PIPE_CONF_CHECK_I(has_pch_encoder); | 12555 | PIPE_CONF_CHECK_I(has_pch_encoder); |
| 12006 | PIPE_CONF_CHECK_I(fdi_lanes); | 12556 | PIPE_CONF_CHECK_I(fdi_lanes); |
| 12007 | PIPE_CONF_CHECK_I(fdi_m_n.gmch_m); | 12557 | PIPE_CONF_CHECK_M_N(fdi_m_n); |
| 12008 | PIPE_CONF_CHECK_I(fdi_m_n.gmch_n); | ||
| 12009 | PIPE_CONF_CHECK_I(fdi_m_n.link_m); | ||
| 12010 | PIPE_CONF_CHECK_I(fdi_m_n.link_n); | ||
| 12011 | PIPE_CONF_CHECK_I(fdi_m_n.tu); | ||
| 12012 | 12558 | ||
| 12013 | PIPE_CONF_CHECK_I(has_dp_encoder); | 12559 | PIPE_CONF_CHECK_I(has_dp_encoder); |
| 12014 | 12560 | ||
| 12015 | if (INTEL_INFO(dev)->gen < 8) { | 12561 | if (INTEL_INFO(dev)->gen < 8) { |
| 12016 | PIPE_CONF_CHECK_I(dp_m_n.gmch_m); | 12562 | PIPE_CONF_CHECK_M_N(dp_m_n); |
| 12017 | PIPE_CONF_CHECK_I(dp_m_n.gmch_n); | 12563 | |
| 12018 | PIPE_CONF_CHECK_I(dp_m_n.link_m); | 12564 | PIPE_CONF_CHECK_I(has_drrs); |
| 12019 | PIPE_CONF_CHECK_I(dp_m_n.link_n); | 12565 | if (current_config->has_drrs) |
| 12020 | PIPE_CONF_CHECK_I(dp_m_n.tu); | 12566 | PIPE_CONF_CHECK_M_N(dp_m2_n2); |
| 12021 | 12567 | } else | |
| 12022 | if (current_config->has_drrs) { | 12568 | PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); |
| 12023 | PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m); | ||
| 12024 | PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n); | ||
| 12025 | PIPE_CONF_CHECK_I(dp_m2_n2.link_m); | ||
| 12026 | PIPE_CONF_CHECK_I(dp_m2_n2.link_n); | ||
| 12027 | PIPE_CONF_CHECK_I(dp_m2_n2.tu); | ||
| 12028 | } | ||
| 12029 | } else { | ||
| 12030 | PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m); | ||
| 12031 | PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n); | ||
| 12032 | PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m); | ||
| 12033 | PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n); | ||
| 12034 | PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu); | ||
| 12035 | } | ||
| 12036 | 12569 | ||
| 12037 | PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay); | 12570 | PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay); |
| 12038 | PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal); | 12571 | PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal); |
| @@ -12074,21 +12607,11 @@ intel_pipe_config_compare(struct drm_device *dev, | |||
| 12074 | PIPE_CONF_CHECK_I(pipe_src_w); | 12607 | PIPE_CONF_CHECK_I(pipe_src_w); |
| 12075 | PIPE_CONF_CHECK_I(pipe_src_h); | 12608 | PIPE_CONF_CHECK_I(pipe_src_h); |
| 12076 | 12609 | ||
| 12077 | /* | 12610 | PIPE_CONF_CHECK_I(gmch_pfit.control); |
| 12078 | * FIXME: BIOS likes to set up a cloned config with lvds+external | 12611 | /* pfit ratios are autocomputed by the hw on gen4+ */ |
| 12079 | * screen. Since we don't yet re-compute the pipe config when moving | 12612 | if (INTEL_INFO(dev)->gen < 4) |
| 12080 | * just the lvds port away to another pipe the sw tracking won't match. | 12613 | PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); |
| 12081 | * | 12614 | PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); |
| 12082 | * Proper atomic modesets with recomputed global state will fix this. | ||
| 12083 | * Until then just don't check gmch state for inherited modes. | ||
| 12084 | */ | ||
| 12085 | if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) { | ||
| 12086 | PIPE_CONF_CHECK_I(gmch_pfit.control); | ||
| 12087 | /* pfit ratios are autocomputed by the hw on gen4+ */ | ||
| 12088 | if (INTEL_INFO(dev)->gen < 4) | ||
| 12089 | PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); | ||
| 12090 | PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); | ||
| 12091 | } | ||
| 12092 | 12615 | ||
| 12093 | PIPE_CONF_CHECK_I(pch_pfit.enabled); | 12616 | PIPE_CONF_CHECK_I(pch_pfit.enabled); |
| 12094 | if (current_config->pch_pfit.enabled) { | 12617 | if (current_config->pch_pfit.enabled) { |
| @@ -12128,8 +12651,9 @@ intel_pipe_config_compare(struct drm_device *dev, | |||
| 12128 | #undef PIPE_CONF_CHECK_FLAGS | 12651 | #undef PIPE_CONF_CHECK_FLAGS |
| 12129 | #undef PIPE_CONF_CHECK_CLOCK_FUZZY | 12652 | #undef PIPE_CONF_CHECK_CLOCK_FUZZY |
| 12130 | #undef PIPE_CONF_QUIRK | 12653 | #undef PIPE_CONF_QUIRK |
| 12654 | #undef INTEL_ERR_OR_DBG_KMS | ||
| 12131 | 12655 | ||
| 12132 | return true; | 12656 | return ret; |
| 12133 | } | 12657 | } |
| 12134 | 12658 | ||
| 12135 | static void check_wm_state(struct drm_device *dev) | 12659 | static void check_wm_state(struct drm_device *dev) |
| @@ -12188,11 +12712,14 @@ check_connector_state(struct drm_device *dev) | |||
| 12188 | struct intel_connector *connector; | 12712 | struct intel_connector *connector; |
| 12189 | 12713 | ||
| 12190 | for_each_intel_connector(dev, connector) { | 12714 | for_each_intel_connector(dev, connector) { |
| 12715 | struct drm_encoder *encoder = connector->base.encoder; | ||
| 12716 | struct drm_connector_state *state = connector->base.state; | ||
| 12717 | |||
| 12191 | /* This also checks the encoder/connector hw state with the | 12718 | /* This also checks the encoder/connector hw state with the |
| 12192 | * ->get_hw_state callbacks. */ | 12719 | * ->get_hw_state callbacks. */ |
| 12193 | intel_connector_check_state(connector); | 12720 | intel_connector_check_state(connector); |
| 12194 | 12721 | ||
| 12195 | I915_STATE_WARN(&connector->new_encoder->base != connector->base.encoder, | 12722 | I915_STATE_WARN(state->best_encoder != encoder, |
| 12196 | "connector's staged encoder doesn't match current encoder\n"); | 12723 | "connector's staged encoder doesn't match current encoder\n"); |
| 12197 | } | 12724 | } |
| 12198 | } | 12725 | } |
| @@ -12212,8 +12739,6 @@ check_encoder_state(struct drm_device *dev) | |||
| 12212 | encoder->base.base.id, | 12739 | encoder->base.base.id, |
| 12213 | encoder->base.name); | 12740 | encoder->base.name); |
| 12214 | 12741 | ||
| 12215 | I915_STATE_WARN(&encoder->new_crtc->base != encoder->base.crtc, | ||
| 12216 | "encoder's stage crtc doesn't match current crtc\n"); | ||
| 12217 | I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc, | 12742 | I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc, |
| 12218 | "encoder's active_connectors set, but no crtc\n"); | 12743 | "encoder's active_connectors set, but no crtc\n"); |
| 12219 | 12744 | ||
| @@ -12223,6 +12748,10 @@ check_encoder_state(struct drm_device *dev) | |||
| 12223 | enabled = true; | 12748 | enabled = true; |
| 12224 | if (connector->base.dpms != DRM_MODE_DPMS_OFF) | 12749 | if (connector->base.dpms != DRM_MODE_DPMS_OFF) |
| 12225 | active = true; | 12750 | active = true; |
| 12751 | |||
| 12752 | I915_STATE_WARN(connector->base.state->crtc != | ||
| 12753 | encoder->base.crtc, | ||
| 12754 | "connector's crtc doesn't match encoder crtc\n"); | ||
| 12226 | } | 12755 | } |
| 12227 | /* | 12756 | /* |
| 12228 | * for MST connectors if we unplug the connector is gone | 12757 | * for MST connectors if we unplug the connector is gone |
| @@ -12317,8 +12846,15 @@ check_crtc_state(struct drm_device *dev) | |||
| 12317 | "crtc active state doesn't match with hw state " | 12846 | "crtc active state doesn't match with hw state " |
| 12318 | "(expected %i, found %i)\n", crtc->active, active); | 12847 | "(expected %i, found %i)\n", crtc->active, active); |
| 12319 | 12848 | ||
| 12320 | if (active && | 12849 | I915_STATE_WARN(crtc->active != crtc->base.state->active, |
| 12321 | !intel_pipe_config_compare(dev, crtc->config, &pipe_config)) { | 12850 | "transitional active state does not match atomic hw state " |
| 12851 | "(expected %i, found %i)\n", crtc->base.state->active, crtc->active); | ||
| 12852 | |||
| 12853 | if (!active) | ||
| 12854 | continue; | ||
| 12855 | |||
| 12856 | if (!intel_pipe_config_compare(dev, crtc->config, | ||
| 12857 | &pipe_config, false)) { | ||
| 12322 | I915_STATE_WARN(1, "pipe state doesn't match!\n"); | 12858 | I915_STATE_WARN(1, "pipe state doesn't match!\n"); |
| 12323 | intel_dump_pipe_config(crtc, &pipe_config, | 12859 | intel_dump_pipe_config(crtc, &pipe_config, |
| 12324 | "[hw state]"); | 12860 | "[hw state]"); |
| @@ -12437,558 +12973,387 @@ static void update_scanline_offset(struct intel_crtc *crtc) | |||
| 12437 | crtc->scanline_offset = 1; | 12973 | crtc->scanline_offset = 1; |
| 12438 | } | 12974 | } |
| 12439 | 12975 | ||
| 12440 | static struct intel_crtc_state * | 12976 | static void intel_modeset_clear_plls(struct drm_atomic_state *state) |
| 12441 | intel_modeset_compute_config(struct drm_crtc *crtc, | ||
| 12442 | struct drm_atomic_state *state) | ||
| 12443 | { | ||
| 12444 | struct intel_crtc_state *pipe_config; | ||
| 12445 | int ret = 0; | ||
| 12446 | |||
| 12447 | ret = drm_atomic_add_affected_connectors(state, crtc); | ||
| 12448 | if (ret) | ||
| 12449 | return ERR_PTR(ret); | ||
| 12450 | |||
| 12451 | ret = drm_atomic_helper_check_modeset(state->dev, state); | ||
| 12452 | if (ret) | ||
| 12453 | return ERR_PTR(ret); | ||
| 12454 | |||
| 12455 | /* | ||
| 12456 | * Note this needs changes when we start tracking multiple modes | ||
| 12457 | * and crtcs. At that point we'll need to compute the whole config | ||
| 12458 | * (i.e. one pipe_config for each crtc) rather than just the one | ||
| 12459 | * for this crtc. | ||
| 12460 | */ | ||
| 12461 | pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc)); | ||
| 12462 | if (IS_ERR(pipe_config)) | ||
| 12463 | return pipe_config; | ||
| 12464 | |||
| 12465 | if (!pipe_config->base.enable) | ||
| 12466 | return pipe_config; | ||
| 12467 | |||
| 12468 | ret = intel_modeset_pipe_config(crtc, state, pipe_config); | ||
| 12469 | if (ret) | ||
| 12470 | return ERR_PTR(ret); | ||
| 12471 | |||
| 12472 | /* Check things that can only be changed through modeset */ | ||
| 12473 | if (pipe_config->has_audio != | ||
| 12474 | to_intel_crtc(crtc)->config->has_audio) | ||
| 12475 | pipe_config->base.mode_changed = true; | ||
| 12476 | |||
| 12477 | /* | ||
| 12478 | * Note we have an issue here with infoframes: current code | ||
| 12479 | * only updates them on the full mode set path per hw | ||
| 12480 | * requirements. So here we should be checking for any | ||
| 12481 | * required changes and forcing a mode set. | ||
| 12482 | */ | ||
| 12483 | |||
| 12484 | intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,"[modeset]"); | ||
| 12485 | |||
| 12486 | ret = drm_atomic_helper_check_planes(state->dev, state); | ||
| 12487 | if (ret) | ||
| 12488 | return ERR_PTR(ret); | ||
| 12489 | |||
| 12490 | return pipe_config; | ||
| 12491 | } | ||
| 12492 | |||
| 12493 | static int __intel_set_mode_setup_plls(struct drm_atomic_state *state) | ||
| 12494 | { | 12977 | { |
| 12495 | struct drm_device *dev = state->dev; | 12978 | struct drm_device *dev = state->dev; |
| 12496 | struct drm_i915_private *dev_priv = to_i915(dev); | 12979 | struct drm_i915_private *dev_priv = to_i915(dev); |
| 12497 | unsigned clear_pipes = 0; | 12980 | struct intel_shared_dpll_config *shared_dpll = NULL; |
| 12498 | struct intel_crtc *intel_crtc; | 12981 | struct intel_crtc *intel_crtc; |
| 12499 | struct intel_crtc_state *intel_crtc_state; | 12982 | struct intel_crtc_state *intel_crtc_state; |
| 12500 | struct drm_crtc *crtc; | 12983 | struct drm_crtc *crtc; |
| 12501 | struct drm_crtc_state *crtc_state; | 12984 | struct drm_crtc_state *crtc_state; |
| 12502 | int ret = 0; | ||
| 12503 | int i; | 12985 | int i; |
| 12504 | 12986 | ||
| 12505 | if (!dev_priv->display.crtc_compute_clock) | 12987 | if (!dev_priv->display.crtc_compute_clock) |
| 12506 | return 0; | 12988 | return; |
| 12507 | |||
| 12508 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | ||
| 12509 | intel_crtc = to_intel_crtc(crtc); | ||
| 12510 | intel_crtc_state = to_intel_crtc_state(crtc_state); | ||
| 12511 | |||
| 12512 | if (needs_modeset(crtc_state)) { | ||
| 12513 | clear_pipes |= 1 << intel_crtc->pipe; | ||
| 12514 | intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE; | ||
| 12515 | } | ||
| 12516 | } | ||
| 12517 | |||
| 12518 | ret = intel_shared_dpll_start_config(dev_priv, clear_pipes); | ||
| 12519 | if (ret) | ||
| 12520 | goto done; | ||
| 12521 | 12989 | ||
| 12522 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | 12990 | for_each_crtc_in_state(state, crtc, crtc_state, i) { |
| 12523 | if (!needs_modeset(crtc_state) || !crtc_state->enable) | 12991 | int dpll; |
| 12524 | continue; | ||
| 12525 | 12992 | ||
| 12526 | intel_crtc = to_intel_crtc(crtc); | 12993 | intel_crtc = to_intel_crtc(crtc); |
| 12527 | intel_crtc_state = to_intel_crtc_state(crtc_state); | 12994 | intel_crtc_state = to_intel_crtc_state(crtc_state); |
| 12995 | dpll = intel_crtc_state->shared_dpll; | ||
| 12528 | 12996 | ||
| 12529 | ret = dev_priv->display.crtc_compute_clock(intel_crtc, | 12997 | if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE) |
| 12530 | intel_crtc_state); | 12998 | continue; |
| 12531 | if (ret) { | ||
| 12532 | intel_shared_dpll_abort_config(dev_priv); | ||
| 12533 | goto done; | ||
| 12534 | } | ||
| 12535 | } | ||
| 12536 | 12999 | ||
| 12537 | done: | 13000 | intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE; |
| 12538 | return ret; | ||
| 12539 | } | ||
| 12540 | 13001 | ||
| 12541 | /* Code that should eventually be part of atomic_check() */ | 13002 | if (!shared_dpll) |
| 12542 | static int __intel_set_mode_checks(struct drm_atomic_state *state) | 13003 | shared_dpll = intel_atomic_get_shared_dpll_state(state); |
| 12543 | { | ||
| 12544 | struct drm_device *dev = state->dev; | ||
| 12545 | int ret; | ||
| 12546 | 13004 | ||
| 12547 | /* | 13005 | shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe); |
| 12548 | * See if the config requires any additional preparation, e.g. | ||
| 12549 | * to adjust global state with pipes off. We need to do this | ||
| 12550 | * here so we can get the modeset_pipe updated config for the new | ||
| 12551 | * mode set on this crtc. For other crtcs we need to use the | ||
| 12552 | * adjusted_mode bits in the crtc directly. | ||
| 12553 | */ | ||
| 12554 | if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) { | ||
| 12555 | ret = valleyview_modeset_global_pipes(state); | ||
| 12556 | if (ret) | ||
| 12557 | return ret; | ||
| 12558 | } | 13006 | } |
| 12559 | |||
| 12560 | ret = __intel_set_mode_setup_plls(state); | ||
| 12561 | if (ret) | ||
| 12562 | return ret; | ||
| 12563 | |||
| 12564 | return 0; | ||
| 12565 | } | 13007 | } |
| 12566 | 13008 | ||
| 12567 | static int __intel_set_mode(struct drm_crtc *modeset_crtc, | 13009 | /* |
| 12568 | struct intel_crtc_state *pipe_config) | 13010 | * This implements the workaround described in the "notes" section of the mode |
| 13011 | * set sequence documentation. When going from no pipes or single pipe to | ||
| 13012 | * multiple pipes, and planes are enabled after the pipe, we need to wait at | ||
| 13013 | * least 2 vblanks on the first pipe before enabling planes on the second pipe. | ||
| 13014 | */ | ||
| 13015 | static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state) | ||
| 12569 | { | 13016 | { |
| 12570 | struct drm_device *dev = modeset_crtc->dev; | ||
| 12571 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 12572 | struct drm_atomic_state *state = pipe_config->base.state; | ||
| 12573 | struct drm_crtc *crtc; | ||
| 12574 | struct drm_crtc_state *crtc_state; | 13017 | struct drm_crtc_state *crtc_state; |
| 12575 | int ret = 0; | 13018 | struct intel_crtc *intel_crtc; |
| 13019 | struct drm_crtc *crtc; | ||
| 13020 | struct intel_crtc_state *first_crtc_state = NULL; | ||
| 13021 | struct intel_crtc_state *other_crtc_state = NULL; | ||
| 13022 | enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE; | ||
| 12576 | int i; | 13023 | int i; |
| 12577 | 13024 | ||
| 12578 | ret = __intel_set_mode_checks(state); | 13025 | /* look at all crtcs that are going to be enabled during the modeset */ |
| 12579 | if (ret < 0) | ||
| 12580 | return ret; | ||
| 12581 | |||
| 12582 | ret = drm_atomic_helper_prepare_planes(dev, state); | ||
| 12583 | if (ret) | ||
| 12584 | return ret; | ||
| 12585 | |||
| 12586 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | 13026 | for_each_crtc_in_state(state, crtc, crtc_state, i) { |
| 12587 | if (!needs_modeset(crtc_state)) | 13027 | intel_crtc = to_intel_crtc(crtc); |
| 13028 | |||
| 13029 | if (!crtc_state->active || !needs_modeset(crtc_state)) | ||
| 12588 | continue; | 13030 | continue; |
| 12589 | 13031 | ||
| 12590 | if (!crtc_state->enable) { | 13032 | if (first_crtc_state) { |
| 12591 | if (crtc->state->enable) | 13033 | other_crtc_state = to_intel_crtc_state(crtc_state); |
| 12592 | intel_crtc_disable(crtc); | 13034 | break; |
| 12593 | } else if (crtc->state->enable) { | 13035 | } else { |
| 12594 | intel_crtc_disable_planes(crtc); | 13036 | first_crtc_state = to_intel_crtc_state(crtc_state); |
| 12595 | dev_priv->display.crtc_disable(crtc); | 13037 | first_pipe = intel_crtc->pipe; |
| 12596 | } | 13038 | } |
| 12597 | } | 13039 | } |
| 12598 | 13040 | ||
| 12599 | /* crtc->mode is already used by the ->mode_set callbacks, hence we need | 13041 | /* No workaround needed? */ |
| 12600 | * to set it here already despite that we pass it down the callchain. | 13042 | if (!first_crtc_state) |
| 12601 | * | 13043 | return 0; |
| 12602 | * Note we'll need to fix this up when we start tracking multiple | ||
| 12603 | * pipes; here we assume a single modeset_pipe and only track the | ||
| 12604 | * single crtc and mode. | ||
| 12605 | */ | ||
| 12606 | if (pipe_config->base.enable && needs_modeset(&pipe_config->base)) { | ||
| 12607 | modeset_crtc->mode = pipe_config->base.mode; | ||
| 12608 | |||
| 12609 | /* | ||
| 12610 | * Calculate and store various constants which | ||
| 12611 | * are later needed by vblank and swap-completion | ||
| 12612 | * timestamping. They are derived from true hwmode. | ||
| 12613 | */ | ||
| 12614 | drm_calc_timestamping_constants(modeset_crtc, | ||
| 12615 | &pipe_config->base.adjusted_mode); | ||
| 12616 | } | ||
| 12617 | 13044 | ||
| 12618 | /* Only after disabling all output pipelines that will be changed can we | 13045 | /* w/a possibly needed, check how many crtc's are already enabled. */ |
| 12619 | * update the the output configuration. */ | 13046 | for_each_intel_crtc(state->dev, intel_crtc) { |
| 12620 | intel_modeset_update_state(state); | 13047 | struct intel_crtc_state *pipe_config; |
| 12621 | 13048 | ||
| 12622 | /* The state has been swaped above, so state actually contains the | 13049 | pipe_config = intel_atomic_get_crtc_state(state, intel_crtc); |
| 12623 | * old state now. */ | 13050 | if (IS_ERR(pipe_config)) |
| 13051 | return PTR_ERR(pipe_config); | ||
| 12624 | 13052 | ||
| 12625 | modeset_update_crtc_power_domains(state); | 13053 | pipe_config->hsw_workaround_pipe = INVALID_PIPE; |
| 12626 | 13054 | ||
| 12627 | drm_atomic_helper_commit_planes(dev, state); | 13055 | if (!pipe_config->base.active || |
| 12628 | 13056 | needs_modeset(&pipe_config->base)) | |
| 12629 | /* Now enable the clocks, plane, pipe, and connectors that we set up. */ | ||
| 12630 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | ||
| 12631 | if (!needs_modeset(crtc->state) || !crtc->state->enable) | ||
| 12632 | continue; | 13057 | continue; |
| 12633 | 13058 | ||
| 12634 | update_scanline_offset(to_intel_crtc(crtc)); | 13059 | /* 2 or more enabled crtcs means no need for w/a */ |
| 13060 | if (enabled_pipe != INVALID_PIPE) | ||
| 13061 | return 0; | ||
| 12635 | 13062 | ||
| 12636 | dev_priv->display.crtc_enable(crtc); | 13063 | enabled_pipe = intel_crtc->pipe; |
| 12637 | intel_crtc_enable_planes(crtc); | ||
| 12638 | } | 13064 | } |
| 12639 | 13065 | ||
| 12640 | /* FIXME: add subpixel order */ | 13066 | if (enabled_pipe != INVALID_PIPE) |
| 12641 | 13067 | first_crtc_state->hsw_workaround_pipe = enabled_pipe; | |
| 12642 | drm_atomic_helper_cleanup_planes(dev, state); | 13068 | else if (other_crtc_state) |
| 12643 | 13069 | other_crtc_state->hsw_workaround_pipe = first_pipe; | |
| 12644 | drm_atomic_state_free(state); | ||
| 12645 | 13070 | ||
| 12646 | return 0; | 13071 | return 0; |
| 12647 | } | 13072 | } |
| 12648 | 13073 | ||
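haswell_mode_set_planes_workaround() above only records which already-running pipe the delay has to be taken against (hsw_workaround_pipe); the delay itself, waiting two vblanks on that pipe before enabling planes on the newly added one, is applied later in the enable path. A hedged sketch of such a consumer, assuming the driver's intel_wait_for_vblank() helper, not the driver's exact code:

    /* Illustrative consumer of hsw_workaround_pipe: before enabling planes
     * on the new pipe, wait two vblanks on the pipe that was already
     * running, as the mode set sequence notes require. */
    static void hsw_plane_enable_delay(struct drm_device *dev,
                                       const struct intel_crtc_state *pipe_config)
    {
            enum pipe wa_pipe = pipe_config->hsw_workaround_pipe;

            if (wa_pipe == INVALID_PIPE)
                    return;

            intel_wait_for_vblank(dev, wa_pipe);
            intel_wait_for_vblank(dev, wa_pipe);
    }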
| 12649 | static int intel_set_mode_with_config(struct drm_crtc *crtc, | 13074 | static int intel_modeset_all_pipes(struct drm_atomic_state *state) |
| 12650 | struct intel_crtc_state *pipe_config, | ||
| 12651 | bool force_restore) | ||
| 12652 | { | 13075 | { |
| 12653 | int ret; | 13076 | struct drm_crtc *crtc; |
| 13077 | struct drm_crtc_state *crtc_state; | ||
| 13078 | int ret = 0; | ||
| 12654 | 13079 | ||
| 12655 | ret = __intel_set_mode(crtc, pipe_config); | 13080 | /* add all active pipes to the state */ |
| 13081 | for_each_crtc(state->dev, crtc) { | ||
| 13082 | crtc_state = drm_atomic_get_crtc_state(state, crtc); | ||
| 13083 | if (IS_ERR(crtc_state)) | ||
| 13084 | return PTR_ERR(crtc_state); | ||
| 12656 | 13085 | ||
| 12657 | if (ret == 0 && force_restore) { | 13086 | if (!crtc_state->active || needs_modeset(crtc_state)) |
| 12658 | intel_modeset_update_staged_output_state(crtc->dev); | 13087 | continue; |
| 12659 | intel_modeset_check_state(crtc->dev); | ||
| 12660 | } | ||
| 12661 | 13088 | ||
| 12662 | return ret; | 13089 | crtc_state->mode_changed = true; |
| 12663 | } | ||
| 12664 | 13090 | ||
| 12665 | static int intel_set_mode(struct drm_crtc *crtc, | 13091 | ret = drm_atomic_add_affected_connectors(state, crtc); |
| 12666 | struct drm_atomic_state *state, | 13092 | if (ret) |
| 12667 | bool force_restore) | 13093 | break; |
| 12668 | { | ||
| 12669 | struct intel_crtc_state *pipe_config; | ||
| 12670 | int ret = 0; | ||
| 12671 | 13094 | ||
| 12672 | pipe_config = intel_modeset_compute_config(crtc, state); | 13095 | ret = drm_atomic_add_affected_planes(state, crtc); |
| 12673 | if (IS_ERR(pipe_config)) { | 13096 | if (ret) |
| 12674 | ret = PTR_ERR(pipe_config); | 13097 | break; |
| 12675 | goto out; | ||
| 12676 | } | 13098 | } |
| 12677 | 13099 | ||
| 12678 | ret = intel_set_mode_with_config(crtc, pipe_config, force_restore); | ||
| 12679 | if (ret) | ||
| 12680 | goto out; | ||
| 12681 | |||
| 12682 | out: | ||
| 12683 | return ret; | 13100 | return ret; |
| 12684 | } | 13101 | } |
| 12685 | 13102 | ||
| 12686 | void intel_crtc_restore_mode(struct drm_crtc *crtc) | 13103 | |
| 13104 | static int intel_modeset_checks(struct drm_atomic_state *state) | ||
| 12687 | { | 13105 | { |
| 12688 | struct drm_device *dev = crtc->dev; | 13106 | struct drm_device *dev = state->dev; |
| 12689 | struct drm_atomic_state *state; | 13107 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 12690 | struct intel_encoder *encoder; | ||
| 12691 | struct intel_connector *connector; | ||
| 12692 | struct drm_connector_state *connector_state; | ||
| 12693 | struct intel_crtc_state *crtc_state; | ||
| 12694 | int ret; | 13108 | int ret; |
| 12695 | 13109 | ||
| 12696 | state = drm_atomic_state_alloc(dev); | 13110 | if (!check_digital_port_conflicts(state)) { |
| 12697 | if (!state) { | 13111 | DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); |
| 12698 | DRM_DEBUG_KMS("[CRTC:%d] mode restore failed, out of memory", | 13112 | return -EINVAL; |
| 12699 | crtc->base.id); | ||
| 12700 | return; | ||
| 12701 | } | ||
| 12702 | |||
| 12703 | state->acquire_ctx = dev->mode_config.acquire_ctx; | ||
| 12704 | |||
| 12705 | /* The force restore path in the HW readout code relies on the staged | ||
| 12706 | * config still keeping the user requested config while the actual | ||
| 12707 | * state has been overwritten by the configuration read from HW. We | ||
| 12708 | * need to copy the staged config to the atomic state, otherwise the | ||
| 12709 | * mode set will just reapply the state the HW is already in. */ | ||
| 12710 | for_each_intel_encoder(dev, encoder) { | ||
| 12711 | if (&encoder->new_crtc->base != crtc) | ||
| 12712 | continue; | ||
| 12713 | |||
| 12714 | for_each_intel_connector(dev, connector) { | ||
| 12715 | if (connector->new_encoder != encoder) | ||
| 12716 | continue; | ||
| 12717 | |||
| 12718 | connector_state = drm_atomic_get_connector_state(state, &connector->base); | ||
| 12719 | if (IS_ERR(connector_state)) { | ||
| 12720 | DRM_DEBUG_KMS("Failed to add [CONNECTOR:%d:%s] to state: %ld\n", | ||
| 12721 | connector->base.base.id, | ||
| 12722 | connector->base.name, | ||
| 12723 | PTR_ERR(connector_state)); | ||
| 12724 | continue; | ||
| 12725 | } | ||
| 12726 | |||
| 12727 | connector_state->crtc = crtc; | ||
| 12728 | connector_state->best_encoder = &encoder->base; | ||
| 12729 | } | ||
| 12730 | } | ||
| 12731 | |||
| 12732 | crtc_state = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc)); | ||
| 12733 | if (IS_ERR(crtc_state)) { | ||
| 12734 | DRM_DEBUG_KMS("Failed to add [CRTC:%d] to state: %ld\n", | ||
| 12735 | crtc->base.id, PTR_ERR(crtc_state)); | ||
| 12736 | drm_atomic_state_free(state); | ||
| 12737 | return; | ||
| 12738 | } | 13113 | } |
| 12739 | 13114 | ||
| 12740 | crtc_state->base.active = crtc_state->base.enable = | 13115 | /* |
| 12741 | to_intel_crtc(crtc)->new_enabled; | 13116 | * See if the config requires any additional preparation, e.g. |
| 12742 | 13117 | * to adjust global state with pipes off. We need to do this | |
| 12743 | drm_mode_copy(&crtc_state->base.mode, &crtc->mode); | 13118 | * here so we can get the modeset_pipe updated config for the new |
| 13119 | * mode set on this crtc. For other crtcs we need to use the | ||
| 13120 | * adjusted_mode bits in the crtc directly. | ||
| 13121 | */ | ||
| 13122 | if (dev_priv->display.modeset_calc_cdclk) { | ||
| 13123 | unsigned int cdclk; | ||
| 12744 | 13124 | ||
| 12745 | intel_modeset_setup_plane_state(state, crtc, &crtc->mode, | 13125 | ret = dev_priv->display.modeset_calc_cdclk(state); |
| 12746 | crtc->primary->fb, crtc->x, crtc->y); | ||
| 12747 | 13126 | ||
| 12748 | ret = intel_set_mode(crtc, state, false); | 13127 | cdclk = to_intel_atomic_state(state)->cdclk; |
| 12749 | if (ret) | 13128 | if (!ret && cdclk != dev_priv->cdclk_freq) |
| 12750 | drm_atomic_state_free(state); | 13129 | ret = intel_modeset_all_pipes(state); |
| 12751 | } | ||
| 12752 | 13130 | ||
| 12753 | #undef for_each_intel_crtc_masked | 13131 | if (ret < 0) |
| 13132 | return ret; | ||
| 13133 | } else | ||
| 13134 | to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq; | ||
| 12754 | 13135 | ||
| 12755 | static bool intel_connector_in_mode_set(struct intel_connector *connector, | 13136 | intel_modeset_clear_plls(state); |
| 12756 | struct drm_mode_set *set) | ||
| 12757 | { | ||
| 12758 | int ro; | ||
| 12759 | 13137 | ||
| 12760 | for (ro = 0; ro < set->num_connectors; ro++) | 13138 | if (IS_HASWELL(dev)) |
| 12761 | if (set->connectors[ro] == &connector->base) | 13139 | return haswell_mode_set_planes_workaround(state); |
| 12762 | return true; | ||
| 12763 | 13140 | ||
| 12764 | return false; | 13141 | return 0; |
| 12765 | } | 13142 | } |
| 12766 | 13143 | ||
| 12767 | static int | 13144 | /** |
| 12768 | intel_modeset_stage_output_state(struct drm_device *dev, | 13145 | * intel_atomic_check - validate state object |
| 12769 | struct drm_mode_set *set, | 13146 | * @dev: drm device |
| 12770 | struct drm_atomic_state *state) | 13147 | * @state: state to validate |
| 13148 | */ | ||
| 13149 | static int intel_atomic_check(struct drm_device *dev, | ||
| 13150 | struct drm_atomic_state *state) | ||
| 12771 | { | 13151 | { |
| 12772 | struct intel_connector *connector; | ||
| 12773 | struct drm_connector *drm_connector; | ||
| 12774 | struct drm_connector_state *connector_state; | ||
| 12775 | struct drm_crtc *crtc; | 13152 | struct drm_crtc *crtc; |
| 12776 | struct drm_crtc_state *crtc_state; | 13153 | struct drm_crtc_state *crtc_state; |
| 12777 | int i, ret; | 13154 | int ret, i; |
| 13155 | bool any_ms = false; | ||
| 12778 | 13156 | ||
| 12779 | /* The upper layers ensure that we either disable a crtc or have a list | 13157 | ret = drm_atomic_helper_check_modeset(dev, state); |
| 12780 | * of connectors. For paranoia, double-check this. */ | 13158 | if (ret) |
| 12781 | WARN_ON(!set->fb && (set->num_connectors != 0)); | 13159 | return ret; |
| 12782 | WARN_ON(set->fb && (set->num_connectors == 0)); | ||
| 12783 | |||
| 12784 | for_each_intel_connector(dev, connector) { | ||
| 12785 | bool in_mode_set = intel_connector_in_mode_set(connector, set); | ||
| 12786 | 13160 | ||
| 12787 | if (!in_mode_set && connector->base.state->crtc != set->crtc) | 13161 | for_each_crtc_in_state(state, crtc, crtc_state, i) { |
| 12788 | continue; | 13162 | struct intel_crtc_state *pipe_config = |
| 13163 | to_intel_crtc_state(crtc_state); | ||
| 12789 | 13164 | ||
| 12790 | connector_state = | 13165 | /* Catch I915_MODE_FLAG_INHERITED */ |
| 12791 | drm_atomic_get_connector_state(state, &connector->base); | 13166 | if (crtc_state->mode.private_flags != crtc->state->mode.private_flags) |
| 12792 | if (IS_ERR(connector_state)) | 13167 | crtc_state->mode_changed = true; |
| 12793 | return PTR_ERR(connector_state); | ||
| 12794 | 13168 | ||
| 12795 | if (in_mode_set) { | 13169 | if (!crtc_state->enable) { |
| 12796 | int pipe = to_intel_crtc(set->crtc)->pipe; | 13170 | if (needs_modeset(crtc_state)) |
| 12797 | connector_state->best_encoder = | 13171 | any_ms = true; |
| 12798 | &intel_find_encoder(connector, pipe)->base; | 13172 | continue; |
| 12799 | } | 13173 | } |
| 12800 | 13174 | ||
| 12801 | if (connector->base.state->crtc != set->crtc) | 13175 | if (!needs_modeset(crtc_state)) |
| 12802 | continue; | 13176 | continue; |
| 12803 | 13177 | ||
| 12804 | /* If we disable the crtc, disable all its connectors. Also, if | 13178 | /* FIXME: For only active_changed we shouldn't need to do any |
| 12805 | * the connector is on the changing crtc but not on the new | 13179 | * state recomputation at all. */ |
| 12806 | * connector list, disable it. */ | ||
| 12807 | if (!set->fb || !in_mode_set) { | ||
| 12808 | connector_state->best_encoder = NULL; | ||
| 12809 | 13180 | ||
| 12810 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", | 13181 | ret = drm_atomic_add_affected_connectors(state, crtc); |
| 12811 | connector->base.base.id, | 13182 | if (ret) |
| 12812 | connector->base.name); | 13183 | return ret; |
| 12813 | } | ||
| 12814 | } | ||
| 12815 | /* connector->new_encoder is now updated for all connectors. */ | ||
| 12816 | |||
| 12817 | for_each_connector_in_state(state, drm_connector, connector_state, i) { | ||
| 12818 | connector = to_intel_connector(drm_connector); | ||
| 12819 | 13184 | ||
| 12820 | if (!connector_state->best_encoder) { | 13185 | ret = intel_modeset_pipe_config(crtc, pipe_config); |
| 12821 | ret = drm_atomic_set_crtc_for_connector(connector_state, | 13186 | if (ret) |
| 12822 | NULL); | 13187 | return ret; |
| 12823 | if (ret) | ||
| 12824 | return ret; | ||
| 12825 | 13188 | ||
| 12826 | continue; | 13189 | if (i915.fastboot && |
| 13190 | intel_pipe_config_compare(state->dev, | ||
| 13191 | to_intel_crtc_state(crtc->state), | ||
| 13192 | pipe_config, true)) { | ||
| 13193 | crtc_state->mode_changed = false; | ||
| 12827 | } | 13194 | } |
| 12828 | 13195 | ||
| 12829 | if (intel_connector_in_mode_set(connector, set)) { | 13196 | if (needs_modeset(crtc_state)) { |
| 12830 | struct drm_crtc *crtc = connector->base.state->crtc; | 13197 | any_ms = true; |
| 12831 | |||
| 12832 | /* If this connector was in a previous crtc, add it | ||
| 12833 | * to the state. We might need to disable it. */ | ||
| 12834 | if (crtc) { | ||
| 12835 | crtc_state = | ||
| 12836 | drm_atomic_get_crtc_state(state, crtc); | ||
| 12837 | if (IS_ERR(crtc_state)) | ||
| 12838 | return PTR_ERR(crtc_state); | ||
| 12839 | } | ||
| 12840 | 13198 | ||
| 12841 | ret = drm_atomic_set_crtc_for_connector(connector_state, | 13199 | ret = drm_atomic_add_affected_planes(state, crtc); |
| 12842 | set->crtc); | ||
| 12843 | if (ret) | 13200 | if (ret) |
| 12844 | return ret; | 13201 | return ret; |
| 12845 | } | 13202 | } |
| 12846 | 13203 | ||
| 12847 | /* Make sure the new CRTC will work with the encoder */ | 13204 | intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, |
| 12848 | if (!drm_encoder_crtc_ok(connector_state->best_encoder, | 13205 | needs_modeset(crtc_state) ? |
| 12849 | connector_state->crtc)) { | 13206 | "[modeset]" : "[fastset]"); |
| 12850 | return -EINVAL; | ||
| 12851 | } | ||
| 12852 | |||
| 12853 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", | ||
| 12854 | connector->base.base.id, | ||
| 12855 | connector->base.name, | ||
| 12856 | connector_state->crtc->base.id); | ||
| 12857 | |||
| 12858 | if (connector_state->best_encoder != &connector->encoder->base) | ||
| 12859 | connector->encoder = | ||
| 12860 | to_intel_encoder(connector_state->best_encoder); | ||
| 12861 | } | 13207 | } |
| 12862 | 13208 | ||
| 12863 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | 13209 | if (any_ms) { |
| 12864 | bool has_connectors; | 13210 | ret = intel_modeset_checks(state); |
| 12865 | 13211 | ||
| 12866 | ret = drm_atomic_add_affected_connectors(state, crtc); | ||
| 12867 | if (ret) | 13212 | if (ret) |
| 12868 | return ret; | 13213 | return ret; |
| 13214 | } else | ||
| 13215 | to_intel_atomic_state(state)->cdclk = | ||
| 13216 | to_i915(state->dev)->cdclk_freq; | ||
| 12869 | 13217 | ||
| 12870 | has_connectors = !!drm_atomic_connectors_for_crtc(state, crtc); | 13218 | return drm_atomic_helper_check_planes(state->dev, state); |
| 12871 | if (has_connectors != crtc_state->enable) | 13219 | } |
| 12872 | crtc_state->enable = | 13220 | |
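intel_atomic_check() above and intel_atomic_commit() below are only reached once they are installed as the device's mode_config function pointers, after which the DRM core routes every atomic commit through them. A sketch of that wiring (illustrative fragment; the real i915 table carries additional callbacks such as fb_create):

    /* Illustrative fragment: how the check/commit entry points are exposed
     * to the DRM core. Only the atomic pair is shown here. */
    static const struct drm_mode_config_funcs example_mode_funcs = {
            .atomic_check  = intel_atomic_check,
            .atomic_commit = intel_atomic_commit,
    };
    /* ... and at init time: dev->mode_config.funcs = &example_mode_funcs; */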
| 12873 | crtc_state->active = has_connectors; | 13221 | /** |
| 13222 | * intel_atomic_commit - commit validated state object | ||
| 13223 | * @dev: DRM device | ||
| 13224 | * @state: the top-level driver state object | ||
| 13225 | * @async: asynchronous commit | ||
| 13226 | * | ||
| 13227 | * This function commits a top-level state object that has been validated | ||
| 13228 | * with drm_atomic_helper_check(). | ||
| 13229 | * | ||
| 13230 | * FIXME: Atomic modeset support for i915 is not yet complete. At the moment | ||
| 13231 | * we can only handle plane-related operations and do not yet support | ||
| 13232 | * asynchronous commit. | ||
| 13233 | * | ||
| 13234 | * RETURNS | ||
| 13235 | * Zero for success or -errno. | ||
| 13236 | */ | ||
| 13237 | static int intel_atomic_commit(struct drm_device *dev, | ||
| 13238 | struct drm_atomic_state *state, | ||
| 13239 | bool async) | ||
| 13240 | { | ||
| 13241 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 13242 | struct drm_crtc *crtc; | ||
| 13243 | struct drm_crtc_state *crtc_state; | ||
| 13244 | int ret = 0; | ||
| 13245 | int i; | ||
| 13246 | bool any_ms = false; | ||
| 13247 | |||
| 13248 | if (async) { | ||
| 13249 | DRM_DEBUG_KMS("i915 does not yet support async commit\n"); | ||
| 13250 | return -EINVAL; | ||
| 12874 | } | 13251 | } |
| 12875 | 13252 | ||
| 12876 | ret = intel_modeset_setup_plane_state(state, set->crtc, set->mode, | 13253 | ret = drm_atomic_helper_prepare_planes(dev, state); |
| 12877 | set->fb, set->x, set->y); | ||
| 12878 | if (ret) | 13254 | if (ret) |
| 12879 | return ret; | 13255 | return ret; |
| 12880 | 13256 | ||
| 12881 | crtc_state = drm_atomic_get_crtc_state(state, set->crtc); | 13257 | drm_atomic_helper_swap_state(dev, state); |
| 12882 | if (IS_ERR(crtc_state)) | ||
| 12883 | return PTR_ERR(crtc_state); | ||
| 12884 | |||
| 12885 | if (set->mode) | ||
| 12886 | drm_mode_copy(&crtc_state->mode, set->mode); | ||
| 12887 | 13258 | ||
| 12888 | if (set->num_connectors) | 13259 | for_each_crtc_in_state(state, crtc, crtc_state, i) { |
| 12889 | crtc_state->active = true; | 13260 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 12890 | 13261 | ||
| 12891 | return 0; | 13262 | if (!needs_modeset(crtc->state)) |
| 12892 | } | 13263 | continue; |
| 12893 | 13264 | ||
| 12894 | static bool primary_plane_visible(struct drm_crtc *crtc) | 13265 | any_ms = true; |
| 12895 | { | 13266 | intel_pre_plane_update(intel_crtc); |
| 12896 | struct intel_plane_state *plane_state = | ||
| 12897 | to_intel_plane_state(crtc->primary->state); | ||
| 12898 | 13267 | ||
| 12899 | return plane_state->visible; | 13268 | if (crtc_state->active) { |
| 12900 | } | 13269 | intel_crtc_disable_planes(crtc, crtc_state->plane_mask); |
| 13270 | dev_priv->display.crtc_disable(crtc); | ||
| 13271 | intel_crtc->active = false; | ||
| 13272 | intel_disable_shared_dpll(intel_crtc); | ||
| 13273 | } | ||
| 13274 | } | ||
| 12901 | 13275 | ||
| 12902 | static int intel_crtc_set_config(struct drm_mode_set *set) | 13276 | /* Only after disabling all output pipelines that will be changed can we |
| 12903 | { | 13277 | * update the output configuration. */ |
| 12904 | struct drm_device *dev; | 13278 | intel_modeset_update_state(state); |
| 12905 | struct drm_atomic_state *state = NULL; | ||
| 12906 | struct intel_crtc_state *pipe_config; | ||
| 12907 | bool primary_plane_was_visible; | ||
| 12908 | int ret; | ||
| 12909 | 13279 | ||
| 12910 | BUG_ON(!set); | 13280 | /* The state has been swapped above, so state actually contains the |
| 12911 | BUG_ON(!set->crtc); | 13281 | * old state now. */ |
| 12912 | BUG_ON(!set->crtc->helper_private); | 13282 | if (any_ms) |
| 13283 | modeset_update_crtc_power_domains(state); | ||
| 12913 | 13284 | ||
| 12914 | /* Enforce sane interface api - has been abused by the fb helper. */ | 13285 | /* Now enable the clocks, plane, pipe, and connectors that we set up. */ |
| 12915 | BUG_ON(!set->mode && set->fb); | 13286 | for_each_crtc_in_state(state, crtc, crtc_state, i) { |
| 12916 | BUG_ON(set->fb && set->num_connectors == 0); | 13287 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 13288 | bool modeset = needs_modeset(crtc->state); | ||
| 12917 | 13289 | ||
| 12918 | if (set->fb) { | 13290 | if (modeset && crtc->state->active) { |
| 12919 | DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", | 13291 | update_scanline_offset(to_intel_crtc(crtc)); |
| 12920 | set->crtc->base.id, set->fb->base.id, | 13292 | dev_priv->display.crtc_enable(crtc); |
| 12921 | (int)set->num_connectors, set->x, set->y); | 13293 | } |
| 12922 | } else { | ||
| 12923 | DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); | ||
| 12924 | } | ||
| 12925 | 13294 | ||
| 12926 | dev = set->crtc->dev; | 13295 | if (!modeset) |
| 13296 | intel_pre_plane_update(intel_crtc); | ||
| 12927 | 13297 | ||
| 12928 | state = drm_atomic_state_alloc(dev); | 13298 | drm_atomic_helper_commit_planes_on_crtc(crtc_state); |
| 12929 | if (!state) | 13299 | intel_post_plane_update(intel_crtc); |
| 12930 | return -ENOMEM; | 13300 | } |
| 12931 | 13301 | ||
| 12932 | state->acquire_ctx = dev->mode_config.acquire_ctx; | 13302 | /* FIXME: add subpixel order */ |
| 12933 | 13303 | ||
| 12934 | ret = intel_modeset_stage_output_state(dev, set, state); | 13304 | drm_atomic_helper_wait_for_vblanks(dev, state); |
| 12935 | if (ret) | 13305 | drm_atomic_helper_cleanup_planes(dev, state); |
| 12936 | goto out; | 13306 | drm_atomic_state_free(state); |
| 12937 | 13307 | ||
| 12938 | pipe_config = intel_modeset_compute_config(set->crtc, state); | 13308 | if (any_ms) |
| 12939 | if (IS_ERR(pipe_config)) { | 13309 | intel_modeset_check_state(dev); |
| 12940 | ret = PTR_ERR(pipe_config); | ||
| 12941 | goto out; | ||
| 12942 | } | ||
| 12943 | 13310 | ||
| 12944 | intel_update_pipe_size(to_intel_crtc(set->crtc)); | 13311 | return 0; |
| 13312 | } | ||
| 12945 | 13313 | ||
| 12946 | primary_plane_was_visible = primary_plane_visible(set->crtc); | 13314 | void intel_crtc_restore_mode(struct drm_crtc *crtc) |
| 13315 | { | ||
| 13316 | struct drm_device *dev = crtc->dev; | ||
| 13317 | struct drm_atomic_state *state; | ||
| 13318 | struct drm_crtc_state *crtc_state; | ||
| 13319 | int ret; | ||
| 12947 | 13320 | ||
| 12948 | ret = intel_set_mode_with_config(set->crtc, pipe_config, true); | 13321 | state = drm_atomic_state_alloc(dev); |
| 13322 | if (!state) { | ||
| 13323 | DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory\n", | ||
| 13324 | crtc->base.id); | ||
| 13325 | return; | ||
| 13326 | } | ||
| 12949 | 13327 | ||
| 12950 | if (ret == 0 && | 13328 | state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc); |
| 12951 | pipe_config->base.enable && | ||
| 12952 | pipe_config->base.planes_changed && | ||
| 12953 | !needs_modeset(&pipe_config->base)) { | ||
| 12954 | struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc); | ||
| 12955 | 13329 | ||
| 12956 | /* | 13330 | retry: |
| 12957 | * We need to make sure the primary plane is re-enabled if it | 13331 | crtc_state = drm_atomic_get_crtc_state(state, crtc); |
| 12958 | * has previously been turned off. | 13332 | ret = PTR_ERR_OR_ZERO(crtc_state); |
| 12959 | */ | 13333 | if (!ret) { |
| 12960 | if (ret == 0 && !primary_plane_was_visible && | 13334 | if (!crtc_state->active) |
| 12961 | primary_plane_visible(set->crtc)) { | 13335 | goto out; |
| 12962 | WARN_ON(!intel_crtc->active); | ||
| 12963 | intel_post_enable_primary(set->crtc); | ||
| 12964 | } | ||
| 12965 | 13336 | ||
| 12966 | /* | 13337 | crtc_state->mode_changed = true; |
| 12967 | * In the fastboot case this may be our only check of the | 13338 | ret = drm_atomic_commit(state); |
| 12968 | * state after boot. It would be better to only do it on | ||
| 12969 | * the first update, but we don't have a nice way of doing that | ||
| 12970 | * (and really, set_config isn't used much for high freq page | ||
| 12971 | * flipping, so increasing its cost here shouldn't be a big | ||
| 12972 | * deal). | ||
| 12973 | */ | ||
| 12974 | if (i915.fastboot && ret == 0) | ||
| 12975 | intel_modeset_check_state(set->crtc->dev); | ||
| 12976 | } | 13339 | } |
| 12977 | 13340 | ||
| 12978 | if (ret) { | 13341 | if (ret == -EDEADLK) { |
| 12979 | DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n", | 13342 | drm_atomic_state_clear(state); |
| 12980 | set->crtc->base.id, ret); | 13343 | drm_modeset_backoff(state->acquire_ctx); |
| 13344 | goto retry; | ||
| 12981 | } | 13345 | } |
| 12982 | 13346 | ||
| 12983 | out: | ||
| 12984 | if (ret) | 13347 | if (ret) |
| 13348 | out: | ||
| 12985 | drm_atomic_state_free(state); | 13349 | drm_atomic_state_free(state); |
| 12986 | return ret; | ||
| 12987 | } | 13350 | } |
| 12988 | 13351 | ||
| 13352 | #undef for_each_intel_crtc_masked | ||
| 13353 | |||
| 12989 | static const struct drm_crtc_funcs intel_crtc_funcs = { | 13354 | static const struct drm_crtc_funcs intel_crtc_funcs = { |
| 12990 | .gamma_set = intel_crtc_gamma_set, | 13355 | .gamma_set = intel_crtc_gamma_set, |
| 12991 | .set_config = intel_crtc_set_config, | 13356 | .set_config = drm_atomic_helper_set_config, |
| 12992 | .destroy = intel_crtc_destroy, | 13357 | .destroy = intel_crtc_destroy, |
| 12993 | .page_flip = intel_crtc_page_flip, | 13358 | .page_flip = intel_crtc_page_flip, |
| 12994 | .atomic_duplicate_state = intel_crtc_duplicate_state, | 13359 | .atomic_duplicate_state = intel_crtc_duplicate_state, |
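With this hunk the legacy set_config path is replaced wholesale by drm_atomic_helper_set_config, and driver-internal mode restores go through intel_crtc_restore_mode() with its -EDEADLK backoff loop. Because the side-by-side columns above interleave old and new code, here is the same retry pattern pulled out as a standalone sketch; it uses only the atomic-core calls visible in this hunk, and the restore_crtc_mode() wrapper name is illustrative rather than part of the patch.

	#include <linux/err.h>
	#include <drm/drm_atomic.h>
	#include <drm/drm_crtc.h>
	#include <drm/drm_modeset_lock.h>

	/* Sketch of the -EDEADLK backoff pattern used by intel_crtc_restore_mode():
	 * build a throwaway atomic state, force a modeset on one CRTC, and retry
	 * the whole transaction whenever lock acquisition deadlocks. */
	static void restore_crtc_mode(struct drm_crtc *crtc)
	{
		struct drm_atomic_state *state;
		struct drm_crtc_state *crtc_state;
		int ret;

		state = drm_atomic_state_alloc(crtc->dev);
		if (!state)
			return;

		/* Reuse the acquire context the legacy entry point already holds. */
		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

	retry:
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		ret = PTR_ERR_OR_ZERO(crtc_state);
		if (!ret) {
			/* Force a full modeset so the pipe is reprogrammed. */
			crtc_state->mode_changed = true;
			ret = drm_atomic_commit(state);
		}

		if (ret == -EDEADLK) {
			/* Drop the partial state, wait on the contended lock, redo. */
			drm_atomic_state_clear(state);
			drm_modeset_backoff(state->acquire_ctx);
			goto retry;
		}

		/* On success drm_atomic_commit() has taken ownership of the state. */
		if (ret)
			drm_atomic_state_free(state);
	}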
| @@ -13085,6 +13450,8 @@ static void intel_shared_dpll_init(struct drm_device *dev) | |||
| 13085 | { | 13450 | { |
| 13086 | struct drm_i915_private *dev_priv = dev->dev_private; | 13451 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 13087 | 13452 | ||
| 13453 | intel_update_cdclk(dev); | ||
| 13454 | |||
| 13088 | if (HAS_DDI(dev)) | 13455 | if (HAS_DDI(dev)) |
| 13089 | intel_ddi_pll_init(dev); | 13456 | intel_ddi_pll_init(dev); |
| 13090 | else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) | 13457 | else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) |
| @@ -13096,28 +13463,6 @@ static void intel_shared_dpll_init(struct drm_device *dev) | |||
| 13096 | } | 13463 | } |
| 13097 | 13464 | ||
| 13098 | /** | 13465 | /** |
| 13099 | * intel_wm_need_update - Check whether watermarks need updating | ||
| 13100 | * @plane: drm plane | ||
| 13101 | * @state: new plane state | ||
| 13102 | * | ||
| 13103 | * Check current plane state versus the new one to determine whether | ||
| 13104 | * watermarks need to be recalculated. | ||
| 13105 | * | ||
| 13106 | * Returns true or false. | ||
| 13107 | */ | ||
| 13108 | bool intel_wm_need_update(struct drm_plane *plane, | ||
| 13109 | struct drm_plane_state *state) | ||
| 13110 | { | ||
| 13111 | /* Update watermarks on tiling changes. */ | ||
| 13112 | if (!plane->state->fb || !state->fb || | ||
| 13113 | plane->state->fb->modifier[0] != state->fb->modifier[0] || | ||
| 13114 | plane->state->rotation != state->rotation) | ||
| 13115 | return true; | ||
| 13116 | |||
| 13117 | return false; | ||
| 13118 | } | ||
| 13119 | |||
| 13120 | /** | ||
| 13121 | * intel_prepare_plane_fb - Prepare fb for usage on plane | 13466 | * intel_prepare_plane_fb - Prepare fb for usage on plane |
| 13122 | * @plane: drm plane to prepare for | 13467 | * @plane: drm plane to prepare for |
| 13123 | * @fb: framebuffer to prepare for presentation | 13468 | * @fb: framebuffer to prepare for presentation |
| @@ -13136,27 +13481,13 @@ intel_prepare_plane_fb(struct drm_plane *plane, | |||
| 13136 | { | 13481 | { |
| 13137 | struct drm_device *dev = plane->dev; | 13482 | struct drm_device *dev = plane->dev; |
| 13138 | struct intel_plane *intel_plane = to_intel_plane(plane); | 13483 | struct intel_plane *intel_plane = to_intel_plane(plane); |
| 13139 | enum pipe pipe = intel_plane->pipe; | ||
| 13140 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | 13484 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); |
| 13141 | struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); | 13485 | struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); |
| 13142 | unsigned frontbuffer_bits = 0; | ||
| 13143 | int ret = 0; | 13486 | int ret = 0; |
| 13144 | 13487 | ||
| 13145 | if (!obj) | 13488 | if (!obj) |
| 13146 | return 0; | 13489 | return 0; |
| 13147 | 13490 | ||
| 13148 | switch (plane->type) { | ||
| 13149 | case DRM_PLANE_TYPE_PRIMARY: | ||
| 13150 | frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe); | ||
| 13151 | break; | ||
| 13152 | case DRM_PLANE_TYPE_CURSOR: | ||
| 13153 | frontbuffer_bits = INTEL_FRONTBUFFER_CURSOR(pipe); | ||
| 13154 | break; | ||
| 13155 | case DRM_PLANE_TYPE_OVERLAY: | ||
| 13156 | frontbuffer_bits = INTEL_FRONTBUFFER_SPRITE(pipe); | ||
| 13157 | break; | ||
| 13158 | } | ||
| 13159 | |||
| 13160 | mutex_lock(&dev->struct_mutex); | 13491 | mutex_lock(&dev->struct_mutex); |
| 13161 | 13492 | ||
| 13162 | if (plane->type == DRM_PLANE_TYPE_CURSOR && | 13493 | if (plane->type == DRM_PLANE_TYPE_CURSOR && |
| @@ -13166,11 +13497,11 @@ intel_prepare_plane_fb(struct drm_plane *plane, | |||
| 13166 | if (ret) | 13497 | if (ret) |
| 13167 | DRM_DEBUG_KMS("failed to attach phys object\n"); | 13498 | DRM_DEBUG_KMS("failed to attach phys object\n"); |
| 13168 | } else { | 13499 | } else { |
| 13169 | ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL); | 13500 | ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL, NULL); |
| 13170 | } | 13501 | } |
| 13171 | 13502 | ||
| 13172 | if (ret == 0) | 13503 | if (ret == 0) |
| 13173 | i915_gem_track_fb(old_obj, obj, frontbuffer_bits); | 13504 | i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); |
| 13174 | 13505 | ||
| 13175 | mutex_unlock(&dev->struct_mutex); | 13506 | mutex_unlock(&dev->struct_mutex); |
| 13176 | 13507 | ||
| @@ -13217,7 +13548,7 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state | |||
| 13217 | dev = intel_crtc->base.dev; | 13548 | dev = intel_crtc->base.dev; |
| 13218 | dev_priv = dev->dev_private; | 13549 | dev_priv = dev->dev_private; |
| 13219 | crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; | 13550 | crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; |
| 13220 | cdclk = dev_priv->display.get_display_clock_speed(dev); | 13551 | cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk; |
| 13221 | 13552 | ||
| 13222 | if (!crtc_clock || !cdclk) | 13553 | if (!crtc_clock || !cdclk) |
| 13223 | return DRM_PLANE_HELPER_NO_SCALING; | 13554 | return DRM_PLANE_HELPER_NO_SCALING; |
| @@ -13235,105 +13566,28 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state | |||
| 13235 | 13566 | ||
| 13236 | static int | 13567 | static int |
| 13237 | intel_check_primary_plane(struct drm_plane *plane, | 13568 | intel_check_primary_plane(struct drm_plane *plane, |
| 13569 | struct intel_crtc_state *crtc_state, | ||
| 13238 | struct intel_plane_state *state) | 13570 | struct intel_plane_state *state) |
| 13239 | { | 13571 | { |
| 13240 | struct drm_device *dev = plane->dev; | ||
| 13241 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 13242 | struct drm_crtc *crtc = state->base.crtc; | 13572 | struct drm_crtc *crtc = state->base.crtc; |
| 13243 | struct intel_crtc *intel_crtc; | ||
| 13244 | struct intel_crtc_state *crtc_state; | ||
| 13245 | struct drm_framebuffer *fb = state->base.fb; | 13573 | struct drm_framebuffer *fb = state->base.fb; |
| 13246 | struct drm_rect *dest = &state->dst; | ||
| 13247 | struct drm_rect *src = &state->src; | ||
| 13248 | const struct drm_rect *clip = &state->clip; | ||
| 13249 | bool can_position = false; | ||
| 13250 | int max_scale = DRM_PLANE_HELPER_NO_SCALING; | ||
| 13251 | int min_scale = DRM_PLANE_HELPER_NO_SCALING; | 13574 | int min_scale = DRM_PLANE_HELPER_NO_SCALING; |
| 13252 | int ret; | 13575 | int max_scale = DRM_PLANE_HELPER_NO_SCALING; |
| 13253 | 13576 | bool can_position = false; | |
| 13254 | crtc = crtc ? crtc : plane->crtc; | ||
| 13255 | intel_crtc = to_intel_crtc(crtc); | ||
| 13256 | crtc_state = state->base.state ? | ||
| 13257 | intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL; | ||
| 13258 | 13577 | ||
| 13259 | if (INTEL_INFO(dev)->gen >= 9) { | 13578 | /* use scaler when colorkey is not required */ |
| 13260 | /* use scaler when colorkey is not required */ | 13579 | if (INTEL_INFO(plane->dev)->gen >= 9 && |
| 13261 | if (to_intel_plane(plane)->ckey.flags == I915_SET_COLORKEY_NONE) { | 13580 | state->ckey.flags == I915_SET_COLORKEY_NONE) { |
| 13262 | min_scale = 1; | 13581 | min_scale = 1; |
| 13263 | max_scale = skl_max_scale(intel_crtc, crtc_state); | 13582 | max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state); |
| 13264 | } | ||
| 13265 | can_position = true; | 13583 | can_position = true; |
| 13266 | } | 13584 | } |
| 13267 | 13585 | ||
| 13268 | ret = drm_plane_helper_check_update(plane, crtc, fb, | 13586 | return drm_plane_helper_check_update(plane, crtc, fb, &state->src, |
| 13269 | src, dest, clip, | 13587 | &state->dst, &state->clip, |
| 13270 | min_scale, | 13588 | min_scale, max_scale, |
| 13271 | max_scale, | 13589 | can_position, true, |
| 13272 | can_position, true, | 13590 | &state->visible); |
| 13273 | &state->visible); | ||
| 13274 | if (ret) | ||
| 13275 | return ret; | ||
| 13276 | |||
| 13277 | if (crtc_state ? crtc_state->base.active : intel_crtc->active) { | ||
| 13278 | struct intel_plane_state *old_state = | ||
| 13279 | to_intel_plane_state(plane->state); | ||
| 13280 | |||
| 13281 | intel_crtc->atomic.wait_for_flips = true; | ||
| 13282 | |||
| 13283 | /* | ||
| 13284 | * FBC does not work on some platforms for rotated | ||
| 13285 | * planes, so disable it when rotation is not 0 and | ||
| 13286 | * update it when rotation is set back to 0. | ||
| 13287 | * | ||
| 13288 | * FIXME: This is redundant with the fbc update done in | ||
| 13289 | * the primary plane enable function except that that | ||
| 13290 | * one is done too late. We eventually need to unify | ||
| 13291 | * this. | ||
| 13292 | */ | ||
| 13293 | if (state->visible && | ||
| 13294 | INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) && | ||
| 13295 | dev_priv->fbc.crtc == intel_crtc && | ||
| 13296 | state->base.rotation != BIT(DRM_ROTATE_0)) { | ||
| 13297 | intel_crtc->atomic.disable_fbc = true; | ||
| 13298 | } | ||
| 13299 | |||
| 13300 | if (state->visible && !old_state->visible) { | ||
| 13301 | /* | ||
| 13302 | * BDW signals flip done immediately if the plane | ||
| 13303 | * is disabled, even if the plane enable is already | ||
| 13304 | * armed to occur at the next vblank :( | ||
| 13305 | */ | ||
| 13306 | if (IS_BROADWELL(dev)) | ||
| 13307 | intel_crtc->atomic.wait_vblank = true; | ||
| 13308 | } | ||
| 13309 | |||
| 13310 | /* | ||
| 13311 | * FIXME: Actually if we will still have any other plane enabled | ||
| 13312 | * on the pipe we could let IPS enabled still, but for | ||
| 13313 | * now lets consider that when we make primary invisible | ||
| 13314 | * by setting DSPCNTR to 0 on update_primary_plane function | ||
| 13315 | * IPS needs to be disable. | ||
| 13316 | */ | ||
| 13317 | if (!state->visible || !fb) | ||
| 13318 | intel_crtc->atomic.disable_ips = true; | ||
| 13319 | |||
| 13320 | intel_crtc->atomic.fb_bits |= | ||
| 13321 | INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); | ||
| 13322 | |||
| 13323 | intel_crtc->atomic.update_fbc = true; | ||
| 13324 | |||
| 13325 | if (intel_wm_need_update(plane, &state->base)) | ||
| 13326 | intel_crtc->atomic.update_wm = true; | ||
| 13327 | } | ||
| 13328 | |||
| 13329 | if (INTEL_INFO(dev)->gen >= 9) { | ||
| 13330 | ret = skl_update_scaler_users(intel_crtc, crtc_state, | ||
| 13331 | to_intel_plane(plane), state, 0); | ||
| 13332 | if (ret) | ||
| 13333 | return ret; | ||
| 13334 | } | ||
| 13335 | |||
| 13336 | return 0; | ||
| 13337 | } | 13591 | } |
| 13338 | 13592 | ||
| 13339 | static void | 13593 | static void |
| @@ -13354,20 +13608,19 @@ intel_commit_primary_plane(struct drm_plane *plane, | |||
| 13354 | crtc->x = src->x1 >> 16; | 13608 | crtc->x = src->x1 >> 16; |
| 13355 | crtc->y = src->y1 >> 16; | 13609 | crtc->y = src->y1 >> 16; |
| 13356 | 13610 | ||
| 13357 | if (intel_crtc->active) { | 13611 | if (!crtc->state->active) |
| 13358 | if (state->visible) | 13612 | return; |
| 13359 | /* FIXME: kill this fastboot hack */ | ||
| 13360 | intel_update_pipe_size(intel_crtc); | ||
| 13361 | 13613 | ||
| 13362 | dev_priv->display.update_primary_plane(crtc, plane->fb, | 13614 | if (state->visible) |
| 13363 | crtc->x, crtc->y); | 13615 | /* FIXME: kill this fastboot hack */ |
| 13364 | } | 13616 | intel_update_pipe_size(intel_crtc); |
| 13617 | |||
| 13618 | dev_priv->display.update_primary_plane(crtc, fb, crtc->x, crtc->y); | ||
| 13365 | } | 13619 | } |
| 13366 | 13620 | ||
| 13367 | static void | 13621 | static void |
| 13368 | intel_disable_primary_plane(struct drm_plane *plane, | 13622 | intel_disable_primary_plane(struct drm_plane *plane, |
| 13369 | struct drm_crtc *crtc, | 13623 | struct drm_crtc *crtc) |
| 13370 | bool force) | ||
| 13371 | { | 13624 | { |
| 13372 | struct drm_device *dev = plane->dev; | 13625 | struct drm_device *dev = plane->dev; |
| 13373 | struct drm_i915_private *dev_priv = dev->dev_private; | 13626 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -13378,93 +13631,25 @@ intel_disable_primary_plane(struct drm_plane *plane, | |||
| 13378 | static void intel_begin_crtc_commit(struct drm_crtc *crtc) | 13631 | static void intel_begin_crtc_commit(struct drm_crtc *crtc) |
| 13379 | { | 13632 | { |
| 13380 | struct drm_device *dev = crtc->dev; | 13633 | struct drm_device *dev = crtc->dev; |
| 13381 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 13382 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 13634 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 13383 | struct intel_plane *intel_plane; | ||
| 13384 | struct drm_plane *p; | ||
| 13385 | unsigned fb_bits = 0; | ||
| 13386 | 13635 | ||
| 13387 | /* Track fb's for any planes being disabled */ | 13636 | if (intel_crtc->atomic.update_wm_pre) |
| 13388 | list_for_each_entry(p, &dev->mode_config.plane_list, head) { | ||
| 13389 | intel_plane = to_intel_plane(p); | ||
| 13390 | |||
| 13391 | if (intel_crtc->atomic.disabled_planes & | ||
| 13392 | (1 << drm_plane_index(p))) { | ||
| 13393 | switch (p->type) { | ||
| 13394 | case DRM_PLANE_TYPE_PRIMARY: | ||
| 13395 | fb_bits = INTEL_FRONTBUFFER_PRIMARY(intel_plane->pipe); | ||
| 13396 | break; | ||
| 13397 | case DRM_PLANE_TYPE_CURSOR: | ||
| 13398 | fb_bits = INTEL_FRONTBUFFER_CURSOR(intel_plane->pipe); | ||
| 13399 | break; | ||
| 13400 | case DRM_PLANE_TYPE_OVERLAY: | ||
| 13401 | fb_bits = INTEL_FRONTBUFFER_SPRITE(intel_plane->pipe); | ||
| 13402 | break; | ||
| 13403 | } | ||
| 13404 | |||
| 13405 | mutex_lock(&dev->struct_mutex); | ||
| 13406 | i915_gem_track_fb(intel_fb_obj(p->fb), NULL, fb_bits); | ||
| 13407 | mutex_unlock(&dev->struct_mutex); | ||
| 13408 | } | ||
| 13409 | } | ||
| 13410 | |||
| 13411 | if (intel_crtc->atomic.wait_for_flips) | ||
| 13412 | intel_crtc_wait_for_pending_flips(crtc); | ||
| 13413 | |||
| 13414 | if (intel_crtc->atomic.disable_fbc) | ||
| 13415 | intel_fbc_disable(dev); | ||
| 13416 | |||
| 13417 | if (intel_crtc->atomic.disable_ips) | ||
| 13418 | hsw_disable_ips(intel_crtc); | ||
| 13419 | |||
| 13420 | if (intel_crtc->atomic.pre_disable_primary) | ||
| 13421 | intel_pre_disable_primary(crtc); | ||
| 13422 | |||
| 13423 | if (intel_crtc->atomic.update_wm) | ||
| 13424 | intel_update_watermarks(crtc); | 13637 | intel_update_watermarks(crtc); |
| 13425 | 13638 | ||
| 13426 | intel_runtime_pm_get(dev_priv); | ||
| 13427 | |||
| 13428 | /* Perform vblank evasion around commit operation */ | 13639 | /* Perform vblank evasion around commit operation */ |
| 13429 | if (intel_crtc->active) | 13640 | if (crtc->state->active) |
| 13430 | intel_crtc->atomic.evade = | 13641 | intel_pipe_update_start(intel_crtc, &intel_crtc->start_vbl_count); |
| 13431 | intel_pipe_update_start(intel_crtc, | 13642 | |
| 13432 | &intel_crtc->atomic.start_vbl_count); | 13643 | if (!needs_modeset(crtc->state) && INTEL_INFO(dev)->gen >= 9) |
| 13644 | skl_detach_scalers(intel_crtc); | ||
| 13433 | } | 13645 | } |
| 13434 | 13646 | ||
| 13435 | static void intel_finish_crtc_commit(struct drm_crtc *crtc) | 13647 | static void intel_finish_crtc_commit(struct drm_crtc *crtc) |
| 13436 | { | 13648 | { |
| 13437 | struct drm_device *dev = crtc->dev; | ||
| 13438 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 13439 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 13649 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 13440 | struct drm_plane *p; | ||
| 13441 | 13650 | ||
| 13442 | if (intel_crtc->atomic.evade) | 13651 | if (crtc->state->active) |
| 13443 | intel_pipe_update_end(intel_crtc, | 13652 | intel_pipe_update_end(intel_crtc, intel_crtc->start_vbl_count); |
| 13444 | intel_crtc->atomic.start_vbl_count); | ||
| 13445 | |||
| 13446 | intel_runtime_pm_put(dev_priv); | ||
| 13447 | |||
| 13448 | if (intel_crtc->atomic.wait_vblank) | ||
| 13449 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
| 13450 | |||
| 13451 | intel_frontbuffer_flip(dev, intel_crtc->atomic.fb_bits); | ||
| 13452 | |||
| 13453 | if (intel_crtc->atomic.update_fbc) { | ||
| 13454 | mutex_lock(&dev->struct_mutex); | ||
| 13455 | intel_fbc_update(dev); | ||
| 13456 | mutex_unlock(&dev->struct_mutex); | ||
| 13457 | } | ||
| 13458 | |||
| 13459 | if (intel_crtc->atomic.post_enable_primary) | ||
| 13460 | intel_post_enable_primary(crtc); | ||
| 13461 | |||
| 13462 | drm_for_each_legacy_plane(p, &dev->mode_config.plane_list) | ||
| 13463 | if (intel_crtc->atomic.update_sprite_watermarks & drm_plane_index(p)) | ||
| 13464 | intel_update_sprite_watermarks(p, crtc, 0, 0, 0, | ||
| 13465 | false, false); | ||
| 13466 | |||
| 13467 | memset(&intel_crtc->atomic, 0, sizeof(intel_crtc->atomic)); | ||
| 13468 | } | 13653 | } |
| 13469 | 13654 | ||
| 13470 | /** | 13655 | /** |
| @@ -13520,10 +13705,10 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, | |||
| 13520 | } | 13705 | } |
| 13521 | primary->pipe = pipe; | 13706 | primary->pipe = pipe; |
| 13522 | primary->plane = pipe; | 13707 | primary->plane = pipe; |
| 13708 | primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); | ||
| 13523 | primary->check_plane = intel_check_primary_plane; | 13709 | primary->check_plane = intel_check_primary_plane; |
| 13524 | primary->commit_plane = intel_commit_primary_plane; | 13710 | primary->commit_plane = intel_commit_primary_plane; |
| 13525 | primary->disable_plane = intel_disable_primary_plane; | 13711 | primary->disable_plane = intel_disable_primary_plane; |
| 13526 | primary->ckey.flags = I915_SET_COLORKEY_NONE; | ||
| 13527 | if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) | 13712 | if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) |
| 13528 | primary->plane = !pipe; | 13713 | primary->plane = !pipe; |
| 13529 | 13714 | ||
| @@ -13571,37 +13756,29 @@ void intel_create_rotation_property(struct drm_device *dev, struct intel_plane * | |||
| 13571 | 13756 | ||
| 13572 | static int | 13757 | static int |
| 13573 | intel_check_cursor_plane(struct drm_plane *plane, | 13758 | intel_check_cursor_plane(struct drm_plane *plane, |
| 13759 | struct intel_crtc_state *crtc_state, | ||
| 13574 | struct intel_plane_state *state) | 13760 | struct intel_plane_state *state) |
| 13575 | { | 13761 | { |
| 13576 | struct drm_crtc *crtc = state->base.crtc; | 13762 | struct drm_crtc *crtc = crtc_state->base.crtc; |
| 13577 | struct drm_device *dev = plane->dev; | ||
| 13578 | struct drm_framebuffer *fb = state->base.fb; | 13763 | struct drm_framebuffer *fb = state->base.fb; |
| 13579 | struct drm_rect *dest = &state->dst; | ||
| 13580 | struct drm_rect *src = &state->src; | ||
| 13581 | const struct drm_rect *clip = &state->clip; | ||
| 13582 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | 13764 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); |
| 13583 | struct intel_crtc *intel_crtc; | ||
| 13584 | unsigned stride; | 13765 | unsigned stride; |
| 13585 | int ret; | 13766 | int ret; |
| 13586 | 13767 | ||
| 13587 | crtc = crtc ? crtc : plane->crtc; | 13768 | ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src, |
| 13588 | intel_crtc = to_intel_crtc(crtc); | 13769 | &state->dst, &state->clip, |
| 13589 | |||
| 13590 | ret = drm_plane_helper_check_update(plane, crtc, fb, | ||
| 13591 | src, dest, clip, | ||
| 13592 | DRM_PLANE_HELPER_NO_SCALING, | 13770 | DRM_PLANE_HELPER_NO_SCALING, |
| 13593 | DRM_PLANE_HELPER_NO_SCALING, | 13771 | DRM_PLANE_HELPER_NO_SCALING, |
| 13594 | true, true, &state->visible); | 13772 | true, true, &state->visible); |
| 13595 | if (ret) | 13773 | if (ret) |
| 13596 | return ret; | 13774 | return ret; |
| 13597 | 13775 | ||
| 13598 | |||
| 13599 | /* if we want to turn off the cursor ignore width and height */ | 13776 | /* if we want to turn off the cursor ignore width and height */ |
| 13600 | if (!obj) | 13777 | if (!obj) |
| 13601 | goto finish; | 13778 | return 0; |
| 13602 | 13779 | ||
| 13603 | /* Check for which cursor types we support */ | 13780 | /* Check for which cursor types we support */ |
| 13604 | if (!cursor_size_ok(dev, state->base.crtc_w, state->base.crtc_h)) { | 13781 | if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) { |
| 13605 | DRM_DEBUG("Cursor dimension %dx%d not supported\n", | 13782 | DRM_DEBUG("Cursor dimension %dx%d not supported\n", |
| 13606 | state->base.crtc_w, state->base.crtc_h); | 13783 | state->base.crtc_w, state->base.crtc_h); |
| 13607 | return -EINVAL; | 13784 | return -EINVAL; |
| @@ -13615,34 +13792,16 @@ intel_check_cursor_plane(struct drm_plane *plane, | |||
| 13615 | 13792 | ||
| 13616 | if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) { | 13793 | if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) { |
| 13617 | DRM_DEBUG_KMS("cursor cannot be tiled\n"); | 13794 | DRM_DEBUG_KMS("cursor cannot be tiled\n"); |
| 13618 | ret = -EINVAL; | 13795 | return -EINVAL; |
| 13619 | } | ||
| 13620 | |||
| 13621 | finish: | ||
| 13622 | if (intel_crtc->active) { | ||
| 13623 | if (plane->state->crtc_w != state->base.crtc_w) | ||
| 13624 | intel_crtc->atomic.update_wm = true; | ||
| 13625 | |||
| 13626 | intel_crtc->atomic.fb_bits |= | ||
| 13627 | INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe); | ||
| 13628 | } | 13796 | } |
| 13629 | 13797 | ||
| 13630 | return ret; | 13798 | return 0; |
| 13631 | } | 13799 | } |
| 13632 | 13800 | ||
| 13633 | static void | 13801 | static void |
| 13634 | intel_disable_cursor_plane(struct drm_plane *plane, | 13802 | intel_disable_cursor_plane(struct drm_plane *plane, |
| 13635 | struct drm_crtc *crtc, | 13803 | struct drm_crtc *crtc) |
| 13636 | bool force) | ||
| 13637 | { | 13804 | { |
| 13638 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
| 13639 | |||
| 13640 | if (!force) { | ||
| 13641 | plane->fb = NULL; | ||
| 13642 | intel_crtc->cursor_bo = NULL; | ||
| 13643 | intel_crtc->cursor_addr = 0; | ||
| 13644 | } | ||
| 13645 | |||
| 13646 | intel_crtc_update_cursor(crtc, false); | 13805 | intel_crtc_update_cursor(crtc, false); |
| 13647 | } | 13806 | } |
| 13648 | 13807 | ||
| @@ -13675,9 +13834,9 @@ intel_commit_cursor_plane(struct drm_plane *plane, | |||
| 13675 | 13834 | ||
| 13676 | intel_crtc->cursor_addr = addr; | 13835 | intel_crtc->cursor_addr = addr; |
| 13677 | intel_crtc->cursor_bo = obj; | 13836 | intel_crtc->cursor_bo = obj; |
| 13678 | update: | ||
| 13679 | 13837 | ||
| 13680 | if (intel_crtc->active) | 13838 | update: |
| 13839 | if (crtc->state->active) | ||
| 13681 | intel_crtc_update_cursor(crtc, state->visible); | 13840 | intel_crtc_update_cursor(crtc, state->visible); |
| 13682 | } | 13841 | } |
| 13683 | 13842 | ||
| @@ -13702,6 +13861,7 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, | |||
| 13702 | cursor->max_downscale = 1; | 13861 | cursor->max_downscale = 1; |
| 13703 | cursor->pipe = pipe; | 13862 | cursor->pipe = pipe; |
| 13704 | cursor->plane = pipe; | 13863 | cursor->plane = pipe; |
| 13864 | cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe); | ||
| 13705 | cursor->check_plane = intel_check_cursor_plane; | 13865 | cursor->check_plane = intel_check_cursor_plane; |
| 13706 | cursor->commit_plane = intel_commit_cursor_plane; | 13866 | cursor->commit_plane = intel_commit_cursor_plane; |
| 13707 | cursor->disable_plane = intel_disable_cursor_plane; | 13867 | cursor->disable_plane = intel_disable_cursor_plane; |
| @@ -13742,8 +13902,6 @@ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_cr | |||
| 13742 | for (i = 0; i < intel_crtc->num_scalers; i++) { | 13902 | for (i = 0; i < intel_crtc->num_scalers; i++) { |
| 13743 | intel_scaler = &scaler_state->scalers[i]; | 13903 | intel_scaler = &scaler_state->scalers[i]; |
| 13744 | intel_scaler->in_use = 0; | 13904 | intel_scaler->in_use = 0; |
| 13745 | intel_scaler->id = i; | ||
| 13746 | |||
| 13747 | intel_scaler->mode = PS_SCALER_MODE_DYN; | 13905 | intel_scaler->mode = PS_SCALER_MODE_DYN; |
| 13748 | } | 13906 | } |
| 13749 | 13907 | ||
| @@ -13815,6 +13973,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
| 13815 | intel_crtc->cursor_cntl = ~0; | 13973 | intel_crtc->cursor_cntl = ~0; |
| 13816 | intel_crtc->cursor_size = ~0; | 13974 | intel_crtc->cursor_size = ~0; |
| 13817 | 13975 | ||
| 13976 | intel_crtc->wm.cxsr_allowed = true; | ||
| 13977 | |||
| 13818 | BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || | 13978 | BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || |
| 13819 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); | 13979 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); |
| 13820 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; | 13980 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; |
| @@ -14026,18 +14186,18 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
| 14026 | } | 14186 | } |
| 14027 | 14187 | ||
| 14028 | intel_dsi_init(dev); | 14188 | intel_dsi_init(dev); |
| 14029 | } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { | 14189 | } else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) { |
| 14030 | bool found = false; | 14190 | bool found = false; |
| 14031 | 14191 | ||
| 14032 | if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { | 14192 | if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { |
| 14033 | DRM_DEBUG_KMS("probing SDVOB\n"); | 14193 | DRM_DEBUG_KMS("probing SDVOB\n"); |
| 14034 | found = intel_sdvo_init(dev, GEN3_SDVOB, true); | 14194 | found = intel_sdvo_init(dev, GEN3_SDVOB, true); |
| 14035 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { | 14195 | if (!found && IS_G4X(dev)) { |
| 14036 | DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); | 14196 | DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); |
| 14037 | intel_hdmi_init(dev, GEN4_HDMIB, PORT_B); | 14197 | intel_hdmi_init(dev, GEN4_HDMIB, PORT_B); |
| 14038 | } | 14198 | } |
| 14039 | 14199 | ||
| 14040 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) | 14200 | if (!found && IS_G4X(dev)) |
| 14041 | intel_dp_init(dev, DP_B, PORT_B); | 14201 | intel_dp_init(dev, DP_B, PORT_B); |
| 14042 | } | 14202 | } |
| 14043 | 14203 | ||
| @@ -14050,15 +14210,15 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
| 14050 | 14210 | ||
| 14051 | if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { | 14211 | if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { |
| 14052 | 14212 | ||
| 14053 | if (SUPPORTS_INTEGRATED_HDMI(dev)) { | 14213 | if (IS_G4X(dev)) { |
| 14054 | DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); | 14214 | DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); |
| 14055 | intel_hdmi_init(dev, GEN4_HDMIC, PORT_C); | 14215 | intel_hdmi_init(dev, GEN4_HDMIC, PORT_C); |
| 14056 | } | 14216 | } |
| 14057 | if (SUPPORTS_INTEGRATED_DP(dev)) | 14217 | if (IS_G4X(dev)) |
| 14058 | intel_dp_init(dev, DP_C, PORT_C); | 14218 | intel_dp_init(dev, DP_C, PORT_C); |
| 14059 | } | 14219 | } |
| 14060 | 14220 | ||
| 14061 | if (SUPPORTS_INTEGRATED_DP(dev) && | 14221 | if (IS_G4X(dev) && |
| 14062 | (I915_READ(DP_D) & DP_DETECTED)) | 14222 | (I915_READ(DP_D) & DP_DETECTED)) |
| 14063 | intel_dp_init(dev, DP_D, PORT_D); | 14223 | intel_dp_init(dev, DP_D, PORT_D); |
| 14064 | } else if (IS_GEN2(dev)) | 14224 | } else if (IS_GEN2(dev)) |
| @@ -14103,9 +14263,27 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, | |||
| 14103 | return drm_gem_handle_create(file, &obj->base, handle); | 14263 | return drm_gem_handle_create(file, &obj->base, handle); |
| 14104 | } | 14264 | } |
| 14105 | 14265 | ||
| 14266 | static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, | ||
| 14267 | struct drm_file *file, | ||
| 14268 | unsigned flags, unsigned color, | ||
| 14269 | struct drm_clip_rect *clips, | ||
| 14270 | unsigned num_clips) | ||
| 14271 | { | ||
| 14272 | struct drm_device *dev = fb->dev; | ||
| 14273 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | ||
| 14274 | struct drm_i915_gem_object *obj = intel_fb->obj; | ||
| 14275 | |||
| 14276 | mutex_lock(&dev->struct_mutex); | ||
| 14277 | intel_fb_obj_flush(obj, false, ORIGIN_GTT); | ||
| 14278 | mutex_unlock(&dev->struct_mutex); | ||
| 14279 | |||
| 14280 | return 0; | ||
| 14281 | } | ||
| 14282 | |||
| 14106 | static const struct drm_framebuffer_funcs intel_fb_funcs = { | 14283 | static const struct drm_framebuffer_funcs intel_fb_funcs = { |
| 14107 | .destroy = intel_user_framebuffer_destroy, | 14284 | .destroy = intel_user_framebuffer_destroy, |
| 14108 | .create_handle = intel_user_framebuffer_create_handle, | 14285 | .create_handle = intel_user_framebuffer_create_handle, |
| 14286 | .dirty = intel_user_framebuffer_dirty, | ||
| 14109 | }; | 14287 | }; |
| 14110 | 14288 | ||
| 14111 | static | 14289 | static |
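The new .dirty hook gives frontbuffer-rendering clients a way to tell the kernel which regions changed, so PSR and FBC get flushed via intel_fb_obj_flush(). From userspace this is reached through the DIRTYFB ioctl; a hedged libdrm sketch follows, where the fd and fb_id are assumed to come from an existing framebuffer setup elsewhere.

	#include <stdint.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	/* Report one damaged rectangle of a frontbuffer-rendered framebuffer.
	 * The kernel routes this to the framebuffer's .dirty callback, i.e.
	 * intel_user_framebuffer_dirty() after this patch. */
	static int flush_damage(int fd, uint32_t fb_id,
				uint16_t x, uint16_t y, uint16_t w, uint16_t h)
	{
		drmModeClip clip = {
			.x1 = x, .y1 = y,
			.x2 = (uint16_t)(x + w), .y2 = (uint16_t)(y + h),
		};

		return drmModeDirtyFB(fd, fb_id, &clip, 1);
	}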
| @@ -14311,6 +14489,8 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { | |||
| 14311 | .output_poll_changed = intel_fbdev_output_poll_changed, | 14489 | .output_poll_changed = intel_fbdev_output_poll_changed, |
| 14312 | .atomic_check = intel_atomic_check, | 14490 | .atomic_check = intel_atomic_check, |
| 14313 | .atomic_commit = intel_atomic_commit, | 14491 | .atomic_commit = intel_atomic_commit, |
| 14492 | .atomic_state_alloc = intel_atomic_state_alloc, | ||
| 14493 | .atomic_state_clear = intel_atomic_state_clear, | ||
| 14314 | }; | 14494 | }; |
| 14315 | 14495 | ||
| 14316 | /* Set up chip specific display functions */ | 14496 | /* Set up chip specific display functions */ |
| @@ -14337,7 +14517,6 @@ static void intel_init_display(struct drm_device *dev) | |||
| 14337 | haswell_crtc_compute_clock; | 14517 | haswell_crtc_compute_clock; |
| 14338 | dev_priv->display.crtc_enable = haswell_crtc_enable; | 14518 | dev_priv->display.crtc_enable = haswell_crtc_enable; |
| 14339 | dev_priv->display.crtc_disable = haswell_crtc_disable; | 14519 | dev_priv->display.crtc_disable = haswell_crtc_disable; |
| 14340 | dev_priv->display.off = ironlake_crtc_off; | ||
| 14341 | dev_priv->display.update_primary_plane = | 14520 | dev_priv->display.update_primary_plane = |
| 14342 | skylake_update_primary_plane; | 14521 | skylake_update_primary_plane; |
| 14343 | } else if (HAS_DDI(dev)) { | 14522 | } else if (HAS_DDI(dev)) { |
| @@ -14348,7 +14527,6 @@ static void intel_init_display(struct drm_device *dev) | |||
| 14348 | haswell_crtc_compute_clock; | 14527 | haswell_crtc_compute_clock; |
| 14349 | dev_priv->display.crtc_enable = haswell_crtc_enable; | 14528 | dev_priv->display.crtc_enable = haswell_crtc_enable; |
| 14350 | dev_priv->display.crtc_disable = haswell_crtc_disable; | 14529 | dev_priv->display.crtc_disable = haswell_crtc_disable; |
| 14351 | dev_priv->display.off = ironlake_crtc_off; | ||
| 14352 | dev_priv->display.update_primary_plane = | 14530 | dev_priv->display.update_primary_plane = |
| 14353 | ironlake_update_primary_plane; | 14531 | ironlake_update_primary_plane; |
| 14354 | } else if (HAS_PCH_SPLIT(dev)) { | 14532 | } else if (HAS_PCH_SPLIT(dev)) { |
| @@ -14359,7 +14537,6 @@ static void intel_init_display(struct drm_device *dev) | |||
| 14359 | ironlake_crtc_compute_clock; | 14537 | ironlake_crtc_compute_clock; |
| 14360 | dev_priv->display.crtc_enable = ironlake_crtc_enable; | 14538 | dev_priv->display.crtc_enable = ironlake_crtc_enable; |
| 14361 | dev_priv->display.crtc_disable = ironlake_crtc_disable; | 14539 | dev_priv->display.crtc_disable = ironlake_crtc_disable; |
| 14362 | dev_priv->display.off = ironlake_crtc_off; | ||
| 14363 | dev_priv->display.update_primary_plane = | 14540 | dev_priv->display.update_primary_plane = |
| 14364 | ironlake_update_primary_plane; | 14541 | ironlake_update_primary_plane; |
| 14365 | } else if (IS_VALLEYVIEW(dev)) { | 14542 | } else if (IS_VALLEYVIEW(dev)) { |
| @@ -14369,7 +14546,6 @@ static void intel_init_display(struct drm_device *dev) | |||
| 14369 | dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; | 14546 | dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; |
| 14370 | dev_priv->display.crtc_enable = valleyview_crtc_enable; | 14547 | dev_priv->display.crtc_enable = valleyview_crtc_enable; |
| 14371 | dev_priv->display.crtc_disable = i9xx_crtc_disable; | 14548 | dev_priv->display.crtc_disable = i9xx_crtc_disable; |
| 14372 | dev_priv->display.off = i9xx_crtc_off; | ||
| 14373 | dev_priv->display.update_primary_plane = | 14549 | dev_priv->display.update_primary_plane = |
| 14374 | i9xx_update_primary_plane; | 14550 | i9xx_update_primary_plane; |
| 14375 | } else { | 14551 | } else { |
| @@ -14379,7 +14555,6 @@ static void intel_init_display(struct drm_device *dev) | |||
| 14379 | dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; | 14555 | dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; |
| 14380 | dev_priv->display.crtc_enable = i9xx_crtc_enable; | 14556 | dev_priv->display.crtc_enable = i9xx_crtc_enable; |
| 14381 | dev_priv->display.crtc_disable = i9xx_crtc_disable; | 14557 | dev_priv->display.crtc_disable = i9xx_crtc_disable; |
| 14382 | dev_priv->display.off = i9xx_crtc_off; | ||
| 14383 | dev_priv->display.update_primary_plane = | 14558 | dev_priv->display.update_primary_plane = |
| 14384 | i9xx_update_primary_plane; | 14559 | i9xx_update_primary_plane; |
| 14385 | } | 14560 | } |
| @@ -14388,6 +14563,9 @@ static void intel_init_display(struct drm_device *dev) | |||
| 14388 | if (IS_SKYLAKE(dev)) | 14563 | if (IS_SKYLAKE(dev)) |
| 14389 | dev_priv->display.get_display_clock_speed = | 14564 | dev_priv->display.get_display_clock_speed = |
| 14390 | skylake_get_display_clock_speed; | 14565 | skylake_get_display_clock_speed; |
| 14566 | else if (IS_BROXTON(dev)) | ||
| 14567 | dev_priv->display.get_display_clock_speed = | ||
| 14568 | broxton_get_display_clock_speed; | ||
| 14391 | else if (IS_BROADWELL(dev)) | 14569 | else if (IS_BROADWELL(dev)) |
| 14392 | dev_priv->display.get_display_clock_speed = | 14570 | dev_priv->display.get_display_clock_speed = |
| 14393 | broadwell_get_display_clock_speed; | 14571 | broadwell_get_display_clock_speed; |
| @@ -14401,9 +14579,21 @@ static void intel_init_display(struct drm_device *dev) | |||
| 14401 | dev_priv->display.get_display_clock_speed = | 14579 | dev_priv->display.get_display_clock_speed = |
| 14402 | ilk_get_display_clock_speed; | 14580 | ilk_get_display_clock_speed; |
| 14403 | else if (IS_I945G(dev) || IS_BROADWATER(dev) || | 14581 | else if (IS_I945G(dev) || IS_BROADWATER(dev) || |
| 14404 | IS_GEN6(dev) || IS_IVYBRIDGE(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) | 14582 | IS_GEN6(dev) || IS_IVYBRIDGE(dev)) |
| 14405 | dev_priv->display.get_display_clock_speed = | 14583 | dev_priv->display.get_display_clock_speed = |
| 14406 | i945_get_display_clock_speed; | 14584 | i945_get_display_clock_speed; |
| 14585 | else if (IS_GM45(dev)) | ||
| 14586 | dev_priv->display.get_display_clock_speed = | ||
| 14587 | gm45_get_display_clock_speed; | ||
| 14588 | else if (IS_CRESTLINE(dev)) | ||
| 14589 | dev_priv->display.get_display_clock_speed = | ||
| 14590 | i965gm_get_display_clock_speed; | ||
| 14591 | else if (IS_PINEVIEW(dev)) | ||
| 14592 | dev_priv->display.get_display_clock_speed = | ||
| 14593 | pnv_get_display_clock_speed; | ||
| 14594 | else if (IS_G33(dev) || IS_G4X(dev)) | ||
| 14595 | dev_priv->display.get_display_clock_speed = | ||
| 14596 | g33_get_display_clock_speed; | ||
| 14407 | else if (IS_I915G(dev)) | 14597 | else if (IS_I915G(dev)) |
| 14408 | dev_priv->display.get_display_clock_speed = | 14598 | dev_priv->display.get_display_clock_speed = |
| 14409 | i915_get_display_clock_speed; | 14599 | i915_get_display_clock_speed; |
| @@ -14421,10 +14611,12 @@ static void intel_init_display(struct drm_device *dev) | |||
| 14421 | i865_get_display_clock_speed; | 14611 | i865_get_display_clock_speed; |
| 14422 | else if (IS_I85X(dev)) | 14612 | else if (IS_I85X(dev)) |
| 14423 | dev_priv->display.get_display_clock_speed = | 14613 | dev_priv->display.get_display_clock_speed = |
| 14424 | i855_get_display_clock_speed; | 14614 | i85x_get_display_clock_speed; |
| 14425 | else /* 852, 830 */ | 14615 | else { /* 830 */ |
| 14616 | WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n"); | ||
| 14426 | dev_priv->display.get_display_clock_speed = | 14617 | dev_priv->display.get_display_clock_speed = |
| 14427 | i830_get_display_clock_speed; | 14618 | i830_get_display_clock_speed; |
| 14619 | } | ||
| 14428 | 14620 | ||
| 14429 | if (IS_GEN5(dev)) { | 14621 | if (IS_GEN5(dev)) { |
| 14430 | dev_priv->display.fdi_link_train = ironlake_fdi_link_train; | 14622 | dev_priv->display.fdi_link_train = ironlake_fdi_link_train; |
| @@ -14435,12 +14627,22 @@ static void intel_init_display(struct drm_device *dev) | |||
| 14435 | dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; | 14627 | dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; |
| 14436 | } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { | 14628 | } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { |
| 14437 | dev_priv->display.fdi_link_train = hsw_fdi_link_train; | 14629 | dev_priv->display.fdi_link_train = hsw_fdi_link_train; |
| 14630 | if (IS_BROADWELL(dev)) { | ||
| 14631 | dev_priv->display.modeset_commit_cdclk = | ||
| 14632 | broadwell_modeset_commit_cdclk; | ||
| 14633 | dev_priv->display.modeset_calc_cdclk = | ||
| 14634 | broadwell_modeset_calc_cdclk; | ||
| 14635 | } | ||
| 14438 | } else if (IS_VALLEYVIEW(dev)) { | 14636 | } else if (IS_VALLEYVIEW(dev)) { |
| 14439 | dev_priv->display.modeset_global_resources = | 14637 | dev_priv->display.modeset_commit_cdclk = |
| 14440 | valleyview_modeset_global_resources; | 14638 | valleyview_modeset_commit_cdclk; |
| 14639 | dev_priv->display.modeset_calc_cdclk = | ||
| 14640 | valleyview_modeset_calc_cdclk; | ||
| 14441 | } else if (IS_BROXTON(dev)) { | 14641 | } else if (IS_BROXTON(dev)) { |
| 14442 | dev_priv->display.modeset_global_resources = | 14642 | dev_priv->display.modeset_commit_cdclk = |
| 14443 | broxton_modeset_global_resources; | 14643 | broxton_modeset_commit_cdclk; |
| 14644 | dev_priv->display.modeset_calc_cdclk = | ||
| 14645 | broxton_modeset_calc_cdclk; | ||
| 14444 | } | 14646 | } |
| 14445 | 14647 | ||
| 14446 | switch (INTEL_INFO(dev)->gen) { | 14648 | switch (INTEL_INFO(dev)->gen) { |
| @@ -14659,13 +14861,9 @@ static void i915_disable_vga(struct drm_device *dev) | |||
| 14659 | 14861 | ||
| 14660 | void intel_modeset_init_hw(struct drm_device *dev) | 14862 | void intel_modeset_init_hw(struct drm_device *dev) |
| 14661 | { | 14863 | { |
| 14864 | intel_update_cdclk(dev); | ||
| 14662 | intel_prepare_ddi(dev); | 14865 | intel_prepare_ddi(dev); |
| 14663 | |||
| 14664 | if (IS_VALLEYVIEW(dev)) | ||
| 14665 | vlv_update_cdclk(dev); | ||
| 14666 | |||
| 14667 | intel_init_clock_gating(dev); | 14866 | intel_init_clock_gating(dev); |
| 14668 | |||
| 14669 | intel_enable_gt_powersave(dev); | 14867 | intel_enable_gt_powersave(dev); |
| 14670 | } | 14868 | } |
| 14671 | 14869 | ||
| @@ -14745,13 +14943,15 @@ void intel_modeset_init(struct drm_device *dev) | |||
| 14745 | intel_setup_outputs(dev); | 14943 | intel_setup_outputs(dev); |
| 14746 | 14944 | ||
| 14747 | /* Just in case the BIOS is doing something questionable. */ | 14945 | /* Just in case the BIOS is doing something questionable. */ |
| 14748 | intel_fbc_disable(dev); | 14946 | intel_fbc_disable(dev_priv); |
| 14749 | 14947 | ||
| 14750 | drm_modeset_lock_all(dev); | 14948 | drm_modeset_lock_all(dev); |
| 14751 | intel_modeset_setup_hw_state(dev, false); | 14949 | intel_modeset_setup_hw_state(dev); |
| 14752 | drm_modeset_unlock_all(dev); | 14950 | drm_modeset_unlock_all(dev); |
| 14753 | 14951 | ||
| 14754 | for_each_intel_crtc(dev, crtc) { | 14952 | for_each_intel_crtc(dev, crtc) { |
| 14953 | struct intel_initial_plane_config plane_config = {}; | ||
| 14954 | |||
| 14755 | if (!crtc->active) | 14955 | if (!crtc->active) |
| 14756 | continue; | 14956 | continue; |
| 14757 | 14957 | ||
| @@ -14762,15 +14962,14 @@ void intel_modeset_init(struct drm_device *dev) | |||
| 14762 | * can even allow for smooth boot transitions if the BIOS | 14962 | * can even allow for smooth boot transitions if the BIOS |
| 14763 | * fb is large enough for the active pipe configuration. | 14963 | * fb is large enough for the active pipe configuration. |
| 14764 | */ | 14964 | */ |
| 14765 | if (dev_priv->display.get_initial_plane_config) { | 14965 | dev_priv->display.get_initial_plane_config(crtc, |
| 14766 | dev_priv->display.get_initial_plane_config(crtc, | 14966 | &plane_config); |
| 14767 | &crtc->plane_config); | 14967 | |
| 14768 | /* | 14968 | /* |
| 14769 | * If the fb is shared between multiple heads, we'll | 14969 | * If the fb is shared between multiple heads, we'll |
| 14770 | * just get the first one. | 14970 | * just get the first one. |
| 14771 | */ | 14971 | */ |
| 14772 | intel_find_initial_plane_obj(crtc, &crtc->plane_config); | 14972 | intel_find_initial_plane_obj(crtc, &plane_config); |
| 14773 | } | ||
| 14774 | } | 14973 | } |
| 14775 | } | 14974 | } |
| 14776 | 14975 | ||
| @@ -14822,7 +15021,9 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) | |||
| 14822 | { | 15021 | { |
| 14823 | struct drm_device *dev = crtc->base.dev; | 15022 | struct drm_device *dev = crtc->base.dev; |
| 14824 | struct drm_i915_private *dev_priv = dev->dev_private; | 15023 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 15024 | struct intel_encoder *encoder; | ||
| 14825 | u32 reg; | 15025 | u32 reg; |
| 15026 | bool enable; | ||
| 14826 | 15027 | ||
| 14827 | /* Clear any frame start delays used for debugging left by the BIOS */ | 15028 | /* Clear any frame start delays used for debugging left by the BIOS */ |
| 14828 | reg = PIPECONF(crtc->config->cpu_transcoder); | 15029 | reg = PIPECONF(crtc->config->cpu_transcoder); |
| @@ -14831,6 +15032,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) | |||
| 14831 | /* restore vblank interrupts to correct state */ | 15032 | /* restore vblank interrupts to correct state */ |
| 14832 | drm_crtc_vblank_reset(&crtc->base); | 15033 | drm_crtc_vblank_reset(&crtc->base); |
| 14833 | if (crtc->active) { | 15034 | if (crtc->active) { |
| 15035 | drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode); | ||
| 14834 | update_scanline_offset(crtc); | 15036 | update_scanline_offset(crtc); |
| 14835 | drm_crtc_vblank_on(&crtc->base); | 15037 | drm_crtc_vblank_on(&crtc->base); |
| 14836 | } | 15038 | } |
| @@ -14839,7 +15041,6 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) | |||
| 14839 | * disable the crtc (and hence change the state) if it is wrong. Note | 15041 | * disable the crtc (and hence change the state) if it is wrong. Note |
| 14840 | * that gen4+ has a fixed plane -> pipe mapping. */ | 15042 | * that gen4+ has a fixed plane -> pipe mapping. */ |
| 14841 | if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { | 15043 | if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { |
| 14842 | struct intel_connector *connector; | ||
| 14843 | bool plane; | 15044 | bool plane; |
| 14844 | 15045 | ||
| 14845 | DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", | 15046 | DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", |
| @@ -14851,30 +15052,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) | |||
| 14851 | plane = crtc->plane; | 15052 | plane = crtc->plane; |
| 14852 | to_intel_plane_state(crtc->base.primary->state)->visible = true; | 15053 | to_intel_plane_state(crtc->base.primary->state)->visible = true; |
| 14853 | crtc->plane = !plane; | 15054 | crtc->plane = !plane; |
| 14854 | intel_crtc_disable_planes(&crtc->base); | 15055 | intel_crtc_disable_noatomic(&crtc->base); |
| 14855 | dev_priv->display.crtc_disable(&crtc->base); | ||
| 14856 | crtc->plane = plane; | 15056 | crtc->plane = plane; |
| 14857 | |||
| 14858 | /* ... and break all links. */ | ||
| 14859 | for_each_intel_connector(dev, connector) { | ||
| 14860 | if (connector->encoder->base.crtc != &crtc->base) | ||
| 14861 | continue; | ||
| 14862 | |||
| 14863 | connector->base.dpms = DRM_MODE_DPMS_OFF; | ||
| 14864 | connector->base.encoder = NULL; | ||
| 14865 | } | ||
| 14866 | /* multiple connectors may have the same encoder: | ||
| 14867 | * handle them and break crtc link separately */ | ||
| 14868 | for_each_intel_connector(dev, connector) | ||
| 14869 | if (connector->encoder->base.crtc == &crtc->base) { | ||
| 14870 | connector->encoder->base.crtc = NULL; | ||
| 14871 | connector->encoder->connectors_active = false; | ||
| 14872 | } | ||
| 14873 | |||
| 14874 | WARN_ON(crtc->active); | ||
| 14875 | crtc->base.state->enable = false; | ||
| 14876 | crtc->base.state->active = false; | ||
| 14877 | crtc->base.enabled = false; | ||
| 14878 | } | 15057 | } |
| 14879 | 15058 | ||
| 14880 | if (dev_priv->quirks & QUIRK_PIPEA_FORCE && | 15059 | if (dev_priv->quirks & QUIRK_PIPEA_FORCE && |
| @@ -14888,20 +15067,25 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) | |||
| 14888 | 15067 | ||
| 14889 | /* Adjust the state of the output pipe according to whether we | 15068 | /* Adjust the state of the output pipe according to whether we |
| 14890 | * have active connectors/encoders. */ | 15069 | * have active connectors/encoders. */ |
| 14891 | intel_crtc_update_dpms(&crtc->base); | 15070 | enable = false; |
| 15071 | for_each_encoder_on_crtc(dev, &crtc->base, encoder) | ||
| 15072 | enable |= encoder->connectors_active; | ||
| 14892 | 15073 | ||
| 14893 | if (crtc->active != crtc->base.state->enable) { | 15074 | if (!enable) |
| 14894 | struct intel_encoder *encoder; | 15075 | intel_crtc_disable_noatomic(&crtc->base); |
| 15076 | |||
| 15077 | if (crtc->active != crtc->base.state->active) { | ||
| 14895 | 15078 | ||
| 14896 | /* This can happen either due to bugs in the get_hw_state | 15079 | /* This can happen either due to bugs in the get_hw_state |
| 14897 | * functions or because the pipe is force-enabled due to the | 15080 | * functions or because of calls to intel_crtc_disable_noatomic, |
| 15081 | * or because the pipe is force-enabled due to the | ||
| 14898 | * pipe A quirk. */ | 15082 | * pipe A quirk. */ |
| 14899 | DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n", | 15083 | DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n", |
| 14900 | crtc->base.base.id, | 15084 | crtc->base.base.id, |
| 14901 | crtc->base.state->enable ? "enabled" : "disabled", | 15085 | crtc->base.state->enable ? "enabled" : "disabled", |
| 14902 | crtc->active ? "enabled" : "disabled"); | 15086 | crtc->active ? "enabled" : "disabled"); |
| 14903 | 15087 | ||
| 14904 | crtc->base.state->enable = crtc->active; | 15088 | WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0); |
| 14905 | crtc->base.state->active = crtc->active; | 15089 | crtc->base.state->active = crtc->active; |
| 14906 | crtc->base.enabled = crtc->active; | 15090 | crtc->base.enabled = crtc->active; |
| 14907 | 15091 | ||
| @@ -15014,10 +15198,31 @@ static bool primary_get_hw_state(struct intel_crtc *crtc) | |||
| 15014 | { | 15198 | { |
| 15015 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; | 15199 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
| 15016 | 15200 | ||
| 15017 | if (!crtc->active) | 15201 | return !!(I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE); |
| 15018 | return false; | 15202 | } |
| 15019 | 15203 | ||
| 15020 | return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE; | 15204 | static void readout_plane_state(struct intel_crtc *crtc, |
| 15205 | struct intel_crtc_state *crtc_state) | ||
| 15206 | { | ||
| 15207 | struct intel_plane *p; | ||
| 15208 | struct intel_plane_state *plane_state; | ||
| 15209 | bool active = crtc_state->base.active; | ||
| 15210 | |||
| 15211 | for_each_intel_plane(crtc->base.dev, p) { | ||
| 15212 | if (crtc->pipe != p->pipe) | ||
| 15213 | continue; | ||
| 15214 | |||
| 15215 | plane_state = to_intel_plane_state(p->base.state); | ||
| 15216 | |||
| 15217 | if (p->base.type == DRM_PLANE_TYPE_PRIMARY) | ||
| 15218 | plane_state->visible = primary_get_hw_state(crtc); | ||
| 15219 | else { | ||
| 15220 | if (active) | ||
| 15221 | p->disable_plane(&p->base, &crtc->base); | ||
| 15222 | |||
| 15223 | plane_state->visible = false; | ||
| 15224 | } | ||
| 15225 | } | ||
| 15021 | } | 15226 | } |
| 15022 | 15227 | ||
| 15023 | static void intel_modeset_readout_hw_state(struct drm_device *dev) | 15228 | static void intel_modeset_readout_hw_state(struct drm_device *dev) |
| @@ -15030,22 +15235,44 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) | |||
| 15030 | int i; | 15235 | int i; |
| 15031 | 15236 | ||
| 15032 | for_each_intel_crtc(dev, crtc) { | 15237 | for_each_intel_crtc(dev, crtc) { |
| 15033 | struct drm_plane *primary = crtc->base.primary; | 15238 | __drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state); |
| 15034 | struct intel_plane_state *plane_state; | ||
| 15035 | |||
| 15036 | memset(crtc->config, 0, sizeof(*crtc->config)); | 15239 | memset(crtc->config, 0, sizeof(*crtc->config)); |
| 15037 | 15240 | crtc->config->base.crtc = &crtc->base; | |
| 15038 | crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE; | ||
| 15039 | 15241 | ||
| 15040 | crtc->active = dev_priv->display.get_pipe_config(crtc, | 15242 | crtc->active = dev_priv->display.get_pipe_config(crtc, |
| 15041 | crtc->config); | 15243 | crtc->config); |
| 15042 | 15244 | ||
| 15043 | crtc->base.state->enable = crtc->active; | ||
| 15044 | crtc->base.state->active = crtc->active; | 15245 | crtc->base.state->active = crtc->active; |
| 15045 | crtc->base.enabled = crtc->active; | 15246 | crtc->base.enabled = crtc->active; |
| 15046 | 15247 | ||
| 15047 | plane_state = to_intel_plane_state(primary->state); | 15248 | memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); |
| 15048 | plane_state->visible = primary_get_hw_state(crtc); | 15249 | if (crtc->base.state->active) { |
| 15250 | intel_mode_from_pipe_config(&crtc->base.mode, crtc->config); | ||
| 15251 | intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config); | ||
| 15252 | WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); | ||
| 15253 | |||
| 15254 | /* | ||
| 15255 | * The initial mode needs to be set in order to keep | ||
| 15256 | * the atomic core happy. It wants a valid mode if the | ||
| 15257 | * crtc's enabled, so we do the above call. | ||
| 15258 | * | ||
| 15259 | * At this point the connectors' ->detect() callbacks have | ||
| 15260 | * not run yet, so some state they update is still missing | ||
| 15261 | * and no recalculation can be done yet. | ||
| 15262 | * | ||
| 15263 | * Even if we could do a recalculation and modeset | ||
| 15264 | * right now it would cause a double modeset if | ||
| 15265 | * fbdev or userspace chooses a different initial mode. | ||
| 15266 | * | ||
| 15267 | * If that happens, someone indicated they wanted a | ||
| 15268 | * mode change, which means it's safe to do a full | ||
| 15269 | * recalculation. | ||
| 15270 | */ | ||
| 15271 | crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED; | ||
| 15272 | } | ||
| 15273 | |||
| 15274 | crtc->base.hwmode = crtc->config->base.adjusted_mode; | ||
| 15275 | readout_plane_state(crtc, to_intel_crtc_state(crtc->base.state)); | ||
| 15049 | 15276 | ||
| 15050 | DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", | 15277 | DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", |
| 15051 | crtc->base.base.id, | 15278 | crtc->base.base.id, |
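The comment in the readout hunk above explains why the firmware mode is only tagged, not recalculated. Below is a minimal stand-alone sketch of that bookkeeping using the I915_MODE_FLAG_INHERITED value this patch adds to intel_drv.h; the struct names and the check are illustrative stand-ins, not the kernel's own types or call sites.

#include <stdbool.h>
#include <stdio.h>

#define I915_MODE_FLAG_INHERITED 1	/* same value as added in intel_drv.h below */

struct fake_display_mode { unsigned int private_flags; };
struct fake_crtc_state { struct fake_display_mode mode; };

/* A later modeset pass can test this bit to know the mode came from
 * firmware readout, so a full recalculation is safe at that point. */
static bool mode_is_inherited(const struct fake_crtc_state *state)
{
	return state->mode.private_flags & I915_MODE_FLAG_INHERITED;
}

int main(void)
{
	struct fake_crtc_state boot = { .mode = { I915_MODE_FLAG_INHERITED } };

	printf("inherited from firmware: %s\n",
	       mode_is_inherited(&boot) ? "yes" : "no");
	return 0;
}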
| @@ -15108,10 +15335,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) | |||
| 15108 | } | 15335 | } |
| 15109 | } | 15336 | } |
| 15110 | 15337 | ||
| 15111 | /* Scan out the current hw modeset state, sanitizes it and maps it into the drm | 15338 | /* Scans out the current hw modeset state, |
| 15112 | * and i915 state tracking structures. */ | 15339 | * and sanitizes it to the current state |
| 15113 | void intel_modeset_setup_hw_state(struct drm_device *dev, | 15340 | */ |
| 15114 | bool force_restore) | 15341 | static void |
| 15342 | intel_modeset_setup_hw_state(struct drm_device *dev) | ||
| 15115 | { | 15343 | { |
| 15116 | struct drm_i915_private *dev_priv = dev->dev_private; | 15344 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 15117 | enum pipe pipe; | 15345 | enum pipe pipe; |
| @@ -15121,21 +15349,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, | |||
| 15121 | 15349 | ||
| 15122 | intel_modeset_readout_hw_state(dev); | 15350 | intel_modeset_readout_hw_state(dev); |
| 15123 | 15351 | ||
| 15124 | /* | ||
| 15125 | * Now that we have the config, copy it to each CRTC struct | ||
| 15126 | * Note that this could go away if we move to using crtc_config | ||
| 15127 | * checking everywhere. | ||
| 15128 | */ | ||
| 15129 | for_each_intel_crtc(dev, crtc) { | ||
| 15130 | if (crtc->active && i915.fastboot) { | ||
| 15131 | intel_mode_from_pipe_config(&crtc->base.mode, | ||
| 15132 | crtc->config); | ||
| 15133 | DRM_DEBUG_KMS("[CRTC:%d] found active mode: ", | ||
| 15134 | crtc->base.base.id); | ||
| 15135 | drm_mode_debug_printmodeline(&crtc->base.mode); | ||
| 15136 | } | ||
| 15137 | } | ||
| 15138 | |||
| 15139 | /* HW state is read out, now we need to sanitize this mess. */ | 15352 | /* HW state is read out, now we need to sanitize this mess. */ |
| 15140 | for_each_intel_encoder(dev, encoder) { | 15353 | for_each_intel_encoder(dev, encoder) { |
| 15141 | intel_sanitize_encoder(encoder); | 15354 | intel_sanitize_encoder(encoder); |
| @@ -15162,29 +15375,73 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, | |||
| 15162 | pll->on = false; | 15375 | pll->on = false; |
| 15163 | } | 15376 | } |
| 15164 | 15377 | ||
| 15165 | if (IS_GEN9(dev)) | 15378 | if (IS_VALLEYVIEW(dev)) |
| 15379 | vlv_wm_get_hw_state(dev); | ||
| 15380 | else if (IS_GEN9(dev)) | ||
| 15166 | skl_wm_get_hw_state(dev); | 15381 | skl_wm_get_hw_state(dev); |
| 15167 | else if (HAS_PCH_SPLIT(dev)) | 15382 | else if (HAS_PCH_SPLIT(dev)) |
| 15168 | ilk_wm_get_hw_state(dev); | 15383 | ilk_wm_get_hw_state(dev); |
| 15169 | 15384 | ||
| 15170 | if (force_restore) { | 15385 | for_each_intel_crtc(dev, crtc) { |
| 15171 | i915_redisable_vga(dev); | 15386 | unsigned long put_domains; |
| 15172 | 15387 | ||
| 15173 | /* | 15388 | put_domains = modeset_get_crtc_power_domains(&crtc->base); |
| 15174 | * We need to use raw interfaces for restoring state to avoid | 15389 | if (WARN_ON(put_domains)) |
| 15175 | * checking (bogus) intermediate states. | 15390 | modeset_put_power_domains(dev_priv, put_domains); |
| 15176 | */ | 15391 | } |
| 15177 | for_each_pipe(dev_priv, pipe) { | 15392 | intel_display_set_init_power(dev_priv, false); |
| 15178 | struct drm_crtc *crtc = | 15393 | } |
| 15179 | dev_priv->pipe_to_crtc_mapping[pipe]; | ||
| 15180 | 15394 | ||
| 15181 | intel_crtc_restore_mode(crtc); | 15395 | void intel_display_resume(struct drm_device *dev) |
| 15182 | } | 15396 | { |
| 15183 | } else { | 15397 | struct drm_atomic_state *state = drm_atomic_state_alloc(dev); |
| 15184 | intel_modeset_update_staged_output_state(dev); | 15398 | struct intel_connector *conn; |
| 15399 | struct intel_plane *plane; | ||
| 15400 | struct drm_crtc *crtc; | ||
| 15401 | int ret; | ||
| 15402 | |||
| 15403 | if (!state) | ||
| 15404 | return; | ||
| 15405 | |||
| 15406 | state->acquire_ctx = dev->mode_config.acquire_ctx; | ||
| 15407 | |||
| 15408 | /* preserve complete old state, including dpll */ | ||
| 15409 | intel_atomic_get_shared_dpll_state(state); | ||
| 15410 | |||
| 15411 | for_each_crtc(dev, crtc) { | ||
| 15412 | struct drm_crtc_state *crtc_state = | ||
| 15413 | drm_atomic_get_crtc_state(state, crtc); | ||
| 15414 | |||
| 15415 | ret = PTR_ERR_OR_ZERO(crtc_state); | ||
| 15416 | if (ret) | ||
| 15417 | goto err; | ||
| 15418 | |||
| 15419 | /* force a restore */ | ||
| 15420 | crtc_state->mode_changed = true; | ||
| 15421 | } | ||
| 15422 | |||
| 15423 | for_each_intel_plane(dev, plane) { | ||
| 15424 | ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base)); | ||
| 15425 | if (ret) | ||
| 15426 | goto err; | ||
| 15185 | } | 15427 | } |
| 15186 | 15428 | ||
| 15187 | intel_modeset_check_state(dev); | 15429 | for_each_intel_connector(dev, conn) { |
| 15430 | ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base)); | ||
| 15431 | if (ret) | ||
| 15432 | goto err; | ||
| 15433 | } | ||
| 15434 | |||
| 15435 | intel_modeset_setup_hw_state(dev); | ||
| 15436 | |||
| 15437 | i915_redisable_vga(dev); | ||
| 15438 | ret = drm_atomic_commit(state); | ||
| 15439 | if (!ret) | ||
| 15440 | return; | ||
| 15441 | |||
| 15442 | err: | ||
| 15443 | DRM_ERROR("Restoring old state failed with %i\n", ret); | ||
| 15444 | drm_atomic_state_free(state); | ||
| 15188 | } | 15445 | } |
| 15189 | 15446 | ||
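A hedged sketch of how intel_display_resume() might be driven: it reads dev->mode_config.acquire_ctx, which implies the caller already holds the modeset locks, e.g. via drm_modeset_lock_all(). The wrapper, the includes, and the prototype location are assumptions for illustration only; the actual resume call site is outside this hunk.

#include <drm/drmP.h>
#include "intel_drv.h"	/* assumed home of the intel_display_resume() prototype */

/* Illustrative caller only: lock, restore the pre-suspend state, unlock. */
static void example_resume_display(struct drm_device *dev)
{
	drm_modeset_lock_all(dev);	/* provides mode_config.acquire_ctx */
	intel_display_resume(dev);	/* readout + sanitize + atomic commit of old state */
	drm_modeset_unlock_all(dev);
}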
| 15190 | void intel_modeset_gem_init(struct drm_device *dev) | 15447 | void intel_modeset_gem_init(struct drm_device *dev) |
| @@ -15226,14 +15483,16 @@ void intel_modeset_gem_init(struct drm_device *dev) | |||
| 15226 | ret = intel_pin_and_fence_fb_obj(c->primary, | 15483 | ret = intel_pin_and_fence_fb_obj(c->primary, |
| 15227 | c->primary->fb, | 15484 | c->primary->fb, |
| 15228 | c->primary->state, | 15485 | c->primary->state, |
| 15229 | NULL); | 15486 | NULL, NULL); |
| 15230 | mutex_unlock(&dev->struct_mutex); | 15487 | mutex_unlock(&dev->struct_mutex); |
| 15231 | if (ret) { | 15488 | if (ret) { |
| 15232 | DRM_ERROR("failed to pin boot fb on pipe %d\n", | 15489 | DRM_ERROR("failed to pin boot fb on pipe %d\n", |
| 15233 | to_intel_crtc(c)->pipe); | 15490 | to_intel_crtc(c)->pipe); |
| 15234 | drm_framebuffer_unreference(c->primary->fb); | 15491 | drm_framebuffer_unreference(c->primary->fb); |
| 15235 | c->primary->fb = NULL; | 15492 | c->primary->fb = NULL; |
| 15493 | c->primary->crtc = c->primary->state->crtc = NULL; | ||
| 15236 | update_state_fb(c->primary); | 15494 | update_state_fb(c->primary); |
| 15495 | c->state->plane_mask &= ~(1 << drm_plane_index(c->primary)); | ||
| 15237 | } | 15496 | } |
| 15238 | } | 15497 | } |
| 15239 | 15498 | ||
| @@ -15270,13 +15529,9 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
| 15270 | */ | 15529 | */ |
| 15271 | drm_kms_helper_poll_fini(dev); | 15530 | drm_kms_helper_poll_fini(dev); |
| 15272 | 15531 | ||
| 15273 | mutex_lock(&dev->struct_mutex); | ||
| 15274 | |||
| 15275 | intel_unregister_dsm_handler(); | 15532 | intel_unregister_dsm_handler(); |
| 15276 | 15533 | ||
| 15277 | intel_fbc_disable(dev); | 15534 | intel_fbc_disable(dev_priv); |
| 15278 | |||
| 15279 | mutex_unlock(&dev->struct_mutex); | ||
| 15280 | 15535 | ||
| 15281 | /* flush any delayed tasks or pending work */ | 15536 | /* flush any delayed tasks or pending work */ |
| 15282 | flush_scheduled_work(); | 15537 | flush_scheduled_work(); |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 6e8faa253792..f1b9f939b435 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -91,6 +91,8 @@ static const struct dp_link_dpll chv_dpll[] = { | |||
| 91 | { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } } | 91 | { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } } |
| 92 | }; | 92 | }; |
| 93 | 93 | ||
| 94 | static const int bxt_rates[] = { 162000, 216000, 243000, 270000, | ||
| 95 | 324000, 432000, 540000 }; | ||
| 94 | static const int skl_rates[] = { 162000, 216000, 270000, | 96 | static const int skl_rates[] = { 162000, 216000, 270000, |
| 95 | 324000, 432000, 540000 }; | 97 | 324000, 432000, 540000 }; |
| 96 | static const int chv_rates[] = { 162000, 202500, 210000, 216000, | 98 | static const int chv_rates[] = { 162000, 202500, 210000, 216000, |
| @@ -565,7 +567,9 @@ static u32 _pp_ctrl_reg(struct intel_dp *intel_dp) | |||
| 565 | { | 567 | { |
| 566 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 568 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
| 567 | 569 | ||
| 568 | if (HAS_PCH_SPLIT(dev)) | 570 | if (IS_BROXTON(dev)) |
| 571 | return BXT_PP_CONTROL(0); | ||
| 572 | else if (HAS_PCH_SPLIT(dev)) | ||
| 569 | return PCH_PP_CONTROL; | 573 | return PCH_PP_CONTROL; |
| 570 | else | 574 | else |
| 571 | return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp)); | 575 | return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp)); |
| @@ -575,7 +579,9 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp) | |||
| 575 | { | 579 | { |
| 576 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 580 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
| 577 | 581 | ||
| 578 | if (HAS_PCH_SPLIT(dev)) | 582 | if (IS_BROXTON(dev)) |
| 583 | return BXT_PP_STATUS(0); | ||
| 584 | else if (HAS_PCH_SPLIT(dev)) | ||
| 579 | return PCH_PP_STATUS; | 585 | return PCH_PP_STATUS; |
| 580 | else | 586 | else |
| 581 | return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp)); | 587 | return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp)); |
| @@ -708,7 +714,8 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) | |||
| 708 | return 0; | 714 | return 0; |
| 709 | 715 | ||
| 710 | if (intel_dig_port->port == PORT_A) { | 716 | if (intel_dig_port->port == PORT_A) { |
| 711 | return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000); | 717 | return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000); |
| 718 | |||
| 712 | } else { | 719 | } else { |
| 713 | return DIV_ROUND_UP(intel_pch_rawclk(dev), 2); | 720 | return DIV_ROUND_UP(intel_pch_rawclk(dev), 2); |
| 714 | } | 721 | } |
| @@ -723,7 +730,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) | |||
| 723 | if (intel_dig_port->port == PORT_A) { | 730 | if (intel_dig_port->port == PORT_A) { |
| 724 | if (index) | 731 | if (index) |
| 725 | return 0; | 732 | return 0; |
| 726 | return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000); | 733 | return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000); |
| 727 | } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { | 734 | } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { |
| 728 | /* Workaround for non-ULT HSW */ | 735 | /* Workaround for non-ULT HSW */ |
| 729 | switch (index) { | 736 | switch (index) { |
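Worked arithmetic for the divider change above, which switches port A from calling get_display_clock_speed() to the cached dev_priv->cdclk_freq. Assuming cdclk_freq is kept in kHz (the /2000 divisor targets a roughly 2 MHz AUX clock), a quick stand-alone check of the ilk_get_aux_clock_divider() port A math:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int cdclk_khz = 450000;	/* example cdclk; the real value is platform dependent */

	/* same computation as the port A branch above */
	printf("aux divider = %u\n", DIV_ROUND_UP(cdclk_khz, 2000));	/* prints 225 */
	return 0;
}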
| @@ -1172,7 +1179,10 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates) | |||
| 1172 | static int | 1179 | static int |
| 1173 | intel_dp_source_rates(struct drm_device *dev, const int **source_rates) | 1180 | intel_dp_source_rates(struct drm_device *dev, const int **source_rates) |
| 1174 | { | 1181 | { |
| 1175 | if (IS_SKYLAKE(dev)) { | 1182 | if (IS_BROXTON(dev)) { |
| 1183 | *source_rates = bxt_rates; | ||
| 1184 | return ARRAY_SIZE(bxt_rates); | ||
| 1185 | } else if (IS_SKYLAKE(dev)) { | ||
| 1176 | *source_rates = skl_rates; | 1186 | *source_rates = skl_rates; |
| 1177 | return ARRAY_SIZE(skl_rates); | 1187 | return ARRAY_SIZE(skl_rates); |
| 1178 | } else if (IS_CHERRYVIEW(dev)) { | 1188 | } else if (IS_CHERRYVIEW(dev)) { |
| @@ -1374,7 +1384,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
| 1374 | 1384 | ||
| 1375 | if (INTEL_INFO(dev)->gen >= 9) { | 1385 | if (INTEL_INFO(dev)->gen >= 9) { |
| 1376 | int ret; | 1386 | int ret; |
| 1377 | ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0); | 1387 | ret = skl_update_scaler_crtc(pipe_config); |
| 1378 | if (ret) | 1388 | if (ret) |
| 1379 | return ret; | 1389 | return ret; |
| 1380 | } | 1390 | } |
| @@ -1699,8 +1709,10 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) | |||
| 1699 | lockdep_assert_held(&dev_priv->pps_mutex); | 1709 | lockdep_assert_held(&dev_priv->pps_mutex); |
| 1700 | 1710 | ||
| 1701 | control = I915_READ(_pp_ctrl_reg(intel_dp)); | 1711 | control = I915_READ(_pp_ctrl_reg(intel_dp)); |
| 1702 | control &= ~PANEL_UNLOCK_MASK; | 1712 | if (!IS_BROXTON(dev)) { |
| 1703 | control |= PANEL_UNLOCK_REGS; | 1713 | control &= ~PANEL_UNLOCK_MASK; |
| 1714 | control |= PANEL_UNLOCK_REGS; | ||
| 1715 | } | ||
| 1704 | return control; | 1716 | return control; |
| 1705 | } | 1717 | } |
| 1706 | 1718 | ||
| @@ -3414,92 +3426,6 @@ gen7_edp_signal_levels(uint8_t train_set) | |||
| 3414 | } | 3426 | } |
| 3415 | } | 3427 | } |
| 3416 | 3428 | ||
| 3417 | /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */ | ||
| 3418 | static uint32_t | ||
| 3419 | hsw_signal_levels(uint8_t train_set) | ||
| 3420 | { | ||
| 3421 | int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | | ||
| 3422 | DP_TRAIN_PRE_EMPHASIS_MASK); | ||
| 3423 | switch (signal_levels) { | ||
| 3424 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: | ||
| 3425 | return DDI_BUF_TRANS_SELECT(0); | ||
| 3426 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: | ||
| 3427 | return DDI_BUF_TRANS_SELECT(1); | ||
| 3428 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: | ||
| 3429 | return DDI_BUF_TRANS_SELECT(2); | ||
| 3430 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3: | ||
| 3431 | return DDI_BUF_TRANS_SELECT(3); | ||
| 3432 | |||
| 3433 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: | ||
| 3434 | return DDI_BUF_TRANS_SELECT(4); | ||
| 3435 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: | ||
| 3436 | return DDI_BUF_TRANS_SELECT(5); | ||
| 3437 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: | ||
| 3438 | return DDI_BUF_TRANS_SELECT(6); | ||
| 3439 | |||
| 3440 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: | ||
| 3441 | return DDI_BUF_TRANS_SELECT(7); | ||
| 3442 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: | ||
| 3443 | return DDI_BUF_TRANS_SELECT(8); | ||
| 3444 | |||
| 3445 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0: | ||
| 3446 | return DDI_BUF_TRANS_SELECT(9); | ||
| 3447 | default: | ||
| 3448 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" | ||
| 3449 | "0x%x\n", signal_levels); | ||
| 3450 | return DDI_BUF_TRANS_SELECT(0); | ||
| 3451 | } | ||
| 3452 | } | ||
| 3453 | |||
| 3454 | static void bxt_signal_levels(struct intel_dp *intel_dp) | ||
| 3455 | { | ||
| 3456 | struct intel_digital_port *dport = dp_to_dig_port(intel_dp); | ||
| 3457 | enum port port = dport->port; | ||
| 3458 | struct drm_device *dev = dport->base.base.dev; | ||
| 3459 | struct intel_encoder *encoder = &dport->base; | ||
| 3460 | uint8_t train_set = intel_dp->train_set[0]; | ||
| 3461 | uint32_t level = 0; | ||
| 3462 | |||
| 3463 | int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | | ||
| 3464 | DP_TRAIN_PRE_EMPHASIS_MASK); | ||
| 3465 | switch (signal_levels) { | ||
| 3466 | default: | ||
| 3467 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n"); | ||
| 3468 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: | ||
| 3469 | level = 0; | ||
| 3470 | break; | ||
| 3471 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: | ||
| 3472 | level = 1; | ||
| 3473 | break; | ||
| 3474 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: | ||
| 3475 | level = 2; | ||
| 3476 | break; | ||
| 3477 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3: | ||
| 3478 | level = 3; | ||
| 3479 | break; | ||
| 3480 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: | ||
| 3481 | level = 4; | ||
| 3482 | break; | ||
| 3483 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: | ||
| 3484 | level = 5; | ||
| 3485 | break; | ||
| 3486 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: | ||
| 3487 | level = 6; | ||
| 3488 | break; | ||
| 3489 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: | ||
| 3490 | level = 7; | ||
| 3491 | break; | ||
| 3492 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: | ||
| 3493 | level = 8; | ||
| 3494 | break; | ||
| 3495 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0: | ||
| 3496 | level = 9; | ||
| 3497 | break; | ||
| 3498 | } | ||
| 3499 | |||
| 3500 | bxt_ddi_vswing_sequence(dev, level, port, encoder->type); | ||
| 3501 | } | ||
| 3502 | |||
| 3503 | /* Properly updates "DP" with the correct signal levels. */ | 3429 | /* Properly updates "DP" with the correct signal levels. */ |
| 3504 | static void | 3430 | static void |
| 3505 | intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) | 3431 | intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) |
| @@ -3507,22 +3433,20 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) | |||
| 3507 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 3433 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
| 3508 | enum port port = intel_dig_port->port; | 3434 | enum port port = intel_dig_port->port; |
| 3509 | struct drm_device *dev = intel_dig_port->base.base.dev; | 3435 | struct drm_device *dev = intel_dig_port->base.base.dev; |
| 3510 | uint32_t signal_levels, mask; | 3436 | uint32_t signal_levels, mask = 0; |
| 3511 | uint8_t train_set = intel_dp->train_set[0]; | 3437 | uint8_t train_set = intel_dp->train_set[0]; |
| 3512 | 3438 | ||
| 3513 | if (IS_BROXTON(dev)) { | 3439 | if (HAS_DDI(dev)) { |
| 3514 | signal_levels = 0; | 3440 | signal_levels = ddi_signal_levels(intel_dp); |
| 3515 | bxt_signal_levels(intel_dp); | 3441 | |
| 3516 | mask = 0; | 3442 | if (IS_BROXTON(dev)) |
| 3517 | } else if (HAS_DDI(dev)) { | 3443 | signal_levels = 0; |
| 3518 | signal_levels = hsw_signal_levels(train_set); | 3444 | else |
| 3519 | mask = DDI_BUF_EMP_MASK; | 3445 | mask = DDI_BUF_EMP_MASK; |
| 3520 | } else if (IS_CHERRYVIEW(dev)) { | 3446 | } else if (IS_CHERRYVIEW(dev)) { |
| 3521 | signal_levels = chv_signal_levels(intel_dp); | 3447 | signal_levels = chv_signal_levels(intel_dp); |
| 3522 | mask = 0; | ||
| 3523 | } else if (IS_VALLEYVIEW(dev)) { | 3448 | } else if (IS_VALLEYVIEW(dev)) { |
| 3524 | signal_levels = vlv_signal_levels(intel_dp); | 3449 | signal_levels = vlv_signal_levels(intel_dp); |
| 3525 | mask = 0; | ||
| 3526 | } else if (IS_GEN7(dev) && port == PORT_A) { | 3450 | } else if (IS_GEN7(dev) && port == PORT_A) { |
| 3527 | signal_levels = gen7_edp_signal_levels(train_set); | 3451 | signal_levels = gen7_edp_signal_levels(train_set); |
| 3528 | mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; | 3452 | mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; |
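For context on the consolidation above: DDI platforms other than BXT OR the selected level into the DP register under DDI_BUF_EMP_MASK, while BXT programs vswing through the DDI buffer translation tables, so both mask and signal_levels stay 0 and the register bits are left alone. The stand-alone sketch below shows the assumed apply step; the tail of intel_dp_set_signal_levels() is outside this hunk, so treat this as an assumption about how the mask/levels pair is consumed, not as the patched code.

#include <stdint.h>
#include <stdio.h>

/* Assumed apply step: clear the masked bits, then OR in the new levels. */
static uint32_t apply_signal_levels(uint32_t DP, uint32_t mask, uint32_t levels)
{
	return (DP & ~mask) | levels;
}

int main(void)
{
	uint32_t DP = 0x80000000u;	/* pretend current register value */

	/* BXT path: mask == 0 and levels == 0, so the register is untouched */
	printf("bxt: %#x\n", apply_signal_levels(DP, 0, 0));
	/* a non-BXT DDI path would pass DDI_BUF_EMP_MASK and a DDI_BUF_TRANS_SELECT() value */
	return 0;
}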
| @@ -4922,12 +4846,6 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = { | |||
| 4922 | .destroy = intel_dp_encoder_destroy, | 4846 | .destroy = intel_dp_encoder_destroy, |
| 4923 | }; | 4847 | }; |
| 4924 | 4848 | ||
| 4925 | void | ||
| 4926 | intel_dp_hot_plug(struct intel_encoder *intel_encoder) | ||
| 4927 | { | ||
| 4928 | return; | ||
| 4929 | } | ||
| 4930 | |||
| 4931 | enum irqreturn | 4849 | enum irqreturn |
| 4932 | intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) | 4850 | intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) |
| 4933 | { | 4851 | { |
| @@ -5095,8 +5013,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, | |||
| 5095 | struct drm_i915_private *dev_priv = dev->dev_private; | 5013 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5096 | struct edp_power_seq cur, vbt, spec, | 5014 | struct edp_power_seq cur, vbt, spec, |
| 5097 | *final = &intel_dp->pps_delays; | 5015 | *final = &intel_dp->pps_delays; |
| 5098 | u32 pp_on, pp_off, pp_div, pp; | 5016 | u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0; |
| 5099 | int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg; | 5017 | int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0; |
| 5100 | 5018 | ||
| 5101 | lockdep_assert_held(&dev_priv->pps_mutex); | 5019 | lockdep_assert_held(&dev_priv->pps_mutex); |
| 5102 | 5020 | ||
| @@ -5104,7 +5022,16 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, | |||
| 5104 | if (final->t11_t12 != 0) | 5022 | if (final->t11_t12 != 0) |
| 5105 | return; | 5023 | return; |
| 5106 | 5024 | ||
| 5107 | if (HAS_PCH_SPLIT(dev)) { | 5025 | if (IS_BROXTON(dev)) { |
| 5026 | /* | ||
| 5027 | * TODO: BXT has 2 sets of PPS registers. | ||
| 5028 | * The correct register for Broxton needs to be identified | ||
| 5029 | * using VBT; hardcoding for now. | ||
| 5030 | */ | ||
| 5031 | pp_ctrl_reg = BXT_PP_CONTROL(0); | ||
| 5032 | pp_on_reg = BXT_PP_ON_DELAYS(0); | ||
| 5033 | pp_off_reg = BXT_PP_OFF_DELAYS(0); | ||
| 5034 | } else if (HAS_PCH_SPLIT(dev)) { | ||
| 5108 | pp_ctrl_reg = PCH_PP_CONTROL; | 5035 | pp_ctrl_reg = PCH_PP_CONTROL; |
| 5109 | pp_on_reg = PCH_PP_ON_DELAYS; | 5036 | pp_on_reg = PCH_PP_ON_DELAYS; |
| 5110 | pp_off_reg = PCH_PP_OFF_DELAYS; | 5037 | pp_off_reg = PCH_PP_OFF_DELAYS; |
| @@ -5120,12 +5047,14 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, | |||
| 5120 | 5047 | ||
| 5121 | /* Workaround: Need to write PP_CONTROL with the unlock key as | 5048 | /* Workaround: Need to write PP_CONTROL with the unlock key as |
| 5122 | * the very first thing. */ | 5049 | * the very first thing. */ |
| 5123 | pp = ironlake_get_pp_control(intel_dp); | 5050 | pp_ctl = ironlake_get_pp_control(intel_dp); |
| 5124 | I915_WRITE(pp_ctrl_reg, pp); | ||
| 5125 | 5051 | ||
| 5126 | pp_on = I915_READ(pp_on_reg); | 5052 | pp_on = I915_READ(pp_on_reg); |
| 5127 | pp_off = I915_READ(pp_off_reg); | 5053 | pp_off = I915_READ(pp_off_reg); |
| 5128 | pp_div = I915_READ(pp_div_reg); | 5054 | if (!IS_BROXTON(dev)) { |
| 5055 | I915_WRITE(pp_ctrl_reg, pp_ctl); | ||
| 5056 | pp_div = I915_READ(pp_div_reg); | ||
| 5057 | } | ||
| 5129 | 5058 | ||
| 5130 | /* Pull timing values out of registers */ | 5059 | /* Pull timing values out of registers */ |
| 5131 | cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> | 5060 | cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> |
| @@ -5140,8 +5069,17 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, | |||
| 5140 | cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> | 5069 | cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> |
| 5141 | PANEL_POWER_DOWN_DELAY_SHIFT; | 5070 | PANEL_POWER_DOWN_DELAY_SHIFT; |
| 5142 | 5071 | ||
| 5143 | cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> | 5072 | if (IS_BROXTON(dev)) { |
| 5073 | u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >> | ||
| 5074 | BXT_POWER_CYCLE_DELAY_SHIFT; | ||
| 5075 | if (tmp > 0) | ||
| 5076 | cur.t11_t12 = (tmp - 1) * 1000; | ||
| 5077 | else | ||
| 5078 | cur.t11_t12 = 0; | ||
| 5079 | } else { | ||
| 5080 | cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> | ||
| 5144 | PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; | 5081 | PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; |
| 5082 | } | ||
| 5145 | 5083 | ||
| 5146 | DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", | 5084 | DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", |
| 5147 | cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); | 5085 | cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); |
| @@ -5198,13 +5136,23 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, | |||
| 5198 | struct drm_i915_private *dev_priv = dev->dev_private; | 5136 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5199 | u32 pp_on, pp_off, pp_div, port_sel = 0; | 5137 | u32 pp_on, pp_off, pp_div, port_sel = 0; |
| 5200 | int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev); | 5138 | int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev); |
| 5201 | int pp_on_reg, pp_off_reg, pp_div_reg; | 5139 | int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg; |
| 5202 | enum port port = dp_to_dig_port(intel_dp)->port; | 5140 | enum port port = dp_to_dig_port(intel_dp)->port; |
| 5203 | const struct edp_power_seq *seq = &intel_dp->pps_delays; | 5141 | const struct edp_power_seq *seq = &intel_dp->pps_delays; |
| 5204 | 5142 | ||
| 5205 | lockdep_assert_held(&dev_priv->pps_mutex); | 5143 | lockdep_assert_held(&dev_priv->pps_mutex); |
| 5206 | 5144 | ||
| 5207 | if (HAS_PCH_SPLIT(dev)) { | 5145 | if (IS_BROXTON(dev)) { |
| 5146 | /* | ||
| 5147 | * TODO: BXT has 2 sets of PPS registers. | ||
| 5148 | * Correct Register for Broxton need to be identified | ||
| 5149 | * using VBT. hardcoding for now | ||
| 5150 | */ | ||
| 5151 | pp_ctrl_reg = BXT_PP_CONTROL(0); | ||
| 5152 | pp_on_reg = BXT_PP_ON_DELAYS(0); | ||
| 5153 | pp_off_reg = BXT_PP_OFF_DELAYS(0); | ||
| 5154 | |||
| 5155 | } else if (HAS_PCH_SPLIT(dev)) { | ||
| 5208 | pp_on_reg = PCH_PP_ON_DELAYS; | 5156 | pp_on_reg = PCH_PP_ON_DELAYS; |
| 5209 | pp_off_reg = PCH_PP_OFF_DELAYS; | 5157 | pp_off_reg = PCH_PP_OFF_DELAYS; |
| 5210 | pp_div_reg = PCH_PP_DIVISOR; | 5158 | pp_div_reg = PCH_PP_DIVISOR; |
| @@ -5230,9 +5178,16 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, | |||
| 5230 | (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); | 5178 | (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); |
| 5231 | /* Compute the divisor for the pp clock, simply match the Bspec | 5179 | /* Compute the divisor for the pp clock, simply match the Bspec |
| 5232 | * formula. */ | 5180 | * formula. */ |
| 5233 | pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT; | 5181 | if (IS_BROXTON(dev)) { |
| 5234 | pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) | 5182 | pp_div = I915_READ(pp_ctrl_reg); |
| 5235 | << PANEL_POWER_CYCLE_DELAY_SHIFT); | 5183 | pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK; |
| 5184 | pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000) | ||
| 5185 | << BXT_POWER_CYCLE_DELAY_SHIFT); | ||
| 5186 | } else { | ||
| 5187 | pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT; | ||
| 5188 | pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) | ||
| 5189 | << PANEL_POWER_CYCLE_DELAY_SHIFT); | ||
| 5190 | } | ||
| 5236 | 5191 | ||
| 5237 | /* Haswell doesn't have any port selection bits for the panel | 5192 | /* Haswell doesn't have any port selection bits for the panel |
| 5238 | * power sequencer any more. */ | 5193 | * power sequencer any more. */ |
| @@ -5249,11 +5204,16 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, | |||
| 5249 | 5204 | ||
| 5250 | I915_WRITE(pp_on_reg, pp_on); | 5205 | I915_WRITE(pp_on_reg, pp_on); |
| 5251 | I915_WRITE(pp_off_reg, pp_off); | 5206 | I915_WRITE(pp_off_reg, pp_off); |
| 5252 | I915_WRITE(pp_div_reg, pp_div); | 5207 | if (IS_BROXTON(dev)) |
| 5208 | I915_WRITE(pp_ctrl_reg, pp_div); | ||
| 5209 | else | ||
| 5210 | I915_WRITE(pp_div_reg, pp_div); | ||
| 5253 | 5211 | ||
| 5254 | DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", | 5212 | DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", |
| 5255 | I915_READ(pp_on_reg), | 5213 | I915_READ(pp_on_reg), |
| 5256 | I915_READ(pp_off_reg), | 5214 | I915_READ(pp_off_reg), |
| 5215 | IS_BROXTON(dev) ? | ||
| 5216 | (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) : | ||
| 5257 | I915_READ(pp_div_reg)); | 5217 | I915_READ(pp_div_reg)); |
| 5258 | } | 5218 | } |
| 5259 | 5219 | ||
| @@ -5458,13 +5418,12 @@ unlock: | |||
| 5458 | } | 5418 | } |
| 5459 | 5419 | ||
| 5460 | /** | 5420 | /** |
| 5461 | * intel_edp_drrs_invalidate - Invalidate DRRS | 5421 | * intel_edp_drrs_invalidate - Disable Idleness DRRS |
| 5462 | * @dev: DRM device | 5422 | * @dev: DRM device |
| 5463 | * @frontbuffer_bits: frontbuffer plane tracking bits | 5423 | * @frontbuffer_bits: frontbuffer plane tracking bits |
| 5464 | * | 5424 | * |
| 5465 | * When there is a disturbance on screen (due to cursor movement/time | 5425 | * This function gets called every time rendering on the given planes starts. |
| 5466 | * update etc), DRRS needs to be invalidated, i.e. need to switch to | 5426 | * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR). |
| 5467 | * high RR. | ||
| 5468 | * | 5427 | * |
| 5469 | * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. | 5428 | * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. |
| 5470 | */ | 5429 | */ |
| @@ -5489,26 +5448,27 @@ void intel_edp_drrs_invalidate(struct drm_device *dev, | |||
| 5489 | crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; | 5448 | crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; |
| 5490 | pipe = to_intel_crtc(crtc)->pipe; | 5449 | pipe = to_intel_crtc(crtc)->pipe; |
| 5491 | 5450 | ||
| 5492 | if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) { | 5451 | frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); |
| 5452 | dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits; | ||
| 5453 | |||
| 5454 | /* invalidate means busy screen hence upclock */ | ||
| 5455 | if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) | ||
| 5493 | intel_dp_set_drrs_state(dev_priv->dev, | 5456 | intel_dp_set_drrs_state(dev_priv->dev, |
| 5494 | dev_priv->drrs.dp->attached_connector->panel. | 5457 | dev_priv->drrs.dp->attached_connector->panel. |
| 5495 | fixed_mode->vrefresh); | 5458 | fixed_mode->vrefresh); |
| 5496 | } | ||
| 5497 | |||
| 5498 | frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); | ||
| 5499 | 5459 | ||
| 5500 | dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits; | ||
| 5501 | mutex_unlock(&dev_priv->drrs.mutex); | 5460 | mutex_unlock(&dev_priv->drrs.mutex); |
| 5502 | } | 5461 | } |
| 5503 | 5462 | ||
| 5504 | /** | 5463 | /** |
| 5505 | * intel_edp_drrs_flush - Flush DRRS | 5464 | * intel_edp_drrs_flush - Restart Idleness DRRS |
| 5506 | * @dev: DRM device | 5465 | * @dev: DRM device |
| 5507 | * @frontbuffer_bits: frontbuffer plane tracking bits | 5466 | * @frontbuffer_bits: frontbuffer plane tracking bits |
| 5508 | * | 5467 | * |
| 5509 | * When there is no movement on screen, DRRS work can be scheduled. | 5468 | * This function gets called every time rendering on the given planes has |
| 5510 | * This DRRS work is responsible for setting relevant registers after a | 5469 | * completed or a flip on a crtc has completed. DRRS should then be upclocked |
| 5511 | * timeout of 1 second. | 5470 | * (LOW_RR -> HIGH_RR), and idleness detection should be started again |
| 5471 | * if no other planes are dirty. | ||
| 5512 | * | 5472 | * |
| 5513 | * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. | 5473 | * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. |
| 5514 | */ | 5474 | */ |
| @@ -5532,10 +5492,21 @@ void intel_edp_drrs_flush(struct drm_device *dev, | |||
| 5532 | 5492 | ||
| 5533 | crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; | 5493 | crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; |
| 5534 | pipe = to_intel_crtc(crtc)->pipe; | 5494 | pipe = to_intel_crtc(crtc)->pipe; |
| 5495 | |||
| 5496 | frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); | ||
| 5535 | dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; | 5497 | dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; |
| 5536 | 5498 | ||
| 5537 | if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR && | 5499 | /* flush means busy screen hence upclock */ |
| 5538 | !dev_priv->drrs.busy_frontbuffer_bits) | 5500 | if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) |
| 5501 | intel_dp_set_drrs_state(dev_priv->dev, | ||
| 5502 | dev_priv->drrs.dp->attached_connector->panel. | ||
| 5503 | fixed_mode->vrefresh); | ||
| 5504 | |||
| 5505 | /* | ||
| 5506 | * flush also means no more activity hence schedule downclock, if all | ||
| 5507 | * other fbs are quiescent too | ||
| 5508 | */ | ||
| 5509 | if (!dev_priv->drrs.busy_frontbuffer_bits) | ||
| 5539 | schedule_delayed_work(&dev_priv->drrs.work, | 5510 | schedule_delayed_work(&dev_priv->drrs.work, |
| 5540 | msecs_to_jiffies(1000)); | 5511 | msecs_to_jiffies(1000)); |
| 5541 | mutex_unlock(&dev_priv->drrs.mutex); | 5512 | mutex_unlock(&dev_priv->drrs.mutex); |
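A minimal stand-alone sketch of the invalidate/flush bookkeeping documented in the two hunks above, with stand-in types: invalidate marks the pipe's frontbuffer bits busy and upclocks when in LOW_RR; flush clears them, also upclocks, and only re-arms the idleness (downclock) work once no bits remain busy. The bit layout and helper names are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

enum drrs_rate { DRRS_HIGH_RR, DRRS_LOW_RR };

struct drrs_demo {
	unsigned int busy_bits;		/* stand-in for busy_frontbuffer_bits */
	enum drrs_rate rate;
	bool idle_work_armed;
};

static void demo_invalidate(struct drrs_demo *d, unsigned int bits)
{
	d->busy_bits |= bits;
	if (bits && d->rate == DRRS_LOW_RR)
		d->rate = DRRS_HIGH_RR;		/* busy screen: upclock */
}

static void demo_flush(struct drrs_demo *d, unsigned int bits)
{
	d->busy_bits &= ~bits;
	if (bits && d->rate == DRRS_LOW_RR)
		d->rate = DRRS_HIGH_RR;		/* render/flip done: upclock */
	if (!d->busy_bits)
		d->idle_work_armed = true;	/* schedule the downclock work after idleness */
}

int main(void)
{
	struct drrs_demo d = { .rate = DRRS_LOW_RR };

	demo_invalidate(&d, 0x3);
	demo_flush(&d, 0x3);
	printf("rate=%d idle_work=%d\n", d.rate, d.idle_work_armed);
	return 0;
}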
| @@ -5939,10 +5910,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) | |||
| 5939 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); | 5910 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
| 5940 | } | 5911 | } |
| 5941 | intel_encoder->cloneable = 0; | 5912 | intel_encoder->cloneable = 0; |
| 5942 | intel_encoder->hot_plug = intel_dp_hot_plug; | ||
| 5943 | 5913 | ||
| 5944 | intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; | 5914 | intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; |
| 5945 | dev_priv->hpd_irq_port[port] = intel_dig_port; | 5915 | dev_priv->hotplug.irq_port[port] = intel_dig_port; |
| 5946 | 5916 | ||
| 5947 | if (!intel_dp_init_connector(intel_dig_port, intel_connector)) { | 5917 | if (!intel_dp_init_connector(intel_dig_port, intel_connector)) { |
| 5948 | drm_encoder_cleanup(encoder); | 5918 | drm_encoder_cleanup(encoder); |
| @@ -5958,7 +5928,7 @@ void intel_dp_mst_suspend(struct drm_device *dev) | |||
| 5958 | 5928 | ||
| 5959 | /* disable MST */ | 5929 | /* disable MST */ |
| 5960 | for (i = 0; i < I915_MAX_PORTS; i++) { | 5930 | for (i = 0; i < I915_MAX_PORTS; i++) { |
| 5961 | struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i]; | 5931 | struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i]; |
| 5962 | if (!intel_dig_port) | 5932 | if (!intel_dig_port) |
| 5963 | continue; | 5933 | continue; |
| 5964 | 5934 | ||
| @@ -5977,7 +5947,7 @@ void intel_dp_mst_resume(struct drm_device *dev) | |||
| 5977 | int i; | 5947 | int i; |
| 5978 | 5948 | ||
| 5979 | for (i = 0; i < I915_MAX_PORTS; i++) { | 5949 | for (i = 0; i < I915_MAX_PORTS; i++) { |
| 5980 | struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i]; | 5950 | struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i]; |
| 5981 | if (!intel_dig_port) | 5951 | if (!intel_dig_port) |
| 5982 | continue; | 5952 | continue; |
| 5983 | if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) { | 5953 | if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) { |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 105928382e21..3b00d00c0bc0 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -130,11 +130,6 @@ struct intel_fbdev { | |||
| 130 | 130 | ||
| 131 | struct intel_encoder { | 131 | struct intel_encoder { |
| 132 | struct drm_encoder base; | 132 | struct drm_encoder base; |
| 133 | /* | ||
| 134 | * The new crtc this encoder will be driven from. Only differs from | ||
| 135 | * base->crtc while a modeset is in progress. | ||
| 136 | */ | ||
| 137 | struct intel_crtc *new_crtc; | ||
| 138 | 133 | ||
| 139 | enum intel_output_type type; | 134 | enum intel_output_type type; |
| 140 | unsigned int cloneable; | 135 | unsigned int cloneable; |
| @@ -195,12 +190,6 @@ struct intel_connector { | |||
| 195 | */ | 190 | */ |
| 196 | struct intel_encoder *encoder; | 191 | struct intel_encoder *encoder; |
| 197 | 192 | ||
| 198 | /* | ||
| 199 | * The new encoder this connector will be driven. Only differs from | ||
| 200 | * encoder while a modeset is in progress. | ||
| 201 | */ | ||
| 202 | struct intel_encoder *new_encoder; | ||
| 203 | |||
| 204 | /* Reads out the current hw, returning true if the connector is enabled | 193 | /* Reads out the current hw, returning true if the connector is enabled |
| 205 | * and active (i.e. dpms ON state). */ | 194 | * and active (i.e. dpms ON state). */ |
| 206 | bool (*get_hw_state)(struct intel_connector *); | 195 | bool (*get_hw_state)(struct intel_connector *); |
| @@ -241,6 +230,14 @@ typedef struct dpll { | |||
| 241 | int p; | 230 | int p; |
| 242 | } intel_clock_t; | 231 | } intel_clock_t; |
| 243 | 232 | ||
| 233 | struct intel_atomic_state { | ||
| 234 | struct drm_atomic_state base; | ||
| 235 | |||
| 236 | unsigned int cdclk; | ||
| 237 | bool dpll_set; | ||
| 238 | struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS]; | ||
| 239 | }; | ||
| 240 | |||
| 244 | struct intel_plane_state { | 241 | struct intel_plane_state { |
| 245 | struct drm_plane_state base; | 242 | struct drm_plane_state base; |
| 246 | struct drm_rect src; | 243 | struct drm_rect src; |
| @@ -256,7 +253,7 @@ struct intel_plane_state { | |||
| 256 | * plane requiring a scaler: | 253 | * plane requiring a scaler: |
| 257 | * - During check_plane, its bit is set in | 254 | * - During check_plane, its bit is set in |
| 258 | * crtc_state->scaler_state.scaler_users by calling helper function | 255 | * crtc_state->scaler_state.scaler_users by calling helper function |
| 259 | * update_scaler_users. | 256 | * update_scaler_plane. |
| 260 | * - scaler_id indicates the scaler it got assigned. | 257 | * - scaler_id indicates the scaler it got assigned. |
| 261 | * | 258 | * |
| 262 | * plane doesn't require a scaler: | 259 | * plane doesn't require a scaler: |
| @@ -264,9 +261,11 @@ struct intel_plane_state { | |||
| 264 | * got disabled. | 261 | * got disabled. |
| 265 | * - During check_plane, corresponding bit is reset in | 262 | * - During check_plane, corresponding bit is reset in |
| 266 | * crtc_state->scaler_state.scaler_users by calling helper function | 263 | * crtc_state->scaler_state.scaler_users by calling helper function |
| 267 | * update_scaler_users. | 264 | * update_scaler_plane. |
| 268 | */ | 265 | */ |
| 269 | int scaler_id; | 266 | int scaler_id; |
| 267 | |||
| 268 | struct drm_intel_sprite_colorkey ckey; | ||
| 270 | }; | 269 | }; |
| 271 | 270 | ||
| 272 | struct intel_initial_plane_config { | 271 | struct intel_initial_plane_config { |
| @@ -286,7 +285,6 @@ struct intel_initial_plane_config { | |||
| 286 | #define SKL_MAX_DST_H 4096 | 285 | #define SKL_MAX_DST_H 4096 |
| 287 | 286 | ||
| 288 | struct intel_scaler { | 287 | struct intel_scaler { |
| 289 | int id; | ||
| 290 | int in_use; | 288 | int in_use; |
| 291 | uint32_t mode; | 289 | uint32_t mode; |
| 292 | }; | 290 | }; |
| @@ -319,6 +317,9 @@ struct intel_crtc_scaler_state { | |||
| 319 | int scaler_id; | 317 | int scaler_id; |
| 320 | }; | 318 | }; |
| 321 | 319 | ||
| 320 | /* drm_mode->private_flags */ | ||
| 321 | #define I915_MODE_FLAG_INHERITED 1 | ||
| 322 | |||
| 322 | struct intel_crtc_state { | 323 | struct intel_crtc_state { |
| 323 | struct drm_crtc_state base; | 324 | struct drm_crtc_state base; |
| 324 | 325 | ||
| @@ -331,7 +332,6 @@ struct intel_crtc_state { | |||
| 331 | * accordingly. | 332 | * accordingly. |
| 332 | */ | 333 | */ |
| 333 | #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ | 334 | #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ |
| 334 | #define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */ | ||
| 335 | unsigned long quirks; | 335 | unsigned long quirks; |
| 336 | 336 | ||
| 337 | /* Pipe source size (ie. panel fitter input size) | 337 | /* Pipe source size (ie. panel fitter input size) |
| @@ -447,6 +447,18 @@ struct intel_crtc_state { | |||
| 447 | int pbn; | 447 | int pbn; |
| 448 | 448 | ||
| 449 | struct intel_crtc_scaler_state scaler_state; | 449 | struct intel_crtc_scaler_state scaler_state; |
| 450 | |||
| 451 | /* w/a for waiting 2 vblanks during crtc enable */ | ||
| 452 | enum pipe hsw_workaround_pipe; | ||
| 453 | }; | ||
| 454 | |||
| 455 | struct vlv_wm_state { | ||
| 456 | struct vlv_pipe_wm wm[3]; | ||
| 457 | struct vlv_sr_wm sr[3]; | ||
| 458 | uint8_t num_active_planes; | ||
| 459 | uint8_t num_levels; | ||
| 460 | uint8_t level; | ||
| 461 | bool cxsr; | ||
| 450 | }; | 462 | }; |
| 451 | 463 | ||
| 452 | struct intel_pipe_wm { | 464 | struct intel_pipe_wm { |
| @@ -478,16 +490,13 @@ struct skl_pipe_wm { | |||
| 478 | * and thus can't be run with interrupts disabled. | 490 | * and thus can't be run with interrupts disabled. |
| 479 | */ | 491 | */ |
| 480 | struct intel_crtc_atomic_commit { | 492 | struct intel_crtc_atomic_commit { |
| 481 | /* vblank evasion */ | ||
| 482 | bool evade; | ||
| 483 | unsigned start_vbl_count; | ||
| 484 | |||
| 485 | /* Sleepable operations to perform before commit */ | 493 | /* Sleepable operations to perform before commit */ |
| 486 | bool wait_for_flips; | 494 | bool wait_for_flips; |
| 487 | bool disable_fbc; | 495 | bool disable_fbc; |
| 488 | bool disable_ips; | 496 | bool disable_ips; |
| 497 | bool disable_cxsr; | ||
| 489 | bool pre_disable_primary; | 498 | bool pre_disable_primary; |
| 490 | bool update_wm; | 499 | bool update_wm_pre, update_wm_post; |
| 491 | unsigned disabled_planes; | 500 | unsigned disabled_planes; |
| 492 | 501 | ||
| 493 | /* Sleepable operations to perform after commit */ | 502 | /* Sleepable operations to perform after commit */ |
| @@ -527,9 +536,7 @@ struct intel_crtc { | |||
| 527 | uint32_t cursor_size; | 536 | uint32_t cursor_size; |
| 528 | uint32_t cursor_base; | 537 | uint32_t cursor_base; |
| 529 | 538 | ||
| 530 | struct intel_initial_plane_config plane_config; | ||
| 531 | struct intel_crtc_state *config; | 539 | struct intel_crtc_state *config; |
| 532 | bool new_enabled; | ||
| 533 | 540 | ||
| 534 | /* reset counter value when the last flip was submitted */ | 541 | /* reset counter value when the last flip was submitted */ |
| 535 | unsigned int reset_counter; | 542 | unsigned int reset_counter; |
| @@ -544,14 +551,19 @@ struct intel_crtc { | |||
| 544 | struct intel_pipe_wm active; | 551 | struct intel_pipe_wm active; |
| 545 | /* SKL wm values currently in use */ | 552 | /* SKL wm values currently in use */ |
| 546 | struct skl_pipe_wm skl_active; | 553 | struct skl_pipe_wm skl_active; |
| 554 | /* allow CxSR on this pipe */ | ||
| 555 | bool cxsr_allowed; | ||
| 547 | } wm; | 556 | } wm; |
| 548 | 557 | ||
| 549 | int scanline_offset; | 558 | int scanline_offset; |
| 550 | 559 | ||
| 560 | unsigned start_vbl_count; | ||
| 551 | struct intel_crtc_atomic_commit atomic; | 561 | struct intel_crtc_atomic_commit atomic; |
| 552 | 562 | ||
| 553 | /* scalers available on this crtc */ | 563 | /* scalers available on this crtc */ |
| 554 | int num_scalers; | 564 | int num_scalers; |
| 565 | |||
| 566 | struct vlv_wm_state wm_state; | ||
| 555 | }; | 567 | }; |
| 556 | 568 | ||
| 557 | struct intel_plane_wm_parameters { | 569 | struct intel_plane_wm_parameters { |
| @@ -570,6 +582,7 @@ struct intel_plane_wm_parameters { | |||
| 570 | bool scaled; | 582 | bool scaled; |
| 571 | u64 tiling; | 583 | u64 tiling; |
| 572 | unsigned int rotation; | 584 | unsigned int rotation; |
| 585 | uint16_t fifo_size; | ||
| 573 | }; | 586 | }; |
| 574 | 587 | ||
| 575 | struct intel_plane { | 588 | struct intel_plane { |
| @@ -578,9 +591,7 @@ struct intel_plane { | |||
| 578 | enum pipe pipe; | 591 | enum pipe pipe; |
| 579 | bool can_scale; | 592 | bool can_scale; |
| 580 | int max_downscale; | 593 | int max_downscale; |
| 581 | 594 | uint32_t frontbuffer_bit; | |
| 582 | /* FIXME convert to properties */ | ||
| 583 | struct drm_intel_sprite_colorkey ckey; | ||
| 584 | 595 | ||
| 585 | /* Since we need to change the watermarks before/after | 596 | /* Since we need to change the watermarks before/after |
| 586 | * enabling/disabling the planes, we need to store the parameters here | 597 | * enabling/disabling the planes, we need to store the parameters here |
| @@ -603,8 +614,9 @@ struct intel_plane { | |||
| 603 | uint32_t x, uint32_t y, | 614 | uint32_t x, uint32_t y, |
| 604 | uint32_t src_w, uint32_t src_h); | 615 | uint32_t src_w, uint32_t src_h); |
| 605 | void (*disable_plane)(struct drm_plane *plane, | 616 | void (*disable_plane)(struct drm_plane *plane, |
| 606 | struct drm_crtc *crtc, bool force); | 617 | struct drm_crtc *crtc); |
| 607 | int (*check_plane)(struct drm_plane *plane, | 618 | int (*check_plane)(struct drm_plane *plane, |
| 619 | struct intel_crtc_state *crtc_state, | ||
| 608 | struct intel_plane_state *state); | 620 | struct intel_plane_state *state); |
| 609 | void (*commit_plane)(struct drm_plane *plane, | 621 | void (*commit_plane)(struct drm_plane *plane, |
| 610 | struct intel_plane_state *state); | 622 | struct intel_plane_state *state); |
| @@ -629,6 +641,7 @@ struct cxsr_latency { | |||
| 629 | unsigned long cursor_hpll_disable; | 641 | unsigned long cursor_hpll_disable; |
| 630 | }; | 642 | }; |
| 631 | 643 | ||
| 644 | #define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base) | ||
| 632 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) | 645 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) |
| 633 | #define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base) | 646 | #define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base) |
| 634 | #define to_intel_connector(x) container_of(x, struct intel_connector, base) | 647 | #define to_intel_connector(x) container_of(x, struct intel_connector, base) |
| @@ -940,43 +953,23 @@ void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder); | |||
| 940 | void intel_ddi_clock_get(struct intel_encoder *encoder, | 953 | void intel_ddi_clock_get(struct intel_encoder *encoder, |
| 941 | struct intel_crtc_state *pipe_config); | 954 | struct intel_crtc_state *pipe_config); |
| 942 | void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state); | 955 | void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state); |
| 943 | void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level, | 956 | uint32_t ddi_signal_levels(struct intel_dp *intel_dp); |
| 944 | enum port port, int type); | ||
| 945 | 957 | ||
| 946 | /* intel_frontbuffer.c */ | 958 | /* intel_frontbuffer.c */ |
| 947 | void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, | 959 | void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, |
| 948 | struct intel_engine_cs *ring, | ||
| 949 | enum fb_op_origin origin); | 960 | enum fb_op_origin origin); |
| 950 | void intel_frontbuffer_flip_prepare(struct drm_device *dev, | 961 | void intel_frontbuffer_flip_prepare(struct drm_device *dev, |
| 951 | unsigned frontbuffer_bits); | 962 | unsigned frontbuffer_bits); |
| 952 | void intel_frontbuffer_flip_complete(struct drm_device *dev, | 963 | void intel_frontbuffer_flip_complete(struct drm_device *dev, |
| 953 | unsigned frontbuffer_bits); | 964 | unsigned frontbuffer_bits); |
| 954 | void intel_frontbuffer_flush(struct drm_device *dev, | ||
| 955 | unsigned frontbuffer_bits); | ||
| 956 | /** | ||
| 957 | * intel_frontbuffer_flip - synchronous frontbuffer flip | ||
| 958 | * @dev: DRM device | ||
| 959 | * @frontbuffer_bits: frontbuffer plane tracking bits | ||
| 960 | * | ||
| 961 | * This function gets called after scheduling a flip on @obj. This is for | ||
| 962 | * synchronous plane updates which will happen on the next vblank and which will | ||
| 963 | * not get delayed by pending gpu rendering. | ||
| 964 | * | ||
| 965 | * Can be called without any locks held. | ||
| 966 | */ | ||
| 967 | static inline | ||
| 968 | void intel_frontbuffer_flip(struct drm_device *dev, | 965 | void intel_frontbuffer_flip(struct drm_device *dev, |
| 969 | unsigned frontbuffer_bits) | 966 | unsigned frontbuffer_bits); |
| 970 | { | ||
| 971 | intel_frontbuffer_flush(dev, frontbuffer_bits); | ||
| 972 | } | ||
| 973 | |||
| 974 | unsigned int intel_fb_align_height(struct drm_device *dev, | 967 | unsigned int intel_fb_align_height(struct drm_device *dev, |
| 975 | unsigned int height, | 968 | unsigned int height, |
| 976 | uint32_t pixel_format, | 969 | uint32_t pixel_format, |
| 977 | uint64_t fb_format_modifier); | 970 | uint64_t fb_format_modifier); |
| 978 | void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire); | 971 | void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire, |
| 979 | 972 | enum fb_op_origin origin); | |
| 980 | u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier, | 973 | u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier, |
| 981 | uint32_t pixel_format); | 974 | uint32_t pixel_format); |
| 982 | 975 | ||
| @@ -994,8 +987,8 @@ int intel_pch_rawclk(struct drm_device *dev); | |||
| 994 | void intel_mark_busy(struct drm_device *dev); | 987 | void intel_mark_busy(struct drm_device *dev); |
| 995 | void intel_mark_idle(struct drm_device *dev); | 988 | void intel_mark_idle(struct drm_device *dev); |
| 996 | void intel_crtc_restore_mode(struct drm_crtc *crtc); | 989 | void intel_crtc_restore_mode(struct drm_crtc *crtc); |
| 997 | void intel_crtc_control(struct drm_crtc *crtc, bool enable); | 990 | int intel_display_suspend(struct drm_device *dev); |
| 998 | void intel_crtc_reset(struct intel_crtc *crtc); | 991 | int intel_crtc_control(struct drm_crtc *crtc, bool enable); |
| 999 | void intel_crtc_update_dpms(struct drm_crtc *crtc); | 992 | void intel_crtc_update_dpms(struct drm_crtc *crtc); |
| 1000 | void intel_encoder_destroy(struct drm_encoder *encoder); | 993 | void intel_encoder_destroy(struct drm_encoder *encoder); |
| 1001 | int intel_connector_init(struct intel_connector *); | 994 | int intel_connector_init(struct intel_connector *); |
| @@ -1035,7 +1028,8 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, | |||
| 1035 | int intel_pin_and_fence_fb_obj(struct drm_plane *plane, | 1028 | int intel_pin_and_fence_fb_obj(struct drm_plane *plane, |
| 1036 | struct drm_framebuffer *fb, | 1029 | struct drm_framebuffer *fb, |
| 1037 | const struct drm_plane_state *plane_state, | 1030 | const struct drm_plane_state *plane_state, |
| 1038 | struct intel_engine_cs *pipelined); | 1031 | struct intel_engine_cs *pipelined, |
| 1032 | struct drm_i915_gem_request **pipelined_request); | ||
| 1039 | struct drm_framebuffer * | 1033 | struct drm_framebuffer * |
| 1040 | __intel_framebuffer_create(struct drm_device *dev, | 1034 | __intel_framebuffer_create(struct drm_device *dev, |
| 1041 | struct drm_mode_fb_cmd2 *mode_cmd, | 1035 | struct drm_mode_fb_cmd2 *mode_cmd, |
| @@ -1058,6 +1052,8 @@ int intel_plane_atomic_set_property(struct drm_plane *plane, | |||
| 1058 | struct drm_plane_state *state, | 1052 | struct drm_plane_state *state, |
| 1059 | struct drm_property *property, | 1053 | struct drm_property *property, |
| 1060 | uint64_t val); | 1054 | uint64_t val); |
| 1055 | int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, | ||
| 1056 | struct drm_plane_state *plane_state); | ||
| 1061 | 1057 | ||
| 1062 | unsigned int | 1058 | unsigned int |
| 1063 | intel_tile_height(struct drm_device *dev, uint32_t pixel_format, | 1059 | intel_tile_height(struct drm_device *dev, uint32_t pixel_format, |
| @@ -1072,9 +1068,6 @@ intel_rotation_90_or_270(unsigned int rotation) | |||
| 1072 | void intel_create_rotation_property(struct drm_device *dev, | 1068 | void intel_create_rotation_property(struct drm_device *dev, |
| 1073 | struct intel_plane *plane); | 1069 | struct intel_plane *plane); |
| 1074 | 1070 | ||
| 1075 | bool intel_wm_need_update(struct drm_plane *plane, | ||
| 1076 | struct drm_plane_state *state); | ||
| 1077 | |||
| 1078 | /* shared dpll functions */ | 1071 | /* shared dpll functions */ |
| 1079 | struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc); | 1072 | struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc); |
| 1080 | void assert_shared_dpll(struct drm_i915_private *dev_priv, | 1073 | void assert_shared_dpll(struct drm_i915_private *dev_priv, |
| @@ -1084,7 +1077,6 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, | |||
| 1084 | #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false) | 1077 | #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false) |
| 1085 | struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, | 1078 | struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, |
| 1086 | struct intel_crtc_state *state); | 1079 | struct intel_crtc_state *state); |
| 1087 | void intel_put_shared_dpll(struct intel_crtc *crtc); | ||
| 1088 | 1080 | ||
| 1089 | void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe, | 1081 | void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe, |
| 1090 | const struct dpll *dpll); | 1082 | const struct dpll *dpll); |
| @@ -1104,7 +1096,8 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, | |||
| 1104 | void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); | 1096 | void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); |
| 1105 | #define assert_pipe_enabled(d, p) assert_pipe(d, p, true) | 1097 | #define assert_pipe_enabled(d, p) assert_pipe(d, p, true) |
| 1106 | #define assert_pipe_disabled(d, p) assert_pipe(d, p, false) | 1098 | #define assert_pipe_disabled(d, p) assert_pipe(d, p, false) |
| 1107 | unsigned long intel_gen4_compute_page_offset(int *x, int *y, | 1099 | unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv, |
| 1100 | int *x, int *y, | ||
| 1108 | unsigned int tiling_mode, | 1101 | unsigned int tiling_mode, |
| 1109 | unsigned int bpp, | 1102 | unsigned int bpp, |
| 1110 | unsigned int pitch); | 1103 | unsigned int pitch); |
| @@ -1114,7 +1107,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv); | |||
| 1114 | void hsw_disable_pc8(struct drm_i915_private *dev_priv); | 1107 | void hsw_disable_pc8(struct drm_i915_private *dev_priv); |
| 1115 | void broxton_init_cdclk(struct drm_device *dev); | 1108 | void broxton_init_cdclk(struct drm_device *dev); |
| 1116 | void broxton_uninit_cdclk(struct drm_device *dev); | 1109 | void broxton_uninit_cdclk(struct drm_device *dev); |
| 1117 | void broxton_set_cdclk(struct drm_device *dev, int frequency); | ||
| 1118 | void broxton_ddi_phy_init(struct drm_device *dev); | 1110 | void broxton_ddi_phy_init(struct drm_device *dev); |
| 1119 | void broxton_ddi_phy_uninit(struct drm_device *dev); | 1111 | void broxton_ddi_phy_uninit(struct drm_device *dev); |
| 1120 | void bxt_enable_dc9(struct drm_i915_private *dev_priv); | 1112 | void bxt_enable_dc9(struct drm_i915_private *dev_priv); |
| @@ -1130,6 +1122,8 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config, | |||
| 1130 | int dotclock); | 1122 | int dotclock); |
| 1131 | bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, | 1123 | bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, |
| 1132 | intel_clock_t *best_clock); | 1124 | intel_clock_t *best_clock); |
| 1125 | int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock); | ||
| 1126 | |||
| 1133 | bool intel_crtc_active(struct drm_crtc *crtc); | 1127 | bool intel_crtc_active(struct drm_crtc *crtc); |
| 1134 | void hsw_enable_ips(struct intel_crtc *crtc); | 1128 | void hsw_enable_ips(struct intel_crtc *crtc); |
| 1135 | void hsw_disable_ips(struct intel_crtc *crtc); | 1129 | void hsw_disable_ips(struct intel_crtc *crtc); |
| @@ -1139,10 +1133,8 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, | |||
| 1139 | struct intel_crtc_state *pipe_config); | 1133 | struct intel_crtc_state *pipe_config); |
| 1140 | void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc); | 1134 | void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc); |
| 1141 | void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file); | 1135 | void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file); |
| 1142 | void skl_detach_scalers(struct intel_crtc *intel_crtc); | 1136 | |
| 1143 | int skl_update_scaler_users(struct intel_crtc *intel_crtc, | 1137 | int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); |
| 1144 | struct intel_crtc_state *crtc_state, struct intel_plane *intel_plane, | ||
| 1145 | struct intel_plane_state *plane_state, int force_detach); | ||
| 1146 | int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); | 1138 | int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); |
| 1147 | 1139 | ||
| 1148 | unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, | 1140 | unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, |
| @@ -1238,15 +1230,18 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev) | |||
| 1238 | #endif | 1230 | #endif |
| 1239 | 1231 | ||
| 1240 | /* intel_fbc.c */ | 1232 | /* intel_fbc.c */ |
| 1241 | bool intel_fbc_enabled(struct drm_device *dev); | 1233 | bool intel_fbc_enabled(struct drm_i915_private *dev_priv); |
| 1242 | void intel_fbc_update(struct drm_device *dev); | 1234 | void intel_fbc_update(struct drm_i915_private *dev_priv); |
| 1243 | void intel_fbc_init(struct drm_i915_private *dev_priv); | 1235 | void intel_fbc_init(struct drm_i915_private *dev_priv); |
| 1244 | void intel_fbc_disable(struct drm_device *dev); | 1236 | void intel_fbc_disable(struct drm_i915_private *dev_priv); |
| 1237 | void intel_fbc_disable_crtc(struct intel_crtc *crtc); | ||
| 1245 | void intel_fbc_invalidate(struct drm_i915_private *dev_priv, | 1238 | void intel_fbc_invalidate(struct drm_i915_private *dev_priv, |
| 1246 | unsigned int frontbuffer_bits, | 1239 | unsigned int frontbuffer_bits, |
| 1247 | enum fb_op_origin origin); | 1240 | enum fb_op_origin origin); |
| 1248 | void intel_fbc_flush(struct drm_i915_private *dev_priv, | 1241 | void intel_fbc_flush(struct drm_i915_private *dev_priv, |
| 1249 | unsigned int frontbuffer_bits); | 1242 | unsigned int frontbuffer_bits); |
| 1243 | const char *intel_no_fbc_reason_str(enum no_fbc_reason reason); | ||
| 1244 | void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv); | ||
| 1250 | 1245 | ||
| 1251 | /* intel_hdmi.c */ | 1246 | /* intel_hdmi.c */ |
| 1252 | void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port); | 1247 | void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port); |
| @@ -1314,11 +1309,13 @@ void intel_backlight_unregister(struct drm_device *dev); | |||
| 1314 | void intel_psr_enable(struct intel_dp *intel_dp); | 1309 | void intel_psr_enable(struct intel_dp *intel_dp); |
| 1315 | void intel_psr_disable(struct intel_dp *intel_dp); | 1310 | void intel_psr_disable(struct intel_dp *intel_dp); |
| 1316 | void intel_psr_invalidate(struct drm_device *dev, | 1311 | void intel_psr_invalidate(struct drm_device *dev, |
| 1317 | unsigned frontbuffer_bits); | 1312 | unsigned frontbuffer_bits); |
| 1318 | void intel_psr_flush(struct drm_device *dev, | 1313 | void intel_psr_flush(struct drm_device *dev, |
| 1319 | unsigned frontbuffer_bits); | 1314 | unsigned frontbuffer_bits, |
| 1315 | enum fb_op_origin origin); | ||
| 1320 | void intel_psr_init(struct drm_device *dev); | 1316 | void intel_psr_init(struct drm_device *dev); |
| 1321 | void intel_psr_single_frame_update(struct drm_device *dev); | 1317 | void intel_psr_single_frame_update(struct drm_device *dev, |
| 1318 | unsigned frontbuffer_bits); | ||
| 1322 | 1319 | ||
| 1323 | /* intel_runtime_pm.c */ | 1320 | /* intel_runtime_pm.c */ |
| 1324 | int intel_power_domains_init(struct drm_i915_private *); | 1321 | int intel_power_domains_init(struct drm_i915_private *); |
| @@ -1372,11 +1369,12 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv, | |||
| 1372 | unsigned long submitted); | 1369 | unsigned long submitted); |
| 1373 | void intel_queue_rps_boost_for_request(struct drm_device *dev, | 1370 | void intel_queue_rps_boost_for_request(struct drm_device *dev, |
| 1374 | struct drm_i915_gem_request *req); | 1371 | struct drm_i915_gem_request *req); |
| 1372 | void vlv_wm_get_hw_state(struct drm_device *dev); | ||
| 1375 | void ilk_wm_get_hw_state(struct drm_device *dev); | 1373 | void ilk_wm_get_hw_state(struct drm_device *dev); |
| 1376 | void skl_wm_get_hw_state(struct drm_device *dev); | 1374 | void skl_wm_get_hw_state(struct drm_device *dev); |
| 1377 | void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, | 1375 | void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, |
| 1378 | struct skl_ddb_allocation *ddb /* out */); | 1376 | struct skl_ddb_allocation *ddb /* out */); |
| 1379 | 1377 | uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config); | |
| 1380 | 1378 | ||
| 1381 | /* intel_sdvo.c */ | 1379 | /* intel_sdvo.c */ |
| 1382 | bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); | 1380 | bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); |
| @@ -1384,10 +1382,9 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); | |||
| 1384 | 1382 | ||
| 1385 | /* intel_sprite.c */ | 1383 | /* intel_sprite.c */ |
| 1386 | int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); | 1384 | int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); |
| 1387 | int intel_plane_restore(struct drm_plane *plane); | ||
| 1388 | int intel_sprite_set_colorkey(struct drm_device *dev, void *data, | 1385 | int intel_sprite_set_colorkey(struct drm_device *dev, void *data, |
| 1389 | struct drm_file *file_priv); | 1386 | struct drm_file *file_priv); |
| 1390 | bool intel_pipe_update_start(struct intel_crtc *crtc, | 1387 | void intel_pipe_update_start(struct intel_crtc *crtc, |
| 1391 | uint32_t *start_vbl_count); | 1388 | uint32_t *start_vbl_count); |
| 1392 | void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count); | 1389 | void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count); |
| 1393 | 1390 | ||
| @@ -1395,11 +1392,6 @@ void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count); | |||
| 1395 | void intel_tv_init(struct drm_device *dev); | 1392 | void intel_tv_init(struct drm_device *dev); |
| 1396 | 1393 | ||
| 1397 | /* intel_atomic.c */ | 1394 | /* intel_atomic.c */ |
| 1398 | int intel_atomic_check(struct drm_device *dev, | ||
| 1399 | struct drm_atomic_state *state); | ||
| 1400 | int intel_atomic_commit(struct drm_device *dev, | ||
| 1401 | struct drm_atomic_state *state, | ||
| 1402 | bool async); | ||
| 1403 | int intel_connector_atomic_get_property(struct drm_connector *connector, | 1395 | int intel_connector_atomic_get_property(struct drm_connector *connector, |
| 1404 | const struct drm_connector_state *state, | 1396 | const struct drm_connector_state *state, |
| 1405 | struct drm_property *property, | 1397 | struct drm_property *property, |
| @@ -1407,6 +1399,11 @@ int intel_connector_atomic_get_property(struct drm_connector *connector, | |||
| 1407 | struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc); | 1399 | struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc); |
| 1408 | void intel_crtc_destroy_state(struct drm_crtc *crtc, | 1400 | void intel_crtc_destroy_state(struct drm_crtc *crtc, |
| 1409 | struct drm_crtc_state *state); | 1401 | struct drm_crtc_state *state); |
| 1402 | struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev); | ||
| 1403 | void intel_atomic_state_clear(struct drm_atomic_state *); | ||
| 1404 | struct intel_shared_dpll_config * | ||
| 1405 | intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s); | ||
| 1406 | |||
| 1410 | static inline struct intel_crtc_state * | 1407 | static inline struct intel_crtc_state * |
| 1411 | intel_atomic_get_crtc_state(struct drm_atomic_state *state, | 1408 | intel_atomic_get_crtc_state(struct drm_atomic_state *state, |
| 1412 | struct intel_crtc *crtc) | 1409 | struct intel_crtc *crtc) |
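The intel_fbc.c and intel_psr.c entry points declared above now take struct drm_i915_private (or additional frontbuffer arguments) directly. A minimal caller-side sketch of the new FBC prototypes, assuming the caller starts from a struct drm_device; the real call-site updates are elsewhere in this diff:

    struct drm_i915_private *dev_priv = dev->dev_private;

    if (intel_fbc_enabled(dev_priv))   /* was: intel_fbc_enabled(dev) */
        intel_fbc_update(dev_priv);    /* was: intel_fbc_update(dev)  */
    intel_fbc_disable(dev_priv);       /* was: intel_fbc_disable(dev) */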
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index b5a5558ecd63..f4438eb5b458 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c | |||
| @@ -261,11 +261,6 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi) | |||
| 261 | return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE; | 261 | return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE; |
| 262 | } | 262 | } |
| 263 | 263 | ||
| 264 | static void intel_dsi_hot_plug(struct intel_encoder *encoder) | ||
| 265 | { | ||
| 266 | DRM_DEBUG_KMS("\n"); | ||
| 267 | } | ||
| 268 | |||
| 269 | static bool intel_dsi_compute_config(struct intel_encoder *encoder, | 264 | static bool intel_dsi_compute_config(struct intel_encoder *encoder, |
| 270 | struct intel_crtc_state *config) | 265 | struct intel_crtc_state *config) |
| 271 | { | 266 | { |
| @@ -418,12 +413,12 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder) | |||
| 418 | /* Disable DPOunit clock gating, can stall pipe | 413 | /* Disable DPOunit clock gating, can stall pipe |
| 419 | * and we need DPLL REFA always enabled */ | 414 | * and we need DPLL REFA always enabled */ |
| 420 | tmp = I915_READ(DPLL(pipe)); | 415 | tmp = I915_READ(DPLL(pipe)); |
| 421 | tmp |= DPLL_REFA_CLK_ENABLE_VLV; | 416 | tmp |= DPLL_REF_CLK_ENABLE_VLV; |
| 422 | I915_WRITE(DPLL(pipe), tmp); | 417 | I915_WRITE(DPLL(pipe), tmp); |
| 423 | 418 | ||
| 424 | /* update the hw state for DPLL */ | 419 | /* update the hw state for DPLL */ |
| 425 | intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV | | 420 | intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | |
| 426 | DPLL_REFA_CLK_ENABLE_VLV; | 421 | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; |
| 427 | 422 | ||
| 428 | tmp = I915_READ(DSPCLK_GATE_D); | 423 | tmp = I915_READ(DSPCLK_GATE_D); |
| 429 | tmp |= DPOUNIT_CLOCK_GATE_DISABLE; | 424 | tmp |= DPOUNIT_CLOCK_GATE_DISABLE; |
| @@ -1022,7 +1017,6 @@ void intel_dsi_init(struct drm_device *dev) | |||
| 1022 | drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI); | 1017 | drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI); |
| 1023 | 1018 | ||
| 1024 | /* XXX: very likely not all of these are needed */ | 1019 | /* XXX: very likely not all of these are needed */ |
| 1025 | intel_encoder->hot_plug = intel_dsi_hot_plug; | ||
| 1026 | intel_encoder->compute_config = intel_dsi_compute_config; | 1020 | intel_encoder->compute_config = intel_dsi_compute_config; |
| 1027 | intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable; | 1021 | intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable; |
| 1028 | intel_encoder->pre_enable = intel_dsi_pre_enable; | 1022 | intel_encoder->pre_enable = intel_dsi_pre_enable; |
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c index d20cf37b6901..c6a8975b128f 100644 --- a/drivers/gpu/drm/i915/intel_dsi_pll.c +++ b/drivers/gpu/drm/i915/intel_dsi_pll.c | |||
| @@ -38,6 +38,27 @@ | |||
| 38 | #define DSI_HFP_PACKET_EXTRA_SIZE 6 | 38 | #define DSI_HFP_PACKET_EXTRA_SIZE 6 |
| 39 | #define DSI_EOTP_PACKET_SIZE 4 | 39 | #define DSI_EOTP_PACKET_SIZE 4 |
| 40 | 40 | ||
| 41 | static int dsi_pixel_format_bpp(int pixel_format) | ||
| 42 | { | ||
| 43 | int bpp; | ||
| 44 | |||
| 45 | switch (pixel_format) { | ||
| 46 | default: | ||
| 47 | case VID_MODE_FORMAT_RGB888: | ||
| 48 | case VID_MODE_FORMAT_RGB666_LOOSE: | ||
| 49 | bpp = 24; | ||
| 50 | break; | ||
| 51 | case VID_MODE_FORMAT_RGB666: | ||
| 52 | bpp = 18; | ||
| 53 | break; | ||
| 54 | case VID_MODE_FORMAT_RGB565: | ||
| 55 | bpp = 16; | ||
| 56 | break; | ||
| 57 | } | ||
| 58 | |||
| 59 | return bpp; | ||
| 60 | } | ||
| 61 | |||
| 41 | struct dsi_mnp { | 62 | struct dsi_mnp { |
| 42 | u32 dsi_pll_ctrl; | 63 | u32 dsi_pll_ctrl; |
| 43 | u32 dsi_pll_div; | 64 | u32 dsi_pll_div; |
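The dsi_pixel_format_bpp() helper added above replaces three identical switch statements later in this file (dsi_rr_formula(), dsi_clk_from_pclk() and assert_bpp_mismatch()). A minimal usage sketch, with the values taken from the helper itself:

    int bpp = dsi_pixel_format_bpp(VID_MODE_FORMAT_RGB666);      /* 18 */
    int def = dsi_pixel_format_bpp(-1); /* unknown formats fall back to 24 */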
| @@ -46,8 +67,8 @@ struct dsi_mnp { | |||
| 46 | static const u32 lfsr_converts[] = { | 67 | static const u32 lfsr_converts[] = { |
| 47 | 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */ | 68 | 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */ |
| 48 | 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */ | 69 | 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */ |
| 49 | 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */ | 70 | 106, 53, 282, 397, 454, 227, 113, 56, 284, 142, /* 81 - 90 */ |
| 50 | 71, 35 /* 91 - 92 */ | 71 | 71, 35, 273, 136, 324, 418, 465, 488, 500, 506 /* 91 - 100 */ |
| 51 | }; | 72 | }; |
| 52 | 73 | ||
| 53 | #ifdef DSI_CLK_FROM_RR | 74 | #ifdef DSI_CLK_FROM_RR |
| @@ -65,19 +86,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode, | |||
| 65 | u32 dsi_bit_clock_hz; | 86 | u32 dsi_bit_clock_hz; |
| 66 | u32 dsi_clk; | 87 | u32 dsi_clk; |
| 67 | 88 | ||
| 68 | switch (pixel_format) { | 89 | bpp = dsi_pixel_format_bpp(pixel_format); |
| 69 | default: | ||
| 70 | case VID_MODE_FORMAT_RGB888: | ||
| 71 | case VID_MODE_FORMAT_RGB666_LOOSE: | ||
| 72 | bpp = 24; | ||
| 73 | break; | ||
| 74 | case VID_MODE_FORMAT_RGB666: | ||
| 75 | bpp = 18; | ||
| 76 | break; | ||
| 77 | case VID_MODE_FORMAT_RGB565: | ||
| 78 | bpp = 16; | ||
| 79 | break; | ||
| 80 | } | ||
| 81 | 90 | ||
| 82 | hactive = mode->hdisplay; | 91 | hactive = mode->hdisplay; |
| 83 | vactive = mode->vdisplay; | 92 | vactive = mode->vdisplay; |
| @@ -137,21 +146,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode, | |||
| 137 | static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count) | 146 | static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count) |
| 138 | { | 147 | { |
| 139 | u32 dsi_clk_khz; | 148 | u32 dsi_clk_khz; |
| 140 | u32 bpp; | 149 | u32 bpp = dsi_pixel_format_bpp(pixel_format); |
| 141 | |||
| 142 | switch (pixel_format) { | ||
| 143 | default: | ||
| 144 | case VID_MODE_FORMAT_RGB888: | ||
| 145 | case VID_MODE_FORMAT_RGB666_LOOSE: | ||
| 146 | bpp = 24; | ||
| 147 | break; | ||
| 148 | case VID_MODE_FORMAT_RGB666: | ||
| 149 | bpp = 18; | ||
| 150 | break; | ||
| 151 | case VID_MODE_FORMAT_RGB565: | ||
| 152 | bpp = 16; | ||
| 153 | break; | ||
| 154 | } | ||
| 155 | 150 | ||
| 156 | /* DSI data rate = pixel clock * bits per pixel / lane count | 151 | /* DSI data rate = pixel clock * bits per pixel / lane count |
| 157 | pixel clock is converted from KHz to Hz */ | 152 | pixel clock is converted from KHz to Hz */ |
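As a worked example of the formula in the comment above: a 65000 kHz pixel clock in RGB888 (24 bpp) driven over 4 lanes gives a DSI data rate of 65000 * 24 / 4 = 390000, in the same units as pclk.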
| @@ -162,11 +157,13 @@ static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count) | |||
| 162 | 157 | ||
| 163 | #endif | 158 | #endif |
| 164 | 159 | ||
| 165 | static int dsi_calc_mnp(int target_dsi_clk, struct dsi_mnp *dsi_mnp) | 160 | static int dsi_calc_mnp(struct drm_i915_private *dev_priv, |
| 161 | struct dsi_mnp *dsi_mnp, int target_dsi_clk) | ||
| 166 | { | 162 | { |
| 167 | unsigned int calc_m = 0, calc_p = 0; | 163 | unsigned int calc_m = 0, calc_p = 0; |
| 168 | unsigned int m, n = 1, p; | 164 | unsigned int m_min, m_max, p_min = 2, p_max = 6; |
| 169 | int ref_clk = 25000; | 165 | unsigned int m, n, p; |
| 166 | int ref_clk; | ||
| 170 | int delta = target_dsi_clk; | 167 | int delta = target_dsi_clk; |
| 171 | u32 m_seed; | 168 | u32 m_seed; |
| 172 | 169 | ||
| @@ -176,8 +173,20 @@ static int dsi_calc_mnp(int target_dsi_clk, struct dsi_mnp *dsi_mnp) | |||
| 176 | return -ECHRNG; | 173 | return -ECHRNG; |
| 177 | } | 174 | } |
| 178 | 175 | ||
| 179 | for (m = 62; m <= 92 && delta; m++) { | 176 | if (IS_CHERRYVIEW(dev_priv)) { |
| 180 | for (p = 2; p <= 6 && delta; p++) { | 177 | ref_clk = 100000; |
| 178 | n = 4; | ||
| 179 | m_min = 70; | ||
| 180 | m_max = 96; | ||
| 181 | } else { | ||
| 182 | ref_clk = 25000; | ||
| 183 | n = 1; | ||
| 184 | m_min = 62; | ||
| 185 | m_max = 92; | ||
| 186 | } | ||
| 187 | |||
| 188 | for (m = m_min; m <= m_max && delta; m++) { | ||
| 189 | for (p = p_min; p <= p_max && delta; p++) { | ||
| 181 | /* | 190 | /* |
| 182 | * Find the optimal m and p divisors with minimal delta | 191 | * Find the optimal m and p divisors with minimal delta |
| 183 | * +/- the required clock | 192 | * +/- the required clock |
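A worked comparison of the per-platform parameters above, assuming the loop scores candidates as m * ref_clk / (p * n) (the scoring line itself is outside this hunk): on CHV, m = 80 and p = 4 give 80 * 100000 / (4 * 4) = 500000 kHz, and the same divisors on VLV give 80 * 25000 / (4 * 1) = 500000 kHz. In other words, ref_clk / n is 25000 kHz on both platforms, so the shifted CHV m range (70-96 instead of 62-92) moves the reachable set of frequencies without changing the step size.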
| @@ -217,7 +226,7 @@ static void vlv_configure_dsi_pll(struct intel_encoder *encoder) | |||
| 217 | dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format, | 226 | dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format, |
| 218 | intel_dsi->lane_count); | 227 | intel_dsi->lane_count); |
| 219 | 228 | ||
| 220 | ret = dsi_calc_mnp(dsi_clk, &dsi_mnp); | 229 | ret = dsi_calc_mnp(dev_priv, &dsi_mnp, dsi_clk); |
| 221 | if (ret) { | 230 | if (ret) { |
| 222 | DRM_DEBUG_KMS("dsi_calc_mnp failed\n"); | 231 | DRM_DEBUG_KMS("dsi_calc_mnp failed\n"); |
| 223 | return; | 232 | return; |
| @@ -286,21 +295,7 @@ void vlv_disable_dsi_pll(struct intel_encoder *encoder) | |||
| 286 | 295 | ||
| 287 | static void assert_bpp_mismatch(int pixel_format, int pipe_bpp) | 296 | static void assert_bpp_mismatch(int pixel_format, int pipe_bpp) |
| 288 | { | 297 | { |
| 289 | int bpp; | 298 | int bpp = dsi_pixel_format_bpp(pixel_format); |
| 290 | |||
| 291 | switch (pixel_format) { | ||
| 292 | default: | ||
| 293 | case VID_MODE_FORMAT_RGB888: | ||
| 294 | case VID_MODE_FORMAT_RGB666_LOOSE: | ||
| 295 | bpp = 24; | ||
| 296 | break; | ||
| 297 | case VID_MODE_FORMAT_RGB666: | ||
| 298 | bpp = 18; | ||
| 299 | break; | ||
| 300 | case VID_MODE_FORMAT_RGB565: | ||
| 301 | bpp = 16; | ||
| 302 | break; | ||
| 303 | } | ||
| 304 | 299 | ||
| 305 | WARN(bpp != pipe_bpp, | 300 | WARN(bpp != pipe_bpp, |
| 306 | "bpp match assertion failure (expected %d, current %d)\n", | 301 | "bpp match assertion failure (expected %d, current %d)\n", |
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 6abb83432d4d..c271af767981 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c | |||
| @@ -41,9 +41,8 @@ | |||
| 41 | #include "intel_drv.h" | 41 | #include "intel_drv.h" |
| 42 | #include "i915_drv.h" | 42 | #include "i915_drv.h" |
| 43 | 43 | ||
| 44 | static void i8xx_fbc_disable(struct drm_device *dev) | 44 | static void i8xx_fbc_disable(struct drm_i915_private *dev_priv) |
| 45 | { | 45 | { |
| 46 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 47 | u32 fbc_ctl; | 46 | u32 fbc_ctl; |
| 48 | 47 | ||
| 49 | dev_priv->fbc.enabled = false; | 48 | dev_priv->fbc.enabled = false; |
| @@ -65,13 +64,11 @@ static void i8xx_fbc_disable(struct drm_device *dev) | |||
| 65 | DRM_DEBUG_KMS("disabled FBC\n"); | 64 | DRM_DEBUG_KMS("disabled FBC\n"); |
| 66 | } | 65 | } |
| 67 | 66 | ||
| 68 | static void i8xx_fbc_enable(struct drm_crtc *crtc) | 67 | static void i8xx_fbc_enable(struct intel_crtc *crtc) |
| 69 | { | 68 | { |
| 70 | struct drm_device *dev = crtc->dev; | 69 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
| 71 | struct drm_i915_private *dev_priv = dev->dev_private; | 70 | struct drm_framebuffer *fb = crtc->base.primary->fb; |
| 72 | struct drm_framebuffer *fb = crtc->primary->fb; | ||
| 73 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | 71 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); |
| 74 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
| 75 | int cfb_pitch; | 72 | int cfb_pitch; |
| 76 | int i; | 73 | int i; |
| 77 | u32 fbc_ctl; | 74 | u32 fbc_ctl; |
| @@ -84,7 +81,7 @@ static void i8xx_fbc_enable(struct drm_crtc *crtc) | |||
| 84 | cfb_pitch = fb->pitches[0]; | 81 | cfb_pitch = fb->pitches[0]; |
| 85 | 82 | ||
| 86 | /* FBC_CTL wants 32B or 64B units */ | 83 | /* FBC_CTL wants 32B or 64B units */ |
| 87 | if (IS_GEN2(dev)) | 84 | if (IS_GEN2(dev_priv)) |
| 88 | cfb_pitch = (cfb_pitch / 32) - 1; | 85 | cfb_pitch = (cfb_pitch / 32) - 1; |
| 89 | else | 86 | else |
| 90 | cfb_pitch = (cfb_pitch / 64) - 1; | 87 | cfb_pitch = (cfb_pitch / 64) - 1; |
| @@ -93,66 +90,61 @@ static void i8xx_fbc_enable(struct drm_crtc *crtc) | |||
| 93 | for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) | 90 | for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) |
| 94 | I915_WRITE(FBC_TAG + (i * 4), 0); | 91 | I915_WRITE(FBC_TAG + (i * 4), 0); |
| 95 | 92 | ||
| 96 | if (IS_GEN4(dev)) { | 93 | if (IS_GEN4(dev_priv)) { |
| 97 | u32 fbc_ctl2; | 94 | u32 fbc_ctl2; |
| 98 | 95 | ||
| 99 | /* Set it up... */ | 96 | /* Set it up... */ |
| 100 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; | 97 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; |
| 101 | fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane); | 98 | fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane); |
| 102 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); | 99 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); |
| 103 | I915_WRITE(FBC_FENCE_OFF, crtc->y); | 100 | I915_WRITE(FBC_FENCE_OFF, crtc->base.y); |
| 104 | } | 101 | } |
| 105 | 102 | ||
| 106 | /* enable it... */ | 103 | /* enable it... */ |
| 107 | fbc_ctl = I915_READ(FBC_CONTROL); | 104 | fbc_ctl = I915_READ(FBC_CONTROL); |
| 108 | fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT; | 105 | fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT; |
| 109 | fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC; | 106 | fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC; |
| 110 | if (IS_I945GM(dev)) | 107 | if (IS_I945GM(dev_priv)) |
| 111 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ | 108 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ |
| 112 | fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; | 109 | fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; |
| 113 | fbc_ctl |= obj->fence_reg; | 110 | fbc_ctl |= obj->fence_reg; |
| 114 | I915_WRITE(FBC_CONTROL, fbc_ctl); | 111 | I915_WRITE(FBC_CONTROL, fbc_ctl); |
| 115 | 112 | ||
| 116 | DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n", | 113 | DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n", |
| 117 | cfb_pitch, crtc->y, plane_name(intel_crtc->plane)); | 114 | cfb_pitch, crtc->base.y, plane_name(crtc->plane)); |
| 118 | } | 115 | } |
| 119 | 116 | ||
| 120 | static bool i8xx_fbc_enabled(struct drm_device *dev) | 117 | static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv) |
| 121 | { | 118 | { |
| 122 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 123 | |||
| 124 | return I915_READ(FBC_CONTROL) & FBC_CTL_EN; | 119 | return I915_READ(FBC_CONTROL) & FBC_CTL_EN; |
| 125 | } | 120 | } |
| 126 | 121 | ||
| 127 | static void g4x_fbc_enable(struct drm_crtc *crtc) | 122 | static void g4x_fbc_enable(struct intel_crtc *crtc) |
| 128 | { | 123 | { |
| 129 | struct drm_device *dev = crtc->dev; | 124 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
| 130 | struct drm_i915_private *dev_priv = dev->dev_private; | 125 | struct drm_framebuffer *fb = crtc->base.primary->fb; |
| 131 | struct drm_framebuffer *fb = crtc->primary->fb; | ||
| 132 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | 126 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); |
| 133 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
| 134 | u32 dpfc_ctl; | 127 | u32 dpfc_ctl; |
| 135 | 128 | ||
| 136 | dev_priv->fbc.enabled = true; | 129 | dev_priv->fbc.enabled = true; |
| 137 | 130 | ||
| 138 | dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN; | 131 | dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN; |
| 139 | if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) | 132 | if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) |
| 140 | dpfc_ctl |= DPFC_CTL_LIMIT_2X; | 133 | dpfc_ctl |= DPFC_CTL_LIMIT_2X; |
| 141 | else | 134 | else |
| 142 | dpfc_ctl |= DPFC_CTL_LIMIT_1X; | 135 | dpfc_ctl |= DPFC_CTL_LIMIT_1X; |
| 143 | dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; | 136 | dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; |
| 144 | 137 | ||
| 145 | I915_WRITE(DPFC_FENCE_YOFF, crtc->y); | 138 | I915_WRITE(DPFC_FENCE_YOFF, crtc->base.y); |
| 146 | 139 | ||
| 147 | /* enable it... */ | 140 | /* enable it... */ |
| 148 | I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); | 141 | I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); |
| 149 | 142 | ||
| 150 | DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); | 143 | DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane)); |
| 151 | } | 144 | } |
| 152 | 145 | ||
| 153 | static void g4x_fbc_disable(struct drm_device *dev) | 146 | static void g4x_fbc_disable(struct drm_i915_private *dev_priv) |
| 154 | { | 147 | { |
| 155 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 156 | u32 dpfc_ctl; | 148 | u32 dpfc_ctl; |
| 157 | 149 | ||
| 158 | dev_priv->fbc.enabled = false; | 150 | dev_priv->fbc.enabled = false; |
| @@ -167,10 +159,8 @@ static void g4x_fbc_disable(struct drm_device *dev) | |||
| 167 | } | 159 | } |
| 168 | } | 160 | } |
| 169 | 161 | ||
| 170 | static bool g4x_fbc_enabled(struct drm_device *dev) | 162 | static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv) |
| 171 | { | 163 | { |
| 172 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 173 | |||
| 174 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; | 164 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; |
| 175 | } | 165 | } |
| 176 | 166 | ||
| @@ -180,22 +170,21 @@ static void intel_fbc_nuke(struct drm_i915_private *dev_priv) | |||
| 180 | POSTING_READ(MSG_FBC_REND_STATE); | 170 | POSTING_READ(MSG_FBC_REND_STATE); |
| 181 | } | 171 | } |
| 182 | 172 | ||
| 183 | static void ilk_fbc_enable(struct drm_crtc *crtc) | 173 | static void ilk_fbc_enable(struct intel_crtc *crtc) |
| 184 | { | 174 | { |
| 185 | struct drm_device *dev = crtc->dev; | 175 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
| 186 | struct drm_i915_private *dev_priv = dev->dev_private; | 176 | struct drm_framebuffer *fb = crtc->base.primary->fb; |
| 187 | struct drm_framebuffer *fb = crtc->primary->fb; | ||
| 188 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | 177 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); |
| 189 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
| 190 | u32 dpfc_ctl; | 178 | u32 dpfc_ctl; |
| 179 | int threshold = dev_priv->fbc.threshold; | ||
| 191 | 180 | ||
| 192 | dev_priv->fbc.enabled = true; | 181 | dev_priv->fbc.enabled = true; |
| 193 | 182 | ||
| 194 | dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane); | 183 | dpfc_ctl = DPFC_CTL_PLANE(crtc->plane); |
| 195 | if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) | 184 | if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) |
| 196 | dev_priv->fbc.threshold++; | 185 | threshold++; |
| 197 | 186 | ||
| 198 | switch (dev_priv->fbc.threshold) { | 187 | switch (threshold) { |
| 199 | case 4: | 188 | case 4: |
| 200 | case 3: | 189 | case 3: |
| 201 | dpfc_ctl |= DPFC_CTL_LIMIT_4X; | 190 | dpfc_ctl |= DPFC_CTL_LIMIT_4X; |
| @@ -208,28 +197,27 @@ static void ilk_fbc_enable(struct drm_crtc *crtc) | |||
| 208 | break; | 197 | break; |
| 209 | } | 198 | } |
| 210 | dpfc_ctl |= DPFC_CTL_FENCE_EN; | 199 | dpfc_ctl |= DPFC_CTL_FENCE_EN; |
| 211 | if (IS_GEN5(dev)) | 200 | if (IS_GEN5(dev_priv)) |
| 212 | dpfc_ctl |= obj->fence_reg; | 201 | dpfc_ctl |= obj->fence_reg; |
| 213 | 202 | ||
| 214 | I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); | 203 | I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->base.y); |
| 215 | I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID); | 204 | I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID); |
| 216 | /* enable it... */ | 205 | /* enable it... */ |
| 217 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); | 206 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); |
| 218 | 207 | ||
| 219 | if (IS_GEN6(dev)) { | 208 | if (IS_GEN6(dev_priv)) { |
| 220 | I915_WRITE(SNB_DPFC_CTL_SA, | 209 | I915_WRITE(SNB_DPFC_CTL_SA, |
| 221 | SNB_CPU_FENCE_ENABLE | obj->fence_reg); | 210 | SNB_CPU_FENCE_ENABLE | obj->fence_reg); |
| 222 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); | 211 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y); |
| 223 | } | 212 | } |
| 224 | 213 | ||
| 225 | intel_fbc_nuke(dev_priv); | 214 | intel_fbc_nuke(dev_priv); |
| 226 | 215 | ||
| 227 | DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); | 216 | DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane)); |
| 228 | } | 217 | } |
| 229 | 218 | ||
| 230 | static void ilk_fbc_disable(struct drm_device *dev) | 219 | static void ilk_fbc_disable(struct drm_i915_private *dev_priv) |
| 231 | { | 220 | { |
| 232 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 233 | u32 dpfc_ctl; | 221 | u32 dpfc_ctl; |
| 234 | 222 | ||
| 235 | dev_priv->fbc.enabled = false; | 223 | dev_priv->fbc.enabled = false; |
| @@ -244,29 +232,29 @@ static void ilk_fbc_disable(struct drm_device *dev) | |||
| 244 | } | 232 | } |
| 245 | } | 233 | } |
| 246 | 234 | ||
| 247 | static bool ilk_fbc_enabled(struct drm_device *dev) | 235 | static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv) |
| 248 | { | 236 | { |
| 249 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 250 | |||
| 251 | return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; | 237 | return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; |
| 252 | } | 238 | } |
| 253 | 239 | ||
| 254 | static void gen7_fbc_enable(struct drm_crtc *crtc) | 240 | static void gen7_fbc_enable(struct intel_crtc *crtc) |
| 255 | { | 241 | { |
| 256 | struct drm_device *dev = crtc->dev; | 242 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
| 257 | struct drm_i915_private *dev_priv = dev->dev_private; | 243 | struct drm_framebuffer *fb = crtc->base.primary->fb; |
| 258 | struct drm_framebuffer *fb = crtc->primary->fb; | ||
| 259 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | 244 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); |
| 260 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
| 261 | u32 dpfc_ctl; | 245 | u32 dpfc_ctl; |
| 246 | int threshold = dev_priv->fbc.threshold; | ||
| 262 | 247 | ||
| 263 | dev_priv->fbc.enabled = true; | 248 | dev_priv->fbc.enabled = true; |
| 264 | 249 | ||
| 265 | dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane); | 250 | dpfc_ctl = 0; |
| 251 | if (IS_IVYBRIDGE(dev_priv)) | ||
| 252 | dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane); | ||
| 253 | |||
| 266 | if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) | 254 | if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) |
| 267 | dev_priv->fbc.threshold++; | 255 | threshold++; |
| 268 | 256 | ||
| 269 | switch (dev_priv->fbc.threshold) { | 257 | switch (threshold) { |
| 270 | case 4: | 258 | case 4: |
| 271 | case 3: | 259 | case 3: |
| 272 | dpfc_ctl |= DPFC_CTL_LIMIT_4X; | 260 | dpfc_ctl |= DPFC_CTL_LIMIT_4X; |
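Note the change above (in both ilk_fbc_enable() and gen7_fbc_enable()) from incrementing dev_priv->fbc.threshold in place to bumping a local copy: previously every enable on a 16 bpp framebuffer raised the stored threshold, so back-to-back enables without a CFB reallocation could drift the DPFC limit upward. A minimal sketch of the corrected pattern:

    int threshold = dev_priv->fbc.threshold;  /* stored value stays untouched */

    if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
        threshold++;                          /* only the local copy is bumped */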
| @@ -286,39 +274,37 @@ static void gen7_fbc_enable(struct drm_crtc *crtc) | |||
| 286 | 274 | ||
| 287 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); | 275 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); |
| 288 | 276 | ||
| 289 | if (IS_IVYBRIDGE(dev)) { | 277 | if (IS_IVYBRIDGE(dev_priv)) { |
| 290 | /* WaFbcAsynchFlipDisableFbcQueue:ivb */ | 278 | /* WaFbcAsynchFlipDisableFbcQueue:ivb */ |
| 291 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | 279 | I915_WRITE(ILK_DISPLAY_CHICKEN1, |
| 292 | I915_READ(ILK_DISPLAY_CHICKEN1) | | 280 | I915_READ(ILK_DISPLAY_CHICKEN1) | |
| 293 | ILK_FBCQ_DIS); | 281 | ILK_FBCQ_DIS); |
| 294 | } else { | 282 | } else { |
| 295 | /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ | 283 | /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ |
| 296 | I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe), | 284 | I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe), |
| 297 | I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) | | 285 | I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) | |
| 298 | HSW_FBCQ_DIS); | 286 | HSW_FBCQ_DIS); |
| 299 | } | 287 | } |
| 300 | 288 | ||
| 301 | I915_WRITE(SNB_DPFC_CTL_SA, | 289 | I915_WRITE(SNB_DPFC_CTL_SA, |
| 302 | SNB_CPU_FENCE_ENABLE | obj->fence_reg); | 290 | SNB_CPU_FENCE_ENABLE | obj->fence_reg); |
| 303 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); | 291 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y); |
| 304 | 292 | ||
| 305 | intel_fbc_nuke(dev_priv); | 293 | intel_fbc_nuke(dev_priv); |
| 306 | 294 | ||
| 307 | DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); | 295 | DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane)); |
| 308 | } | 296 | } |
| 309 | 297 | ||
| 310 | /** | 298 | /** |
| 311 | * intel_fbc_enabled - Is FBC enabled? | 299 | * intel_fbc_enabled - Is FBC enabled? |
| 312 | * @dev: the drm_device | 300 | * @dev_priv: i915 device instance |
| 313 | * | 301 | * |
| 314 | * This function is used to verify the current state of FBC. | 302 | * This function is used to verify the current state of FBC. |
| 315 | * FIXME: This should be tracked in the plane config eventually | 303 | * FIXME: This should be tracked in the plane config eventually |
| 316 | * instead of queried at runtime for most callers. | 304 | * instead of queried at runtime for most callers. |
| 317 | */ | 305 | */ |
| 318 | bool intel_fbc_enabled(struct drm_device *dev) | 306 | bool intel_fbc_enabled(struct drm_i915_private *dev_priv) |
| 319 | { | 307 | { |
| 320 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 321 | |||
| 322 | return dev_priv->fbc.enabled; | 308 | return dev_priv->fbc.enabled; |
| 323 | } | 309 | } |
| 324 | 310 | ||
| @@ -327,31 +313,33 @@ static void intel_fbc_work_fn(struct work_struct *__work) | |||
| 327 | struct intel_fbc_work *work = | 313 | struct intel_fbc_work *work = |
| 328 | container_of(to_delayed_work(__work), | 314 | container_of(to_delayed_work(__work), |
| 329 | struct intel_fbc_work, work); | 315 | struct intel_fbc_work, work); |
| 330 | struct drm_device *dev = work->crtc->dev; | 316 | struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private; |
| 331 | struct drm_i915_private *dev_priv = dev->dev_private; | 317 | struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb; |
| 332 | 318 | ||
| 333 | mutex_lock(&dev->struct_mutex); | 319 | mutex_lock(&dev_priv->fbc.lock); |
| 334 | if (work == dev_priv->fbc.fbc_work) { | 320 | if (work == dev_priv->fbc.fbc_work) { |
| 335 | /* Double check that we haven't switched fb without cancelling | 321 | /* Double check that we haven't switched fb without cancelling |
| 336 | * the prior work. | 322 | * the prior work. |
| 337 | */ | 323 | */ |
| 338 | if (work->crtc->primary->fb == work->fb) { | 324 | if (crtc_fb == work->fb) { |
| 339 | dev_priv->display.enable_fbc(work->crtc); | 325 | dev_priv->fbc.enable_fbc(work->crtc); |
| 340 | 326 | ||
| 341 | dev_priv->fbc.crtc = to_intel_crtc(work->crtc); | 327 | dev_priv->fbc.crtc = work->crtc; |
| 342 | dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id; | 328 | dev_priv->fbc.fb_id = crtc_fb->base.id; |
| 343 | dev_priv->fbc.y = work->crtc->y; | 329 | dev_priv->fbc.y = work->crtc->base.y; |
| 344 | } | 330 | } |
| 345 | 331 | ||
| 346 | dev_priv->fbc.fbc_work = NULL; | 332 | dev_priv->fbc.fbc_work = NULL; |
| 347 | } | 333 | } |
| 348 | mutex_unlock(&dev->struct_mutex); | 334 | mutex_unlock(&dev_priv->fbc.lock); |
| 349 | 335 | ||
| 350 | kfree(work); | 336 | kfree(work); |
| 351 | } | 337 | } |
| 352 | 338 | ||
| 353 | static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv) | 339 | static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv) |
| 354 | { | 340 | { |
| 341 | WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); | ||
| 342 | |||
| 355 | if (dev_priv->fbc.fbc_work == NULL) | 343 | if (dev_priv->fbc.fbc_work == NULL) |
| 356 | return; | 344 | return; |
| 357 | 345 | ||
| @@ -373,26 +361,24 @@ static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv) | |||
| 373 | dev_priv->fbc.fbc_work = NULL; | 361 | dev_priv->fbc.fbc_work = NULL; |
| 374 | } | 362 | } |
| 375 | 363 | ||
| 376 | static void intel_fbc_enable(struct drm_crtc *crtc) | 364 | static void intel_fbc_enable(struct intel_crtc *crtc) |
| 377 | { | 365 | { |
| 378 | struct intel_fbc_work *work; | 366 | struct intel_fbc_work *work; |
| 379 | struct drm_device *dev = crtc->dev; | 367 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
| 380 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 381 | 368 | ||
| 382 | if (!dev_priv->display.enable_fbc) | 369 | WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); |
| 383 | return; | ||
| 384 | 370 | ||
| 385 | intel_fbc_cancel_work(dev_priv); | 371 | intel_fbc_cancel_work(dev_priv); |
| 386 | 372 | ||
| 387 | work = kzalloc(sizeof(*work), GFP_KERNEL); | 373 | work = kzalloc(sizeof(*work), GFP_KERNEL); |
| 388 | if (work == NULL) { | 374 | if (work == NULL) { |
| 389 | DRM_ERROR("Failed to allocate FBC work structure\n"); | 375 | DRM_ERROR("Failed to allocate FBC work structure\n"); |
| 390 | dev_priv->display.enable_fbc(crtc); | 376 | dev_priv->fbc.enable_fbc(crtc); |
| 391 | return; | 377 | return; |
| 392 | } | 378 | } |
| 393 | 379 | ||
| 394 | work->crtc = crtc; | 380 | work->crtc = crtc; |
| 395 | work->fb = crtc->primary->fb; | 381 | work->fb = crtc->base.primary->fb; |
| 396 | INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn); | 382 | INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn); |
| 397 | 383 | ||
| 398 | dev_priv->fbc.fbc_work = work; | 384 | dev_priv->fbc.fbc_work = work; |
| @@ -413,75 +399,274 @@ static void intel_fbc_enable(struct drm_crtc *crtc) | |||
| 413 | schedule_delayed_work(&work->work, msecs_to_jiffies(50)); | 399 | schedule_delayed_work(&work->work, msecs_to_jiffies(50)); |
| 414 | } | 400 | } |
| 415 | 401 | ||
| 402 | static void __intel_fbc_disable(struct drm_i915_private *dev_priv) | ||
| 403 | { | ||
| 404 | WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); | ||
| 405 | |||
| 406 | intel_fbc_cancel_work(dev_priv); | ||
| 407 | |||
| 408 | dev_priv->fbc.disable_fbc(dev_priv); | ||
| 409 | dev_priv->fbc.crtc = NULL; | ||
| 410 | } | ||
| 411 | |||
| 416 | /** | 412 | /** |
| 417 | * intel_fbc_disable - disable FBC | 413 | * intel_fbc_disable - disable FBC |
| 418 | * @dev: the drm_device | 414 | * @dev_priv: i915 device instance |
| 419 | * | 415 | * |
| 420 | * This function disables FBC. | 416 | * This function disables FBC. |
| 421 | */ | 417 | */ |
| 422 | void intel_fbc_disable(struct drm_device *dev) | 418 | void intel_fbc_disable(struct drm_i915_private *dev_priv) |
| 423 | { | 419 | { |
| 424 | struct drm_i915_private *dev_priv = dev->dev_private; | 420 | if (!dev_priv->fbc.enable_fbc) |
| 421 | return; | ||
| 425 | 422 | ||
| 426 | intel_fbc_cancel_work(dev_priv); | 423 | mutex_lock(&dev_priv->fbc.lock); |
| 424 | __intel_fbc_disable(dev_priv); | ||
| 425 | mutex_unlock(&dev_priv->fbc.lock); | ||
| 426 | } | ||
| 427 | |||
| 428 | /* | ||
| 429 | * intel_fbc_disable_crtc - disable FBC if it's associated with crtc | ||
| 430 | * @crtc: the CRTC | ||
| 431 | * | ||
| 432 | * This function disables FBC if it's associated with the provided CRTC. | ||
| 433 | */ | ||
| 434 | void intel_fbc_disable_crtc(struct intel_crtc *crtc) | ||
| 435 | { | ||
| 436 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; | ||
| 427 | 437 | ||
| 428 | if (!dev_priv->display.disable_fbc) | 438 | if (!dev_priv->fbc.enable_fbc) |
| 429 | return; | 439 | return; |
| 430 | 440 | ||
| 431 | dev_priv->display.disable_fbc(dev); | 441 | mutex_lock(&dev_priv->fbc.lock); |
| 432 | dev_priv->fbc.crtc = NULL; | 442 | if (dev_priv->fbc.crtc == crtc) |
| 443 | __intel_fbc_disable(dev_priv); | ||
| 444 | mutex_unlock(&dev_priv->fbc.lock); | ||
| 433 | } | 445 | } |
| 434 | 446 | ||
| 435 | static bool set_no_fbc_reason(struct drm_i915_private *dev_priv, | 447 | const char *intel_no_fbc_reason_str(enum no_fbc_reason reason) |
| 448 | { | ||
| 449 | switch (reason) { | ||
| 450 | case FBC_OK: | ||
| 451 | return "FBC enabled but currently disabled in hardware"; | ||
| 452 | case FBC_UNSUPPORTED: | ||
| 453 | return "unsupported by this chipset"; | ||
| 454 | case FBC_NO_OUTPUT: | ||
| 455 | return "no output"; | ||
| 456 | case FBC_STOLEN_TOO_SMALL: | ||
| 457 | return "not enough stolen memory"; | ||
| 458 | case FBC_UNSUPPORTED_MODE: | ||
| 459 | return "mode incompatible with compression"; | ||
| 460 | case FBC_MODE_TOO_LARGE: | ||
| 461 | return "mode too large for compression"; | ||
| 462 | case FBC_BAD_PLANE: | ||
| 463 | return "FBC unsupported on plane"; | ||
| 464 | case FBC_NOT_TILED: | ||
| 465 | return "framebuffer not tiled or fenced"; | ||
| 466 | case FBC_MULTIPLE_PIPES: | ||
| 467 | return "more than one pipe active"; | ||
| 468 | case FBC_MODULE_PARAM: | ||
| 469 | return "disabled per module param"; | ||
| 470 | case FBC_CHIP_DEFAULT: | ||
| 471 | return "disabled per chip default"; | ||
| 472 | case FBC_ROTATION: | ||
| 473 | return "rotation unsupported"; | ||
| 474 | case FBC_IN_DBG_MASTER: | ||
| 475 | return "Kernel debugger is active"; | ||
| 476 | default: | ||
| 477 | MISSING_CASE(reason); | ||
| 478 | return "unknown reason"; | ||
| 479 | } | ||
| 480 | } | ||
| 481 | |||
| 482 | static void set_no_fbc_reason(struct drm_i915_private *dev_priv, | ||
| 436 | enum no_fbc_reason reason) | 483 | enum no_fbc_reason reason) |
| 437 | { | 484 | { |
| 438 | if (dev_priv->fbc.no_fbc_reason == reason) | 485 | if (dev_priv->fbc.no_fbc_reason == reason) |
| 439 | return false; | 486 | return; |
| 440 | 487 | ||
| 441 | dev_priv->fbc.no_fbc_reason = reason; | 488 | dev_priv->fbc.no_fbc_reason = reason; |
| 442 | return true; | 489 | DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason)); |
| 443 | } | 490 | } |
| 444 | 491 | ||
| 445 | static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv) | 492 | static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv) |
| 446 | { | 493 | { |
| 447 | struct drm_crtc *crtc = NULL, *tmp_crtc; | 494 | struct drm_crtc *crtc = NULL, *tmp_crtc; |
| 448 | enum pipe pipe; | 495 | enum pipe pipe; |
| 449 | bool pipe_a_only = false, one_pipe_only = false; | 496 | bool pipe_a_only = false; |
| 450 | 497 | ||
| 451 | if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) | 498 | if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) |
| 452 | pipe_a_only = true; | 499 | pipe_a_only = true; |
| 453 | else if (INTEL_INFO(dev_priv)->gen <= 4) | ||
| 454 | one_pipe_only = true; | ||
| 455 | 500 | ||
| 456 | for_each_pipe(dev_priv, pipe) { | 501 | for_each_pipe(dev_priv, pipe) { |
| 457 | tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 502 | tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
| 458 | 503 | ||
| 459 | if (intel_crtc_active(tmp_crtc) && | 504 | if (intel_crtc_active(tmp_crtc) && |
| 460 | to_intel_plane_state(tmp_crtc->primary->state)->visible) { | 505 | to_intel_plane_state(tmp_crtc->primary->state)->visible) |
| 461 | if (one_pipe_only && crtc) { | ||
| 462 | if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES)) | ||
| 463 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); | ||
| 464 | return NULL; | ||
| 465 | } | ||
| 466 | crtc = tmp_crtc; | 506 | crtc = tmp_crtc; |
| 467 | } | ||
| 468 | 507 | ||
| 469 | if (pipe_a_only) | 508 | if (pipe_a_only) |
| 470 | break; | 509 | break; |
| 471 | } | 510 | } |
| 472 | 511 | ||
| 473 | if (!crtc || crtc->primary->fb == NULL) { | 512 | if (!crtc || crtc->primary->fb == NULL) |
| 474 | if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT)) | ||
| 475 | DRM_DEBUG_KMS("no output, disabling\n"); | ||
| 476 | return NULL; | 513 | return NULL; |
| 477 | } | ||
| 478 | 514 | ||
| 479 | return crtc; | 515 | return crtc; |
| 480 | } | 516 | } |
| 481 | 517 | ||
| 518 | static bool multiple_pipes_ok(struct drm_i915_private *dev_priv) | ||
| 519 | { | ||
| 520 | enum pipe pipe; | ||
| 521 | int n_pipes = 0; | ||
| 522 | struct drm_crtc *crtc; | ||
| 523 | |||
| 524 | if (INTEL_INFO(dev_priv)->gen > 4) | ||
| 525 | return true; | ||
| 526 | |||
| 527 | for_each_pipe(dev_priv, pipe) { | ||
| 528 | crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | ||
| 529 | |||
| 530 | if (intel_crtc_active(crtc) && | ||
| 531 | to_intel_plane_state(crtc->primary->state)->visible) | ||
| 532 | n_pipes++; | ||
| 533 | } | ||
| 534 | |||
| 535 | return (n_pipes < 2); | ||
| 536 | } | ||
| 537 | |||
| 538 | static int find_compression_threshold(struct drm_i915_private *dev_priv, | ||
| 539 | struct drm_mm_node *node, | ||
| 540 | int size, | ||
| 541 | int fb_cpp) | ||
| 542 | { | ||
| 543 | int compression_threshold = 1; | ||
| 544 | int ret; | ||
| 545 | |||
| 546 | /* HACK: This code depends on what we will do in *_enable_fbc. If that | ||
| 547 | * code changes, this code needs to change as well. | ||
| 548 | * | ||
| 549 | * The enable_fbc code will attempt to use one of our 2 compression | ||
| 550 | * thresholds, therefore, in that case, we only have 1 resort. | ||
| 551 | */ | ||
| 552 | |||
| 553 | /* Try to over-allocate to reduce reallocations and fragmentation. */ | ||
| 554 | ret = i915_gem_stolen_insert_node(dev_priv, node, size <<= 1, 4096); | ||
| 555 | if (ret == 0) | ||
| 556 | return compression_threshold; | ||
| 557 | |||
| 558 | again: | ||
| 559 | /* HW's ability to limit the CFB is 1:4 */ | ||
| 560 | if (compression_threshold > 4 || | ||
| 561 | (fb_cpp == 2 && compression_threshold == 2)) | ||
| 562 | return 0; | ||
| 563 | |||
| 564 | ret = i915_gem_stolen_insert_node(dev_priv, node, size >>= 1, 4096); | ||
| 565 | if (ret && INTEL_INFO(dev_priv)->gen <= 4) { | ||
| 566 | return 0; | ||
| 567 | } else if (ret) { | ||
| 568 | compression_threshold <<= 1; | ||
| 569 | goto again; | ||
| 570 | } else { | ||
| 571 | return compression_threshold; | ||
| 572 | } | ||
| 573 | } | ||
| 574 | |||
| 575 | static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size, | ||
| 576 | int fb_cpp) | ||
| 577 | { | ||
| 578 | struct drm_mm_node *uninitialized_var(compressed_llb); | ||
| 579 | int ret; | ||
| 580 | |||
| 581 | ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb, | ||
| 582 | size, fb_cpp); | ||
| 583 | if (!ret) | ||
| 584 | goto err_llb; | ||
| 585 | else if (ret > 1) { | ||
| 587 | |||
| 588 | } | ||
| 589 | |||
| 590 | dev_priv->fbc.threshold = ret; | ||
| 591 | |||
| 592 | if (INTEL_INFO(dev_priv)->gen >= 5) | ||
| 593 | I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start); | ||
| 594 | else if (IS_GM45(dev_priv)) { | ||
| 595 | I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start); | ||
| 596 | } else { | ||
| 597 | compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); | ||
| 598 | if (!compressed_llb) | ||
| 599 | goto err_fb; | ||
| 600 | |||
| 601 | ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb, | ||
| 602 | 4096, 4096); | ||
| 603 | if (ret) | ||
| 604 | goto err_fb; | ||
| 605 | |||
| 606 | dev_priv->fbc.compressed_llb = compressed_llb; | ||
| 607 | |||
| 608 | I915_WRITE(FBC_CFB_BASE, | ||
| 609 | dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start); | ||
| 610 | I915_WRITE(FBC_LL_BASE, | ||
| 611 | dev_priv->mm.stolen_base + compressed_llb->start); | ||
| 612 | } | ||
| 613 | |||
| 614 | dev_priv->fbc.uncompressed_size = size; | ||
| 615 | |||
| 616 | DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", | ||
| 617 | size); | ||
| 618 | |||
| 619 | return 0; | ||
| 620 | |||
| 621 | err_fb: | ||
| 622 | kfree(compressed_llb); | ||
| 623 | i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb); | ||
| 624 | err_llb: | ||
| 625 | pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); | ||
| 626 | return -ENOSPC; | ||
| 627 | } | ||
| 628 | |||
| 629 | static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) | ||
| 630 | { | ||
| 631 | if (dev_priv->fbc.uncompressed_size == 0) | ||
| 632 | return; | ||
| 633 | |||
| 634 | i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb); | ||
| 635 | |||
| 636 | if (dev_priv->fbc.compressed_llb) { | ||
| 637 | i915_gem_stolen_remove_node(dev_priv, | ||
| 638 | dev_priv->fbc.compressed_llb); | ||
| 639 | kfree(dev_priv->fbc.compressed_llb); | ||
| 640 | } | ||
| 641 | |||
| 642 | dev_priv->fbc.uncompressed_size = 0; | ||
| 643 | } | ||
| 644 | |||
| 645 | void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) | ||
| 646 | { | ||
| 647 | if (!dev_priv->fbc.enable_fbc) | ||
| 648 | return; | ||
| 649 | |||
| 650 | mutex_lock(&dev_priv->fbc.lock); | ||
| 651 | __intel_fbc_cleanup_cfb(dev_priv); | ||
| 652 | mutex_unlock(&dev_priv->fbc.lock); | ||
| 653 | } | ||
| 654 | |||
| 655 | static int intel_fbc_setup_cfb(struct drm_i915_private *dev_priv, int size, | ||
| 656 | int fb_cpp) | ||
| 657 | { | ||
| 658 | if (size <= dev_priv->fbc.uncompressed_size) | ||
| 659 | return 0; | ||
| 660 | |||
| 661 | /* Release any current block */ | ||
| 662 | __intel_fbc_cleanup_cfb(dev_priv); | ||
| 663 | |||
| 664 | return intel_fbc_alloc_cfb(dev_priv, size, fb_cpp); | ||
| 665 | } | ||
| 666 | |||
| 482 | /** | 667 | /** |
| 483 | * intel_fbc_update - enable/disable FBC as needed | 668 | * __intel_fbc_update - enable/disable FBC as needed, unlocked |
| 484 | * @dev: the drm_device | 669 | * @dev_priv: i915 device instance |
| 485 | * | 670 | * |
| 486 | * Set up the framebuffer compression hardware at mode set time. We | 671 | * Set up the framebuffer compression hardware at mode set time. We |
| 487 | * enable it if possible: | 672 | * enable it if possible: |
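For reference, the retry logic in find_compression_threshold() above plays out as follows for a framebuffer needing size bytes of CFB: the first attempt asks for 2*size at threshold 1 (the deliberate over-allocation), then size at threshold 1, then size/2 at threshold 2 and size/4 at threshold 4, giving up once the threshold would exceed the hardware's 1:4 limit. Two special cases fall out of the checks in the loop: 16 bpp framebuffers stop before threshold 2 is ever attempted (the cpp == 2 bump in the enable paths already consumes one doubling), and gen4 and older parts abort after the plain size attempt rather than retrying at reduced sizes.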
| @@ -498,9 +683,8 @@ static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv) | |||
| 498 | * | 683 | * |
| 499 | * We need to enable/disable FBC on a global basis. | 684 | * We need to enable/disable FBC on a global basis. |
| 500 | */ | 685 | */ |
| 501 | void intel_fbc_update(struct drm_device *dev) | 686 | static void __intel_fbc_update(struct drm_i915_private *dev_priv) |
| 502 | { | 687 | { |
| 503 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 504 | struct drm_crtc *crtc = NULL; | 688 | struct drm_crtc *crtc = NULL; |
| 505 | struct intel_crtc *intel_crtc; | 689 | struct intel_crtc *intel_crtc; |
| 506 | struct drm_framebuffer *fb; | 690 | struct drm_framebuffer *fb; |
| @@ -508,22 +692,19 @@ void intel_fbc_update(struct drm_device *dev) | |||
| 508 | const struct drm_display_mode *adjusted_mode; | 692 | const struct drm_display_mode *adjusted_mode; |
| 509 | unsigned int max_width, max_height; | 693 | unsigned int max_width, max_height; |
| 510 | 694 | ||
| 511 | if (!HAS_FBC(dev)) | 695 | WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); |
| 512 | return; | ||
| 513 | 696 | ||
| 514 | /* disable framebuffer compression in vGPU */ | 697 | /* disable framebuffer compression in vGPU */ |
| 515 | if (intel_vgpu_active(dev)) | 698 | if (intel_vgpu_active(dev_priv->dev)) |
| 516 | i915.enable_fbc = 0; | 699 | i915.enable_fbc = 0; |
| 517 | 700 | ||
| 518 | if (i915.enable_fbc < 0) { | 701 | if (i915.enable_fbc < 0) { |
| 519 | if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT)) | 702 | set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT); |
| 520 | DRM_DEBUG_KMS("disabled per chip default\n"); | ||
| 521 | goto out_disable; | 703 | goto out_disable; |
| 522 | } | 704 | } |
| 523 | 705 | ||
| 524 | if (!i915.enable_fbc) { | 706 | if (!i915.enable_fbc) { |
| 525 | if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) | 707 | set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM); |
| 526 | DRM_DEBUG_KMS("fbc disabled per module param\n"); | ||
| 527 | goto out_disable; | 708 | goto out_disable; |
| 528 | } | 709 | } |
| 529 | 710 | ||
| @@ -537,8 +718,15 @@ void intel_fbc_update(struct drm_device *dev) | |||
| 537 | * - going to an unsupported config (interlace, pixel multiply, etc.) | 718 | * - going to an unsupported config (interlace, pixel multiply, etc.) |
| 538 | */ | 719 | */ |
| 539 | crtc = intel_fbc_find_crtc(dev_priv); | 720 | crtc = intel_fbc_find_crtc(dev_priv); |
| 540 | if (!crtc) | 721 | if (!crtc) { |
| 722 | set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT); | ||
| 723 | goto out_disable; | ||
| 724 | } | ||
| 725 | |||
| 726 | if (!multiple_pipes_ok(dev_priv)) { | ||
| 727 | set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES); | ||
| 541 | goto out_disable; | 728 | goto out_disable; |
| 729 | } | ||
| 542 | 730 | ||
| 543 | intel_crtc = to_intel_crtc(crtc); | 731 | intel_crtc = to_intel_crtc(crtc); |
| 544 | fb = crtc->primary->fb; | 732 | fb = crtc->primary->fb; |
| @@ -547,16 +735,14 @@ void intel_fbc_update(struct drm_device *dev) | |||
| 547 | 735 | ||
| 548 | if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) || | 736 | if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) || |
| 549 | (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) { | 737 | (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) { |
| 550 | if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE)) | 738 | set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE); |
| 551 | DRM_DEBUG_KMS("mode incompatible with compression, " | ||
| 552 | "disabling\n"); | ||
| 553 | goto out_disable; | 739 | goto out_disable; |
| 554 | } | 740 | } |
| 555 | 741 | ||
| 556 | if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) { | 742 | if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) { |
| 557 | max_width = 4096; | 743 | max_width = 4096; |
| 558 | max_height = 4096; | 744 | max_height = 4096; |
| 559 | } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { | 745 | } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { |
| 560 | max_width = 4096; | 746 | max_width = 4096; |
| 561 | max_height = 2048; | 747 | max_height = 2048; |
| 562 | } else { | 748 | } else { |
| @@ -565,14 +751,12 @@ void intel_fbc_update(struct drm_device *dev) | |||
| 565 | } | 751 | } |
| 566 | if (intel_crtc->config->pipe_src_w > max_width || | 752 | if (intel_crtc->config->pipe_src_w > max_width || |
| 567 | intel_crtc->config->pipe_src_h > max_height) { | 753 | intel_crtc->config->pipe_src_h > max_height) { |
| 568 | if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE)) | 754 | set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE); |
| 569 | DRM_DEBUG_KMS("mode too large for compression, disabling\n"); | ||
| 570 | goto out_disable; | 755 | goto out_disable; |
| 571 | } | 756 | } |
| 572 | if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) && | 757 | if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) && |
| 573 | intel_crtc->plane != PLANE_A) { | 758 | intel_crtc->plane != PLANE_A) { |
| 574 | if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE)) | 759 | set_no_fbc_reason(dev_priv, FBC_BAD_PLANE); |
| 575 | DRM_DEBUG_KMS("plane not A, disabling compression\n"); | ||
| 576 | goto out_disable; | 760 | goto out_disable; |
| 577 | } | 761 | } |
| 578 | 762 | ||
| @@ -581,25 +765,24 @@ void intel_fbc_update(struct drm_device *dev) | |||
| 581 | */ | 765 | */ |
| 582 | if (obj->tiling_mode != I915_TILING_X || | 766 | if (obj->tiling_mode != I915_TILING_X || |
| 583 | obj->fence_reg == I915_FENCE_REG_NONE) { | 767 | obj->fence_reg == I915_FENCE_REG_NONE) { |
| 584 | if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED)) | 768 | set_no_fbc_reason(dev_priv, FBC_NOT_TILED); |
| 585 | DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); | ||
| 586 | goto out_disable; | 769 | goto out_disable; |
| 587 | } | 770 | } |
| 588 | if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) && | 771 | if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) && |
| 589 | crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) { | 772 | crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) { |
| 590 | if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE)) | 773 | set_no_fbc_reason(dev_priv, FBC_ROTATION); |
| 591 | DRM_DEBUG_KMS("Rotation unsupported, disabling\n"); | ||
| 592 | goto out_disable; | 774 | goto out_disable; |
| 593 | } | 775 | } |
| 594 | 776 | ||
| 595 | /* If the kernel debugger is active, always disable compression */ | 777 | /* If the kernel debugger is active, always disable compression */ |
| 596 | if (in_dbg_master()) | 778 | if (in_dbg_master()) { |
| 779 | set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER); | ||
| 597 | goto out_disable; | 780 | goto out_disable; |
| 781 | } | ||
| 598 | 782 | ||
| 599 | if (i915_gem_stolen_setup_compression(dev, obj->base.size, | 783 | if (intel_fbc_setup_cfb(dev_priv, obj->base.size, |
| 600 | drm_format_plane_cpp(fb->pixel_format, 0))) { | 784 | drm_format_plane_cpp(fb->pixel_format, 0))) { |
| 601 | if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL)) | 785 | set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL); |
| 602 | DRM_DEBUG_KMS("framebuffer too large, disabling compression\n"); | ||
| 603 | goto out_disable; | 786 | goto out_disable; |
| 604 | } | 787 | } |
| 605 | 788 | ||
| @@ -613,7 +796,7 @@ void intel_fbc_update(struct drm_device *dev) | |||
| 613 | dev_priv->fbc.y == crtc->y) | 796 | dev_priv->fbc.y == crtc->y) |
| 614 | return; | 797 | return; |
| 615 | 798 | ||
| 616 | if (intel_fbc_enabled(dev)) { | 799 | if (intel_fbc_enabled(dev_priv)) { |
| 617 | /* We update FBC along two paths, after changing fb/crtc | 800 | /* We update FBC along two paths, after changing fb/crtc |
| 618 | * configuration (modeswitching) and after page-flipping | 801 | * configuration (modeswitching) and after page-flipping |
| 619 | * finishes. For the latter, we know that not only did | 802 | * finishes. For the latter, we know that not only did |
| @@ -638,58 +821,86 @@ void intel_fbc_update(struct drm_device *dev) | |||
| 638 | * some point. And we wait before enabling FBC anyway. | 821 | * some point. And we wait before enabling FBC anyway. |
| 639 | */ | 822 | */ |
| 640 | DRM_DEBUG_KMS("disabling active FBC for update\n"); | 823 | DRM_DEBUG_KMS("disabling active FBC for update\n"); |
| 641 | intel_fbc_disable(dev); | 824 | __intel_fbc_disable(dev_priv); |
| 642 | } | 825 | } |
| 643 | 826 | ||
| 644 | intel_fbc_enable(crtc); | 827 | intel_fbc_enable(intel_crtc); |
| 645 | dev_priv->fbc.no_fbc_reason = FBC_OK; | 828 | dev_priv->fbc.no_fbc_reason = FBC_OK; |
| 646 | return; | 829 | return; |
| 647 | 830 | ||
| 648 | out_disable: | 831 | out_disable: |
| 649 | /* Multiple disables should be harmless */ | 832 | /* Multiple disables should be harmless */ |
| 650 | if (intel_fbc_enabled(dev)) { | 833 | if (intel_fbc_enabled(dev_priv)) { |
| 651 | DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); | 834 | DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); |
| 652 | intel_fbc_disable(dev); | 835 | __intel_fbc_disable(dev_priv); |
| 653 | } | 836 | } |
| 654 | i915_gem_stolen_cleanup_compression(dev); | 837 | __intel_fbc_cleanup_cfb(dev_priv); |
| 838 | } | ||
| 839 | |||
| 840 | /* | ||
| 841 | * intel_fbc_update - enable/disable FBC as needed | ||
| 842 | * @dev_priv: i915 device instance | ||
| 843 | * | ||
| 844 | * This function reevaluates the overall state and enables or disables FBC. | ||
| 845 | */ | ||
| 846 | void intel_fbc_update(struct drm_i915_private *dev_priv) | ||
| 847 | { | ||
| 848 | if (!dev_priv->fbc.enable_fbc) | ||
| 849 | return; | ||
| 850 | |||
| 851 | mutex_lock(&dev_priv->fbc.lock); | ||
| 852 | __intel_fbc_update(dev_priv); | ||
| 853 | mutex_unlock(&dev_priv->fbc.lock); | ||
| 655 | } | 854 | } |
| 656 | 855 | ||
| 657 | void intel_fbc_invalidate(struct drm_i915_private *dev_priv, | 856 | void intel_fbc_invalidate(struct drm_i915_private *dev_priv, |
| 658 | unsigned int frontbuffer_bits, | 857 | unsigned int frontbuffer_bits, |
| 659 | enum fb_op_origin origin) | 858 | enum fb_op_origin origin) |
| 660 | { | 859 | { |
| 661 | struct drm_device *dev = dev_priv->dev; | ||
| 662 | unsigned int fbc_bits; | 860 | unsigned int fbc_bits; |
| 663 | 861 | ||
| 862 | if (!dev_priv->fbc.enable_fbc) | ||
| 863 | return; | ||
| 864 | |||
| 664 | if (origin == ORIGIN_GTT) | 865 | if (origin == ORIGIN_GTT) |
| 665 | return; | 866 | return; |
| 666 | 867 | ||
| 868 | mutex_lock(&dev_priv->fbc.lock); | ||
| 869 | |||
| 667 | if (dev_priv->fbc.enabled) | 870 | if (dev_priv->fbc.enabled) |
| 668 | fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe); | 871 | fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe); |
| 669 | else if (dev_priv->fbc.fbc_work) | 872 | else if (dev_priv->fbc.fbc_work) |
| 670 | fbc_bits = INTEL_FRONTBUFFER_PRIMARY( | 873 | fbc_bits = INTEL_FRONTBUFFER_PRIMARY( |
| 671 | to_intel_crtc(dev_priv->fbc.fbc_work->crtc)->pipe); | 874 | dev_priv->fbc.fbc_work->crtc->pipe); |
| 672 | else | 875 | else |
| 673 | fbc_bits = dev_priv->fbc.possible_framebuffer_bits; | 876 | fbc_bits = dev_priv->fbc.possible_framebuffer_bits; |
| 674 | 877 | ||
| 675 | dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits); | 878 | dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits); |
| 676 | 879 | ||
| 677 | if (dev_priv->fbc.busy_bits) | 880 | if (dev_priv->fbc.busy_bits) |
| 678 | intel_fbc_disable(dev); | 881 | __intel_fbc_disable(dev_priv); |
| 882 | |||
| 883 | mutex_unlock(&dev_priv->fbc.lock); | ||
| 679 | } | 884 | } |
| 680 | 885 | ||
| 681 | void intel_fbc_flush(struct drm_i915_private *dev_priv, | 886 | void intel_fbc_flush(struct drm_i915_private *dev_priv, |
| 682 | unsigned int frontbuffer_bits) | 887 | unsigned int frontbuffer_bits) |
| 683 | { | 888 | { |
| 684 | struct drm_device *dev = dev_priv->dev; | 889 | if (!dev_priv->fbc.enable_fbc) |
| 890 | return; | ||
| 891 | |||
| 892 | mutex_lock(&dev_priv->fbc.lock); | ||
| 685 | 893 | ||
| 686 | if (!dev_priv->fbc.busy_bits) | 894 | if (!dev_priv->fbc.busy_bits) |
| 687 | return; | 895 | goto out; |
| 688 | 896 | ||
| 689 | dev_priv->fbc.busy_bits &= ~frontbuffer_bits; | 897 | dev_priv->fbc.busy_bits &= ~frontbuffer_bits; |
| 690 | 898 | ||
| 691 | if (!dev_priv->fbc.busy_bits) | 899 | if (!dev_priv->fbc.busy_bits) |
| 692 | intel_fbc_update(dev); | 900 | __intel_fbc_update(dev_priv); |
| 901 | |||
| 902 | out: | ||
| 903 | mutex_unlock(&dev_priv->fbc.lock); | ||
| 693 | } | 904 | } |
| 694 | 905 | ||
| 695 | /** | 906 | /** |
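
The invalidate/flush pair in the hunk above keeps a per-plane mask: invalidation ORs the tracked plane's frontbuffer bit into fbc.busy_bits and disables compression, while a flush clears those bits and re-runs __intel_fbc_update() only once nothing relevant is still busy. An illustrative walk-through of that bookkeeping (not part of the patch; dev_priv and frontbuffer_bits are assumed to be in scope, and the real masks come from INTEL_FRONTBUFFER_PRIMARY()):

    static void fbc_busy_bits_example(struct drm_i915_private *dev_priv,
                                      unsigned int frontbuffer_bits)
    {
            unsigned int fbc_bits = INTEL_FRONTBUFFER_PRIMARY(PIPE_A);

            /* invalidate: rendering touched pipe A's primary plane */
            dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
            /* ...__intel_fbc_disable() runs while busy_bits is non-zero... */

            /* flush: rendering on those planes has finished */
            dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
            /* ...busy_bits back to zero, __intel_fbc_update() may re-enable FBC */
    }
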
| @@ -702,6 +913,8 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) | |||
| 702 | { | 913 | { |
| 703 | enum pipe pipe; | 914 | enum pipe pipe; |
| 704 | 915 | ||
| 916 | mutex_init(&dev_priv->fbc.lock); | ||
| 917 | |||
| 705 | if (!HAS_FBC(dev_priv)) { | 918 | if (!HAS_FBC(dev_priv)) { |
| 706 | dev_priv->fbc.enabled = false; | 919 | dev_priv->fbc.enabled = false; |
| 707 | dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED; | 920 | dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED; |
| @@ -717,25 +930,25 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) | |||
| 717 | } | 930 | } |
| 718 | 931 | ||
| 719 | if (INTEL_INFO(dev_priv)->gen >= 7) { | 932 | if (INTEL_INFO(dev_priv)->gen >= 7) { |
| 720 | dev_priv->display.fbc_enabled = ilk_fbc_enabled; | 933 | dev_priv->fbc.fbc_enabled = ilk_fbc_enabled; |
| 721 | dev_priv->display.enable_fbc = gen7_fbc_enable; | 934 | dev_priv->fbc.enable_fbc = gen7_fbc_enable; |
| 722 | dev_priv->display.disable_fbc = ilk_fbc_disable; | 935 | dev_priv->fbc.disable_fbc = ilk_fbc_disable; |
| 723 | } else if (INTEL_INFO(dev_priv)->gen >= 5) { | 936 | } else if (INTEL_INFO(dev_priv)->gen >= 5) { |
| 724 | dev_priv->display.fbc_enabled = ilk_fbc_enabled; | 937 | dev_priv->fbc.fbc_enabled = ilk_fbc_enabled; |
| 725 | dev_priv->display.enable_fbc = ilk_fbc_enable; | 938 | dev_priv->fbc.enable_fbc = ilk_fbc_enable; |
| 726 | dev_priv->display.disable_fbc = ilk_fbc_disable; | 939 | dev_priv->fbc.disable_fbc = ilk_fbc_disable; |
| 727 | } else if (IS_GM45(dev_priv)) { | 940 | } else if (IS_GM45(dev_priv)) { |
| 728 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; | 941 | dev_priv->fbc.fbc_enabled = g4x_fbc_enabled; |
| 729 | dev_priv->display.enable_fbc = g4x_fbc_enable; | 942 | dev_priv->fbc.enable_fbc = g4x_fbc_enable; |
| 730 | dev_priv->display.disable_fbc = g4x_fbc_disable; | 943 | dev_priv->fbc.disable_fbc = g4x_fbc_disable; |
| 731 | } else { | 944 | } else { |
| 732 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; | 945 | dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled; |
| 733 | dev_priv->display.enable_fbc = i8xx_fbc_enable; | 946 | dev_priv->fbc.enable_fbc = i8xx_fbc_enable; |
| 734 | dev_priv->display.disable_fbc = i8xx_fbc_disable; | 947 | dev_priv->fbc.disable_fbc = i8xx_fbc_disable; |
| 735 | 948 | ||
| 736 | /* This value was pulled out of someone's hat */ | 949 | /* This value was pulled out of someone's hat */ |
| 737 | I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT); | 950 | I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT); |
| 738 | } | 951 | } |
| 739 | 952 | ||
| 740 | dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev); | 953 | dev_priv->fbc.enabled = dev_priv->fbc.fbc_enabled(dev_priv); |
| 741 | } | 954 | } |
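
Taken together, the intel_fbc.c hunks establish a locking convention: the exported entry points (intel_fbc_update(), intel_fbc_invalidate(), intel_fbc_flush()) take the new dev_priv->fbc.lock and delegate to double-underscored helpers that expect the lock to be held. A minimal sketch of that shape, assuming only what the diff shows (the function name below is hypothetical, used purely to illustrate the convention):

    /* Hypothetical entry point written in the style this patch introduces:
     * bail out if FBC was never set up, then do all work under fbc.lock. */
    void intel_fbc_example_entry(struct drm_i915_private *dev_priv)
    {
            if (!dev_priv->fbc.enable_fbc)  /* vfunc unset => no FBC on this platform */
                    return;

            mutex_lock(&dev_priv->fbc.lock);
            __intel_fbc_update(dev_priv);   /* __helpers assume fbc.lock is held */
            mutex_unlock(&dev_priv->fbc.lock);
    }

The same split explains why intel_fbc_flush() now jumps to an out: label rather than returning early: the lock has to be dropped on every path.
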
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 6372cfc7d053..7eff33ff84f6 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
| @@ -63,8 +63,7 @@ static int intel_fbdev_set_par(struct fb_info *info) | |||
| 63 | * now until we solve this for real. | 63 | * now until we solve this for real. |
| 64 | */ | 64 | */ |
| 65 | mutex_lock(&fb_helper->dev->struct_mutex); | 65 | mutex_lock(&fb_helper->dev->struct_mutex); |
| 66 | ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj, | 66 | intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT); |
| 67 | true); | ||
| 68 | mutex_unlock(&fb_helper->dev->struct_mutex); | 67 | mutex_unlock(&fb_helper->dev->struct_mutex); |
| 69 | } | 68 | } |
| 70 | 69 | ||
| @@ -89,7 +88,7 @@ static int intel_fbdev_blank(int blank, struct fb_info *info) | |||
| 89 | * now until we solve this for real. | 88 | * now until we solve this for real. |
| 90 | */ | 89 | */ |
| 91 | mutex_lock(&fb_helper->dev->struct_mutex); | 90 | mutex_lock(&fb_helper->dev->struct_mutex); |
| 92 | intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT); | 91 | intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT); |
| 93 | mutex_unlock(&fb_helper->dev->struct_mutex); | 92 | mutex_unlock(&fb_helper->dev->struct_mutex); |
| 94 | } | 93 | } |
| 95 | 94 | ||
| @@ -115,7 +114,7 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var, | |||
| 115 | * now until we solve this for real. | 114 | * now until we solve this for real. |
| 116 | */ | 115 | */ |
| 117 | mutex_lock(&fb_helper->dev->struct_mutex); | 116 | mutex_lock(&fb_helper->dev->struct_mutex); |
| 118 | intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT); | 117 | intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT); |
| 119 | mutex_unlock(&fb_helper->dev->struct_mutex); | 118 | mutex_unlock(&fb_helper->dev->struct_mutex); |
| 120 | } | 119 | } |
| 121 | 120 | ||
| @@ -177,7 +176,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper, | |||
| 177 | } | 176 | } |
| 178 | 177 | ||
| 179 | /* Flush everything out, we'll be doing GTT only from now on */ | 178 | /* Flush everything out, we'll be doing GTT only from now on */ |
| 180 | ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL); | 179 | ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL, NULL); |
| 181 | if (ret) { | 180 | if (ret) { |
| 182 | DRM_ERROR("failed to pin obj: %d\n", ret); | 181 | DRM_ERROR("failed to pin obj: %d\n", ret); |
| 183 | goto out_fb; | 182 | goto out_fb; |
| @@ -484,18 +483,13 @@ retry: | |||
| 484 | * IMPORTANT: We want to use the adjusted mode (i.e. | 483 | * IMPORTANT: We want to use the adjusted mode (i.e. |
| 485 | * after the panel fitter upscaling) as the initial | 484 | * after the panel fitter upscaling) as the initial |
| 486 | * config, not the input mode, which is what crtc->mode | 485 | * config, not the input mode, which is what crtc->mode |
| 487 | * usually contains. But since our current fastboot | 486 | * usually contains. But since our current |
| 488 | * code puts a mode derived from the post-pfit timings | 487 | * code puts a mode derived from the post-pfit timings |
| 489 | * into crtc->mode this works out correctly. We don't | 488 | * into crtc->mode this works out correctly. |
| 490 | * use hwmode anywhere right now, so use it for this | ||
| 491 | * since the fb helper layer wants a pointer to | ||
| 492 | * something we own. | ||
| 493 | */ | 489 | */ |
| 494 | DRM_DEBUG_KMS("looking for current mode on connector %s\n", | 490 | DRM_DEBUG_KMS("looking for current mode on connector %s\n", |
| 495 | connector->name); | 491 | connector->name); |
| 496 | intel_mode_from_pipe_config(&encoder->crtc->hwmode, | 492 | modes[i] = &encoder->crtc->mode; |
| 497 | to_intel_crtc(encoder->crtc)->config); | ||
| 498 | modes[i] = &encoder->crtc->hwmode; | ||
| 499 | } | 493 | } |
| 500 | crtcs[i] = new_crtc; | 494 | crtcs[i] = new_crtc; |
| 501 | 495 | ||
| @@ -582,7 +576,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, | |||
| 582 | struct intel_framebuffer *fb = NULL; | 576 | struct intel_framebuffer *fb = NULL; |
| 583 | struct drm_crtc *crtc; | 577 | struct drm_crtc *crtc; |
| 584 | struct intel_crtc *intel_crtc; | 578 | struct intel_crtc *intel_crtc; |
| 585 | struct intel_initial_plane_config *plane_config = NULL; | ||
| 586 | unsigned int max_size = 0; | 579 | unsigned int max_size = 0; |
| 587 | 580 | ||
| 588 | if (!i915.fastboot) | 581 | if (!i915.fastboot) |
| @@ -590,20 +583,21 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, | |||
| 590 | 583 | ||
| 591 | /* Find the largest fb */ | 584 | /* Find the largest fb */ |
| 592 | for_each_crtc(dev, crtc) { | 585 | for_each_crtc(dev, crtc) { |
| 586 | struct drm_i915_gem_object *obj = | ||
| 587 | intel_fb_obj(crtc->primary->state->fb); | ||
| 593 | intel_crtc = to_intel_crtc(crtc); | 588 | intel_crtc = to_intel_crtc(crtc); |
| 594 | 589 | ||
| 595 | if (!intel_crtc->active || !crtc->primary->fb) { | 590 | if (!intel_crtc->active || !obj) { |
| 596 | DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n", | 591 | DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n", |
| 597 | pipe_name(intel_crtc->pipe)); | 592 | pipe_name(intel_crtc->pipe)); |
| 598 | continue; | 593 | continue; |
| 599 | } | 594 | } |
| 600 | 595 | ||
| 601 | if (intel_crtc->plane_config.size > max_size) { | 596 | if (obj->base.size > max_size) { |
| 602 | DRM_DEBUG_KMS("found possible fb from plane %c\n", | 597 | DRM_DEBUG_KMS("found possible fb from plane %c\n", |
| 603 | pipe_name(intel_crtc->pipe)); | 598 | pipe_name(intel_crtc->pipe)); |
| 604 | plane_config = &intel_crtc->plane_config; | 599 | fb = to_intel_framebuffer(crtc->primary->state->fb); |
| 605 | fb = to_intel_framebuffer(crtc->primary->fb); | 600 | max_size = obj->base.size; |
| 606 | max_size = plane_config->size; | ||
| 607 | } | 601 | } |
| 608 | } | 602 | } |
| 609 | 603 | ||
| @@ -638,7 +632,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, | |||
| 638 | DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n", | 632 | DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n", |
| 639 | pipe_name(intel_crtc->pipe), | 633 | pipe_name(intel_crtc->pipe), |
| 640 | cur_size, fb->base.pitches[0]); | 634 | cur_size, fb->base.pitches[0]); |
| 641 | plane_config = NULL; | ||
| 642 | fb = NULL; | 635 | fb = NULL; |
| 643 | break; | 636 | break; |
| 644 | } | 637 | } |
| @@ -659,7 +652,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, | |||
| 659 | DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n", | 652 | DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n", |
| 660 | pipe_name(intel_crtc->pipe), | 653 | pipe_name(intel_crtc->pipe), |
| 661 | cur_size, max_size); | 654 | cur_size, max_size); |
| 662 | plane_config = NULL; | ||
| 663 | fb = NULL; | 655 | fb = NULL; |
| 664 | break; | 656 | break; |
| 665 | } | 657 | } |
| @@ -825,11 +817,20 @@ void intel_fbdev_restore_mode(struct drm_device *dev) | |||
| 825 | { | 817 | { |
| 826 | int ret; | 818 | int ret; |
| 827 | struct drm_i915_private *dev_priv = dev->dev_private; | 819 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 820 | struct intel_fbdev *ifbdev = dev_priv->fbdev; | ||
| 821 | struct drm_fb_helper *fb_helper; | ||
| 828 | 822 | ||
| 829 | if (!dev_priv->fbdev) | 823 | if (!ifbdev) |
| 830 | return; | 824 | return; |
| 831 | 825 | ||
| 832 | ret = drm_fb_helper_restore_fbdev_mode_unlocked(&dev_priv->fbdev->helper); | 826 | fb_helper = &ifbdev->helper; |
| 833 | if (ret) | 827 | |
| 828 | ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); | ||
| 829 | if (ret) { | ||
| 834 | DRM_DEBUG("failed to restore crtc mode\n"); | 830 | DRM_DEBUG("failed to restore crtc mode\n"); |
| 831 | } else { | ||
| 832 | mutex_lock(&fb_helper->dev->struct_mutex); | ||
| 833 | intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT); | ||
| 834 | mutex_unlock(&fb_helper->dev->struct_mutex); | ||
| 835 | } | ||
| 835 | } | 836 | } |
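
The fbdev hooks above all converge on one pattern: whenever fbdev touches the framebuffer through the GTT (set_par, blank, pan, restore_mode), it reports the write to the frontbuffer tracking code instead of forcing a GTT-domain transition. A condensed sketch of the pattern as used in these hunks (fb_helper and ifbdev are the objects already in scope in those callbacks):

    /* Report a CPU/GTT write to the fbdev framebuffer so PSR and DRRS
     * exit their low-power states; struct_mutex protects the tracking bits. */
    mutex_lock(&fb_helper->dev->struct_mutex);
    intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
    mutex_unlock(&fb_helper->dev->struct_mutex);

Note that intel_fbc_invalidate() (in the earlier intel_fbc.c hunks) returns immediately for ORIGIN_GTT, so these fbdev writes wake PSR/DRRS but leave FBC alone, presumably because the FBC hardware detects front-buffer GTT writes on its own.
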
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c index 57095f54c1f2..777b1d3ccd41 100644 --- a/drivers/gpu/drm/i915/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/intel_frontbuffer.c | |||
| @@ -65,84 +65,29 @@ | |||
| 65 | #include "intel_drv.h" | 65 | #include "intel_drv.h" |
| 66 | #include "i915_drv.h" | 66 | #include "i915_drv.h" |
| 67 | 67 | ||
| 68 | static void intel_increase_pllclock(struct drm_device *dev, | ||
| 69 | enum pipe pipe) | ||
| 70 | { | ||
| 71 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 72 | int dpll_reg = DPLL(pipe); | ||
| 73 | int dpll; | ||
| 74 | |||
| 75 | if (!HAS_GMCH_DISPLAY(dev)) | ||
| 76 | return; | ||
| 77 | |||
| 78 | if (!dev_priv->lvds_downclock_avail) | ||
| 79 | return; | ||
| 80 | |||
| 81 | dpll = I915_READ(dpll_reg); | ||
| 82 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { | ||
| 83 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); | ||
| 84 | |||
| 85 | assert_panel_unlocked(dev_priv, pipe); | ||
| 86 | |||
| 87 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; | ||
| 88 | I915_WRITE(dpll_reg, dpll); | ||
| 89 | intel_wait_for_vblank(dev, pipe); | ||
| 90 | |||
| 91 | dpll = I915_READ(dpll_reg); | ||
| 92 | if (dpll & DISPLAY_RATE_SELECT_FPA1) | ||
| 93 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); | ||
| 94 | } | ||
| 95 | } | ||
| 96 | |||
| 97 | /** | ||
| 98 | * intel_mark_fb_busy - mark given planes as busy | ||
| 99 | * @dev: DRM device | ||
| 100 | * @frontbuffer_bits: bits for the affected planes | ||
| 101 | * @ring: optional ring for asynchronous commands | ||
| 102 | * | ||
| 103 | * This function gets called every time the screen contents change. It can be | ||
| 104 | * used to keep e.g. the update rate at the nominal refresh rate with DRRS. | ||
| 105 | */ | ||
| 106 | static void intel_mark_fb_busy(struct drm_device *dev, | ||
| 107 | unsigned frontbuffer_bits, | ||
| 108 | struct intel_engine_cs *ring) | ||
| 109 | { | ||
| 110 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 111 | enum pipe pipe; | ||
| 112 | |||
| 113 | for_each_pipe(dev_priv, pipe) { | ||
| 114 | if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe))) | ||
| 115 | continue; | ||
| 116 | |||
| 117 | intel_increase_pllclock(dev, pipe); | ||
| 118 | } | ||
| 119 | } | ||
| 120 | |||
| 121 | /** | 68 | /** |
| 122 | * intel_fb_obj_invalidate - invalidate frontbuffer object | 69 | * intel_fb_obj_invalidate - invalidate frontbuffer object |
| 123 | * @obj: GEM object to invalidate | 70 | * @obj: GEM object to invalidate |
| 124 | * @ring: set for asynchronous rendering | ||
| 125 | * @origin: which operation caused the invalidation | 71 | * @origin: which operation caused the invalidation |
| 126 | * | 72 | * |
| 127 | * This function gets called every time rendering on the given object starts and | 73 | * This function gets called every time rendering on the given object starts and |
| 128 | * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must | 74 | * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must |
| 129 | * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed | 75 | * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed |
| 130 | * until the rendering completes or a flip on this frontbuffer plane is | 76 | * until the rendering completes or a flip on this frontbuffer plane is |
| 131 | * scheduled. | 77 | * scheduled. |
| 132 | */ | 78 | */ |
| 133 | void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, | 79 | void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, |
| 134 | struct intel_engine_cs *ring, | ||
| 135 | enum fb_op_origin origin) | 80 | enum fb_op_origin origin) |
| 136 | { | 81 | { |
| 137 | struct drm_device *dev = obj->base.dev; | 82 | struct drm_device *dev = obj->base.dev; |
| 138 | struct drm_i915_private *dev_priv = dev->dev_private; | 83 | struct drm_i915_private *dev_priv = to_i915(dev); |
| 139 | 84 | ||
| 140 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | 85 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
| 141 | 86 | ||
| 142 | if (!obj->frontbuffer_bits) | 87 | if (!obj->frontbuffer_bits) |
| 143 | return; | 88 | return; |
| 144 | 89 | ||
| 145 | if (ring) { | 90 | if (origin == ORIGIN_CS) { |
| 146 | mutex_lock(&dev_priv->fb_tracking.lock); | 91 | mutex_lock(&dev_priv->fb_tracking.lock); |
| 147 | dev_priv->fb_tracking.busy_bits | 92 | dev_priv->fb_tracking.busy_bits |
| 148 | |= obj->frontbuffer_bits; | 93 | |= obj->frontbuffer_bits; |
| @@ -151,8 +96,6 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, | |||
| 151 | mutex_unlock(&dev_priv->fb_tracking.lock); | 96 | mutex_unlock(&dev_priv->fb_tracking.lock); |
| 152 | } | 97 | } |
| 153 | 98 | ||
| 154 | intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring); | ||
| 155 | |||
| 156 | intel_psr_invalidate(dev, obj->frontbuffer_bits); | 99 | intel_psr_invalidate(dev, obj->frontbuffer_bits); |
| 157 | intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits); | 100 | intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits); |
| 158 | intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin); | 101 | intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin); |
| @@ -162,6 +105,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, | |||
| 162 | * intel_frontbuffer_flush - flush frontbuffer | 105 | * intel_frontbuffer_flush - flush frontbuffer |
| 163 | * @dev: DRM device | 106 | * @dev: DRM device |
| 164 | * @frontbuffer_bits: frontbuffer plane tracking bits | 107 | * @frontbuffer_bits: frontbuffer plane tracking bits |
| 108 | * @origin: which operation caused the flush | ||
| 165 | * | 109 | * |
| 166 | * This function gets called every time rendering on the given planes has | 110 | * This function gets called every time rendering on the given planes has |
| 167 | * completed and frontbuffer caching can be started again. Flushes will get | 111 | * completed and frontbuffer caching can be started again. Flushes will get |
| @@ -169,20 +113,22 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, | |||
| 169 | * | 113 | * |
| 170 | * Can be called without any locks held. | 114 | * Can be called without any locks held. |
| 171 | */ | 115 | */ |
| 172 | void intel_frontbuffer_flush(struct drm_device *dev, | 116 | static void intel_frontbuffer_flush(struct drm_device *dev, |
| 173 | unsigned frontbuffer_bits) | 117 | unsigned frontbuffer_bits, |
| 118 | enum fb_op_origin origin) | ||
| 174 | { | 119 | { |
| 175 | struct drm_i915_private *dev_priv = dev->dev_private; | 120 | struct drm_i915_private *dev_priv = to_i915(dev); |
| 176 | 121 | ||
| 177 | /* Delay flushing when rings are still busy.*/ | 122 | /* Delay flushing when rings are still busy.*/ |
| 178 | mutex_lock(&dev_priv->fb_tracking.lock); | 123 | mutex_lock(&dev_priv->fb_tracking.lock); |
| 179 | frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits; | 124 | frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits; |
| 180 | mutex_unlock(&dev_priv->fb_tracking.lock); | 125 | mutex_unlock(&dev_priv->fb_tracking.lock); |
| 181 | 126 | ||
| 182 | intel_mark_fb_busy(dev, frontbuffer_bits, NULL); | 127 | if (!frontbuffer_bits) |
| 128 | return; | ||
| 183 | 129 | ||
| 184 | intel_edp_drrs_flush(dev, frontbuffer_bits); | 130 | intel_edp_drrs_flush(dev, frontbuffer_bits); |
| 185 | intel_psr_flush(dev, frontbuffer_bits); | 131 | intel_psr_flush(dev, frontbuffer_bits, origin); |
| 186 | intel_fbc_flush(dev_priv, frontbuffer_bits); | 132 | intel_fbc_flush(dev_priv, frontbuffer_bits); |
| 187 | } | 133 | } |
| 188 | 134 | ||
| @@ -190,16 +136,17 @@ void intel_frontbuffer_flush(struct drm_device *dev, | |||
| 190 | * intel_fb_obj_flush - flush frontbuffer object | 136 | * intel_fb_obj_flush - flush frontbuffer object |
| 191 | * @obj: GEM object to flush | 137 | * @obj: GEM object to flush |
| 192 | * @retire: set when retiring asynchronous rendering | 138 | * @retire: set when retiring asynchronous rendering |
| 139 | * @origin: which operation caused the flush | ||
| 193 | * | 140 | * |
| 194 | * This function gets called every time rendering on the given object has | 141 | * This function gets called every time rendering on the given object has |
| 195 | * completed and frontbuffer caching can be started again. If @retire is true | 142 | * completed and frontbuffer caching can be started again. If @retire is true |
| 196 | * then any delayed flushes will be unblocked. | 143 | * then any delayed flushes will be unblocked. |
| 197 | */ | 144 | */ |
| 198 | void intel_fb_obj_flush(struct drm_i915_gem_object *obj, | 145 | void intel_fb_obj_flush(struct drm_i915_gem_object *obj, |
| 199 | bool retire) | 146 | bool retire, enum fb_op_origin origin) |
| 200 | { | 147 | { |
| 201 | struct drm_device *dev = obj->base.dev; | 148 | struct drm_device *dev = obj->base.dev; |
| 202 | struct drm_i915_private *dev_priv = dev->dev_private; | 149 | struct drm_i915_private *dev_priv = to_i915(dev); |
| 203 | unsigned frontbuffer_bits; | 150 | unsigned frontbuffer_bits; |
| 204 | 151 | ||
| 205 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | 152 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
| @@ -218,7 +165,7 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj, | |||
| 218 | mutex_unlock(&dev_priv->fb_tracking.lock); | 165 | mutex_unlock(&dev_priv->fb_tracking.lock); |
| 219 | } | 166 | } |
| 220 | 167 | ||
| 221 | intel_frontbuffer_flush(dev, frontbuffer_bits); | 168 | intel_frontbuffer_flush(dev, frontbuffer_bits, origin); |
| 222 | } | 169 | } |
| 223 | 170 | ||
| 224 | /** | 171 | /** |
| @@ -236,7 +183,7 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj, | |||
| 236 | void intel_frontbuffer_flip_prepare(struct drm_device *dev, | 183 | void intel_frontbuffer_flip_prepare(struct drm_device *dev, |
| 237 | unsigned frontbuffer_bits) | 184 | unsigned frontbuffer_bits) |
| 238 | { | 185 | { |
| 239 | struct drm_i915_private *dev_priv = dev->dev_private; | 186 | struct drm_i915_private *dev_priv = to_i915(dev); |
| 240 | 187 | ||
| 241 | mutex_lock(&dev_priv->fb_tracking.lock); | 188 | mutex_lock(&dev_priv->fb_tracking.lock); |
| 242 | dev_priv->fb_tracking.flip_bits |= frontbuffer_bits; | 189 | dev_priv->fb_tracking.flip_bits |= frontbuffer_bits; |
| @@ -244,7 +191,7 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev, | |||
| 244 | dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits; | 191 | dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits; |
| 245 | mutex_unlock(&dev_priv->fb_tracking.lock); | 192 | mutex_unlock(&dev_priv->fb_tracking.lock); |
| 246 | 193 | ||
| 247 | intel_psr_single_frame_update(dev); | 194 | intel_psr_single_frame_update(dev, frontbuffer_bits); |
| 248 | } | 195 | } |
| 249 | 196 | ||
| 250 | /** | 197 | /** |
| @@ -260,7 +207,7 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev, | |||
| 260 | void intel_frontbuffer_flip_complete(struct drm_device *dev, | 207 | void intel_frontbuffer_flip_complete(struct drm_device *dev, |
| 261 | unsigned frontbuffer_bits) | 208 | unsigned frontbuffer_bits) |
| 262 | { | 209 | { |
| 263 | struct drm_i915_private *dev_priv = dev->dev_private; | 210 | struct drm_i915_private *dev_priv = to_i915(dev); |
| 264 | 211 | ||
| 265 | mutex_lock(&dev_priv->fb_tracking.lock); | 212 | mutex_lock(&dev_priv->fb_tracking.lock); |
| 266 | /* Mask any cancelled flips. */ | 213 | /* Mask any cancelled flips. */ |
| @@ -268,5 +215,29 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev, | |||
| 268 | dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits; | 215 | dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits; |
| 269 | mutex_unlock(&dev_priv->fb_tracking.lock); | 216 | mutex_unlock(&dev_priv->fb_tracking.lock); |
| 270 | 217 | ||
| 271 | intel_frontbuffer_flush(dev, frontbuffer_bits); | 218 | intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP); |
| 219 | } | ||
| 220 | |||
| 221 | /** | ||
| 222 | * intel_frontbuffer_flip - synchronous frontbuffer flip | ||
| 223 | * @dev: DRM device | ||
| 224 | * @frontbuffer_bits: frontbuffer plane tracking bits | ||
| 225 | * | ||
| 226 | * This function gets called after scheduling a flip on @obj. This is for | ||
| 227 | * synchronous plane updates which will happen on the next vblank and which will | ||
| 228 | * not get delayed by pending gpu rendering. | ||
| 229 | * | ||
| 230 | * Can be called without any locks held. | ||
| 231 | */ | ||
| 232 | void intel_frontbuffer_flip(struct drm_device *dev, | ||
| 233 | unsigned frontbuffer_bits) | ||
| 234 | { | ||
| 235 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
| 236 | |||
| 237 | mutex_lock(&dev_priv->fb_tracking.lock); | ||
| 238 | /* Remove stale busy bits due to the old buffer. */ | ||
| 239 | dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits; | ||
| 240 | mutex_unlock(&dev_priv->fb_tracking.lock); | ||
| 241 | |||
| 242 | intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP); | ||
| 272 | } | 243 | } |
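
With intel_increase_pllclock() and the @ring parameter gone, the frontbuffer tracking API in this file reduces to a simple lifecycle: invalidate when rendering to a frontbuffer starts, flush when it completes, with flips handled by the prepare/complete helpers or the new synchronous intel_frontbuffer_flip(). A hedged sketch of how a CPU-rendering caller is expected to use it (locking is the caller's responsibility, as the WARN_ONs in the hunks indicate):

    /* Sketch of a frontbuffer write sequence under the reworked API.
     * 'obj' is a GEM object currently scanned out as a frontbuffer. */
    mutex_lock(&dev->struct_mutex);
    intel_fb_obj_invalidate(obj, ORIGIN_CPU);       /* PSR/DRRS/FBC drop out */

    /* ... CPU writes to the buffer ... */

    intel_fb_obj_flush(obj, false, ORIGIN_CPU);     /* caches may re-engage */
    mutex_unlock(&dev->struct_mutex);

For command-streamer rendering the invalidate uses ORIGIN_CS instead, which sets busy_bits and delays the eventual flush until the request retires (retire == true).
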
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index e97731aab6dc..70bad5bf1d48 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
| @@ -174,10 +174,14 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder) | |||
| 174 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); | 174 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); |
| 175 | u32 val = I915_READ(VIDEO_DIP_CTL); | 175 | u32 val = I915_READ(VIDEO_DIP_CTL); |
| 176 | 176 | ||
| 177 | if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK)) | 177 | if ((val & VIDEO_DIP_ENABLE) == 0) |
| 178 | return val & VIDEO_DIP_ENABLE; | 178 | return false; |
| 179 | 179 | ||
| 180 | return false; | 180 | if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port)) |
| 181 | return false; | ||
| 182 | |||
| 183 | return val & (VIDEO_DIP_ENABLE_AVI | | ||
| 184 | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD); | ||
| 181 | } | 185 | } |
| 182 | 186 | ||
| 183 | static void ibx_write_infoframe(struct drm_encoder *encoder, | 187 | static void ibx_write_infoframe(struct drm_encoder *encoder, |
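
The same strengthening is applied to every *_infoframe_enabled() helper in this patch: instead of reporting "enabled" as soon as VIDEO_DIP_ENABLE is set, the helpers also require that the DIP block is routed to this encoder's port and that at least one infoframe type is actually selected. Written once as a generic helper purely for illustration (this function does not exist in the driver; each platform open-codes the check with its own type mask):

    /* Illustrative only: the per-platform checks in these hunks all follow this shape. */
    static bool dip_really_enabled(u32 val, u32 port, u32 type_mask)
    {
            if ((val & VIDEO_DIP_ENABLE) == 0)              /* DIP block off */
                    return false;
            if ((val & VIDEO_DIP_PORT_MASK) != port)        /* owned by another port */
                    return false;
            return (val & type_mask) != 0;                  /* some infoframe selected */
    }

The type mask is what differs per platform (g4x has no GAMUT/GCP bits, HSW/DDI uses the _HSW variants and has no port field), which is why the check stays open-coded in each helper.
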
| @@ -227,10 +231,15 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder) | |||
| 227 | int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); | 231 | int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); |
| 228 | u32 val = I915_READ(reg); | 232 | u32 val = I915_READ(reg); |
| 229 | 233 | ||
| 230 | if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK)) | 234 | if ((val & VIDEO_DIP_ENABLE) == 0) |
| 231 | return val & VIDEO_DIP_ENABLE; | 235 | return false; |
| 236 | |||
| 237 | if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port)) | ||
| 238 | return false; | ||
| 232 | 239 | ||
| 233 | return false; | 240 | return val & (VIDEO_DIP_ENABLE_AVI | |
| 241 | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | | ||
| 242 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); | ||
| 234 | } | 243 | } |
| 235 | 244 | ||
| 236 | static void cpt_write_infoframe(struct drm_encoder *encoder, | 245 | static void cpt_write_infoframe(struct drm_encoder *encoder, |
| @@ -282,7 +291,12 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder) | |||
| 282 | int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); | 291 | int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); |
| 283 | u32 val = I915_READ(reg); | 292 | u32 val = I915_READ(reg); |
| 284 | 293 | ||
| 285 | return val & VIDEO_DIP_ENABLE; | 294 | if ((val & VIDEO_DIP_ENABLE) == 0) |
| 295 | return false; | ||
| 296 | |||
| 297 | return val & (VIDEO_DIP_ENABLE_AVI | | ||
| 298 | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | | ||
| 299 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); | ||
| 286 | } | 300 | } |
| 287 | 301 | ||
| 288 | static void vlv_write_infoframe(struct drm_encoder *encoder, | 302 | static void vlv_write_infoframe(struct drm_encoder *encoder, |
| @@ -332,10 +346,15 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder) | |||
| 332 | int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); | 346 | int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); |
| 333 | u32 val = I915_READ(reg); | 347 | u32 val = I915_READ(reg); |
| 334 | 348 | ||
| 335 | if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK)) | 349 | if ((val & VIDEO_DIP_ENABLE) == 0) |
| 336 | return val & VIDEO_DIP_ENABLE; | 350 | return false; |
| 337 | 351 | ||
| 338 | return false; | 352 | if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port)) |
| 353 | return false; | ||
| 354 | |||
| 355 | return val & (VIDEO_DIP_ENABLE_AVI | | ||
| 356 | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | | ||
| 357 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); | ||
| 339 | } | 358 | } |
| 340 | 359 | ||
| 341 | static void hsw_write_infoframe(struct drm_encoder *encoder, | 360 | static void hsw_write_infoframe(struct drm_encoder *encoder, |
| @@ -383,8 +402,9 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder) | |||
| 383 | u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder); | 402 | u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder); |
| 384 | u32 val = I915_READ(ctl_reg); | 403 | u32 val = I915_READ(ctl_reg); |
| 385 | 404 | ||
| 386 | return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW | | 405 | return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | |
| 387 | VIDEO_DIP_ENABLE_VS_HSW); | 406 | VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW | |
| 407 | VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW); | ||
| 388 | } | 408 | } |
| 389 | 409 | ||
| 390 | /* | 410 | /* |
| @@ -514,7 +534,13 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, | |||
| 514 | if (!enable) { | 534 | if (!enable) { |
| 515 | if (!(val & VIDEO_DIP_ENABLE)) | 535 | if (!(val & VIDEO_DIP_ENABLE)) |
| 516 | return; | 536 | return; |
| 517 | val &= ~VIDEO_DIP_ENABLE; | 537 | if (port != (val & VIDEO_DIP_PORT_MASK)) { |
| 538 | DRM_DEBUG_KMS("video DIP still enabled on port %c\n", | ||
| 539 | (val & VIDEO_DIP_PORT_MASK) >> 29); | ||
| 540 | return; | ||
| 541 | } | ||
| 542 | val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI | | ||
| 543 | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD); | ||
| 518 | I915_WRITE(reg, val); | 544 | I915_WRITE(reg, val); |
| 519 | POSTING_READ(reg); | 545 | POSTING_READ(reg); |
| 520 | return; | 546 | return; |
| @@ -522,16 +548,17 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, | |||
| 522 | 548 | ||
| 523 | if (port != (val & VIDEO_DIP_PORT_MASK)) { | 549 | if (port != (val & VIDEO_DIP_PORT_MASK)) { |
| 524 | if (val & VIDEO_DIP_ENABLE) { | 550 | if (val & VIDEO_DIP_ENABLE) { |
| 525 | val &= ~VIDEO_DIP_ENABLE; | 551 | DRM_DEBUG_KMS("video DIP already enabled on port %c\n", |
| 526 | I915_WRITE(reg, val); | 552 | (val & VIDEO_DIP_PORT_MASK) >> 29); |
| 527 | POSTING_READ(reg); | 553 | return; |
| 528 | } | 554 | } |
| 529 | val &= ~VIDEO_DIP_PORT_MASK; | 555 | val &= ~VIDEO_DIP_PORT_MASK; |
| 530 | val |= port; | 556 | val |= port; |
| 531 | } | 557 | } |
| 532 | 558 | ||
| 533 | val |= VIDEO_DIP_ENABLE; | 559 | val |= VIDEO_DIP_ENABLE; |
| 534 | val &= ~VIDEO_DIP_ENABLE_VENDOR; | 560 | val &= ~(VIDEO_DIP_ENABLE_AVI | |
| 561 | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD); | ||
| 535 | 562 | ||
| 536 | I915_WRITE(reg, val); | 563 | I915_WRITE(reg, val); |
| 537 | POSTING_READ(reg); | 564 | POSTING_READ(reg); |
| @@ -541,6 +568,97 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, | |||
| 541 | intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); | 568 | intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); |
| 542 | } | 569 | } |
| 543 | 570 | ||
| 571 | static bool hdmi_sink_is_deep_color(struct drm_encoder *encoder) | ||
| 572 | { | ||
| 573 | struct drm_device *dev = encoder->dev; | ||
| 574 | struct drm_connector *connector; | ||
| 575 | |||
| 576 | WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); | ||
| 577 | |||
| 578 | /* | ||
| 579 | * HDMI cloning is only supported on g4x which doesn't | ||
| 580 | * support deep color or GCP infoframes anyway so no | ||
| 581 | * need to worry about multiple HDMI sinks here. | ||
| 582 | */ | ||
| 583 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) | ||
| 584 | if (connector->encoder == encoder) | ||
| 585 | return connector->display_info.bpc > 8; | ||
| 586 | |||
| 587 | return false; | ||
| 588 | } | ||
| 589 | |||
| 590 | /* | ||
| 591 | * Determine if default_phase=1 can be indicated in the GCP infoframe. | ||
| 592 | * | ||
| 593 | * From HDMI specification 1.4a: | ||
| 594 | * - The first pixel of each Video Data Period shall always have a pixel packing phase of 0 | ||
| 595 | * - The first pixel following each Video Data Period shall have a pixel packing phase of 0 | ||
| 596 | * - The PP bits shall be constant for all GCPs and will be equal to the last packing phase | ||
| 597 | * - The first pixel following every transition of HSYNC or VSYNC shall have a pixel packing | ||
| 598 | * phase of 0 | ||
| 599 | */ | ||
| 600 | static bool gcp_default_phase_possible(int pipe_bpp, | ||
| 601 | const struct drm_display_mode *mode) | ||
| 602 | { | ||
| 603 | unsigned int pixels_per_group; | ||
| 604 | |||
| 605 | switch (pipe_bpp) { | ||
| 606 | case 30: | ||
| 607 | /* 4 pixels in 5 clocks */ | ||
| 608 | pixels_per_group = 4; | ||
| 609 | break; | ||
| 610 | case 36: | ||
| 611 | /* 2 pixels in 3 clocks */ | ||
| 612 | pixels_per_group = 2; | ||
| 613 | break; | ||
| 614 | case 48: | ||
| 615 | /* 1 pixel in 2 clocks */ | ||
| 616 | pixels_per_group = 1; | ||
| 617 | break; | ||
| 618 | default: | ||
| 619 | /* phase information not relevant for 8bpc */ | ||
| 620 | return false; | ||
| 621 | } | ||
| 622 | |||
| 623 | return mode->crtc_hdisplay % pixels_per_group == 0 && | ||
| 624 | mode->crtc_htotal % pixels_per_group == 0 && | ||
| 625 | mode->crtc_hblank_start % pixels_per_group == 0 && | ||
| 626 | mode->crtc_hblank_end % pixels_per_group == 0 && | ||
| 627 | mode->crtc_hsync_start % pixels_per_group == 0 && | ||
| 628 | mode->crtc_hsync_end % pixels_per_group == 0 && | ||
| 629 | ((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0 || | ||
| 630 | mode->crtc_htotal/2 % pixels_per_group == 0); | ||
| 631 | } | ||
| 632 | |||
| 633 | static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder) | ||
| 634 | { | ||
| 635 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; | ||
| 636 | struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); | ||
| 637 | u32 reg, val = 0; | ||
| 638 | |||
| 639 | if (HAS_DDI(dev_priv)) | ||
| 640 | reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder); | ||
| 641 | else if (IS_VALLEYVIEW(dev_priv)) | ||
| 642 | reg = VLV_TVIDEO_DIP_GCP(crtc->pipe); | ||
| 643 | else if (HAS_PCH_SPLIT(dev_priv->dev)) | ||
| 644 | reg = TVIDEO_DIP_GCP(crtc->pipe); | ||
| 645 | else | ||
| 646 | return false; | ||
| 647 | |||
| 648 | /* Indicate color depth whenever the sink supports deep color */ | ||
| 649 | if (hdmi_sink_is_deep_color(encoder)) | ||
| 650 | val |= GCP_COLOR_INDICATION; | ||
| 651 | |||
| 652 | /* Enable default_phase whenever the display mode is suitably aligned */ | ||
| 653 | if (gcp_default_phase_possible(crtc->config->pipe_bpp, | ||
| 654 | &crtc->config->base.adjusted_mode)) | ||
| 655 | val |= GCP_DEFAULT_PHASE_ENABLE; | ||
| 656 | |||
| 657 | I915_WRITE(reg, val); | ||
| 658 | |||
| 659 | return val != 0; | ||
| 660 | } | ||
| 661 | |||
| 544 | static void ibx_set_infoframes(struct drm_encoder *encoder, | 662 | static void ibx_set_infoframes(struct drm_encoder *encoder, |
| 545 | bool enable, | 663 | bool enable, |
| 546 | struct drm_display_mode *adjusted_mode) | 664 | struct drm_display_mode *adjusted_mode) |
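
gcp_default_phase_possible() is easiest to read with a concrete case. At 36 bpp the link carries 2 pixels per 3 TMDS clocks, so every horizontal timing value must be divisible by 2 for the packing phase to be back at 0 after each sync/blank transition. A sketch of such a check, using the usual CEA-861 1080p horizontal timings as an assumed example (all values even, so default_phase can be signalled; an odd hsync_start would make it return false):

    /* Worked example, assuming standard CEA-861 1080p horizontal timings. */
    static bool example_1080p_default_phase(void)
    {
            struct drm_display_mode mode = {
                    .crtc_hdisplay     = 1920,
                    .crtc_hblank_start = 1920,
                    .crtc_hblank_end   = 2200,
                    .crtc_hsync_start  = 2008,
                    .crtc_hsync_end    = 2052,
                    .crtc_htotal       = 2200,
            };

            /* 36 bpp => 2 pixels per group; every value above is even => true */
            return gcp_default_phase_possible(36, &mode);
    }
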
| @@ -561,25 +679,29 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, | |||
| 561 | if (!enable) { | 679 | if (!enable) { |
| 562 | if (!(val & VIDEO_DIP_ENABLE)) | 680 | if (!(val & VIDEO_DIP_ENABLE)) |
| 563 | return; | 681 | return; |
| 564 | val &= ~VIDEO_DIP_ENABLE; | 682 | val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI | |
| 683 | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | | ||
| 684 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); | ||
| 565 | I915_WRITE(reg, val); | 685 | I915_WRITE(reg, val); |
| 566 | POSTING_READ(reg); | 686 | POSTING_READ(reg); |
| 567 | return; | 687 | return; |
| 568 | } | 688 | } |
| 569 | 689 | ||
| 570 | if (port != (val & VIDEO_DIP_PORT_MASK)) { | 690 | if (port != (val & VIDEO_DIP_PORT_MASK)) { |
| 571 | if (val & VIDEO_DIP_ENABLE) { | 691 | WARN(val & VIDEO_DIP_ENABLE, |
| 572 | val &= ~VIDEO_DIP_ENABLE; | 692 | "DIP already enabled on port %c\n", |
| 573 | I915_WRITE(reg, val); | 693 | (val & VIDEO_DIP_PORT_MASK) >> 29); |
| 574 | POSTING_READ(reg); | ||
| 575 | } | ||
| 576 | val &= ~VIDEO_DIP_PORT_MASK; | 694 | val &= ~VIDEO_DIP_PORT_MASK; |
| 577 | val |= port; | 695 | val |= port; |
| 578 | } | 696 | } |
| 579 | 697 | ||
| 580 | val |= VIDEO_DIP_ENABLE; | 698 | val |= VIDEO_DIP_ENABLE; |
| 581 | val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | | 699 | val &= ~(VIDEO_DIP_ENABLE_AVI | |
| 582 | VIDEO_DIP_ENABLE_GCP); | 700 | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | |
| 701 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); | ||
| 702 | |||
| 703 | if (intel_hdmi_set_gcp_infoframe(encoder)) | ||
| 704 | val |= VIDEO_DIP_ENABLE_GCP; | ||
| 583 | 705 | ||
| 584 | I915_WRITE(reg, val); | 706 | I915_WRITE(reg, val); |
| 585 | POSTING_READ(reg); | 707 | POSTING_READ(reg); |
| @@ -607,7 +729,9 @@ static void cpt_set_infoframes(struct drm_encoder *encoder, | |||
| 607 | if (!enable) { | 729 | if (!enable) { |
| 608 | if (!(val & VIDEO_DIP_ENABLE)) | 730 | if (!(val & VIDEO_DIP_ENABLE)) |
| 609 | return; | 731 | return; |
| 610 | val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI); | 732 | val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI | |
| 733 | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | | ||
| 734 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); | ||
| 611 | I915_WRITE(reg, val); | 735 | I915_WRITE(reg, val); |
| 612 | POSTING_READ(reg); | 736 | POSTING_READ(reg); |
| 613 | return; | 737 | return; |
| @@ -616,7 +740,10 @@ static void cpt_set_infoframes(struct drm_encoder *encoder, | |||
| 616 | /* Set both together, unset both together: see the spec. */ | 740 | /* Set both together, unset both together: see the spec. */ |
| 617 | val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI; | 741 | val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI; |
| 618 | val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | | 742 | val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | |
| 619 | VIDEO_DIP_ENABLE_GCP); | 743 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); |
| 744 | |||
| 745 | if (intel_hdmi_set_gcp_infoframe(encoder)) | ||
| 746 | val |= VIDEO_DIP_ENABLE_GCP; | ||
| 620 | 747 | ||
| 621 | I915_WRITE(reg, val); | 748 | I915_WRITE(reg, val); |
| 622 | POSTING_READ(reg); | 749 | POSTING_READ(reg); |
| @@ -646,25 +773,29 @@ static void vlv_set_infoframes(struct drm_encoder *encoder, | |||
| 646 | if (!enable) { | 773 | if (!enable) { |
| 647 | if (!(val & VIDEO_DIP_ENABLE)) | 774 | if (!(val & VIDEO_DIP_ENABLE)) |
| 648 | return; | 775 | return; |
| 649 | val &= ~VIDEO_DIP_ENABLE; | 776 | val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI | |
| 777 | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | | ||
| 778 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); | ||
| 650 | I915_WRITE(reg, val); | 779 | I915_WRITE(reg, val); |
| 651 | POSTING_READ(reg); | 780 | POSTING_READ(reg); |
| 652 | return; | 781 | return; |
| 653 | } | 782 | } |
| 654 | 783 | ||
| 655 | if (port != (val & VIDEO_DIP_PORT_MASK)) { | 784 | if (port != (val & VIDEO_DIP_PORT_MASK)) { |
| 656 | if (val & VIDEO_DIP_ENABLE) { | 785 | WARN(val & VIDEO_DIP_ENABLE, |
| 657 | val &= ~VIDEO_DIP_ENABLE; | 786 | "DIP already enabled on port %c\n", |
| 658 | I915_WRITE(reg, val); | 787 | (val & VIDEO_DIP_PORT_MASK) >> 29); |
| 659 | POSTING_READ(reg); | ||
| 660 | } | ||
| 661 | val &= ~VIDEO_DIP_PORT_MASK; | 788 | val &= ~VIDEO_DIP_PORT_MASK; |
| 662 | val |= port; | 789 | val |= port; |
| 663 | } | 790 | } |
| 664 | 791 | ||
| 665 | val |= VIDEO_DIP_ENABLE; | 792 | val |= VIDEO_DIP_ENABLE; |
| 666 | val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | | 793 | val &= ~(VIDEO_DIP_ENABLE_AVI | |
| 667 | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_GCP); | 794 | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | |
| 795 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); | ||
| 796 | |||
| 797 | if (intel_hdmi_set_gcp_infoframe(encoder)) | ||
| 798 | val |= VIDEO_DIP_ENABLE_GCP; | ||
| 668 | 799 | ||
| 669 | I915_WRITE(reg, val); | 800 | I915_WRITE(reg, val); |
| 670 | POSTING_READ(reg); | 801 | POSTING_READ(reg); |
| @@ -686,14 +817,18 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, | |||
| 686 | 817 | ||
| 687 | assert_hdmi_port_disabled(intel_hdmi); | 818 | assert_hdmi_port_disabled(intel_hdmi); |
| 688 | 819 | ||
| 820 | val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | | ||
| 821 | VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW | | ||
| 822 | VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW); | ||
| 823 | |||
| 689 | if (!enable) { | 824 | if (!enable) { |
| 690 | I915_WRITE(reg, 0); | 825 | I915_WRITE(reg, val); |
| 691 | POSTING_READ(reg); | 826 | POSTING_READ(reg); |
| 692 | return; | 827 | return; |
| 693 | } | 828 | } |
| 694 | 829 | ||
| 695 | val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW | | 830 | if (intel_hdmi_set_gcp_infoframe(encoder)) |
| 696 | VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW); | 831 | val |= VIDEO_DIP_ENABLE_GCP_HSW; |
| 697 | 832 | ||
| 698 | I915_WRITE(reg, val); | 833 | I915_WRITE(reg, val); |
| 699 | POSTING_READ(reg); | 834 | POSTING_READ(reg); |
| @@ -808,58 +943,146 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, | |||
| 808 | else | 943 | else |
| 809 | dotclock = pipe_config->port_clock; | 944 | dotclock = pipe_config->port_clock; |
| 810 | 945 | ||
| 946 | if (pipe_config->pixel_multiplier) | ||
| 947 | dotclock /= pipe_config->pixel_multiplier; | ||
| 948 | |||
| 811 | if (HAS_PCH_SPLIT(dev_priv->dev)) | 949 | if (HAS_PCH_SPLIT(dev_priv->dev)) |
| 812 | ironlake_check_encoder_dotclock(pipe_config, dotclock); | 950 | ironlake_check_encoder_dotclock(pipe_config, dotclock); |
| 813 | 951 | ||
| 814 | pipe_config->base.adjusted_mode.crtc_clock = dotclock; | 952 | pipe_config->base.adjusted_mode.crtc_clock = dotclock; |
| 815 | } | 953 | } |
| 816 | 954 | ||
| 817 | static void intel_enable_hdmi(struct intel_encoder *encoder) | 955 | static void intel_enable_hdmi_audio(struct intel_encoder *encoder) |
| 956 | { | ||
| 957 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); | ||
| 958 | |||
| 959 | WARN_ON(!crtc->config->has_hdmi_sink); | ||
| 960 | DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n", | ||
| 961 | pipe_name(crtc->pipe)); | ||
| 962 | intel_audio_codec_enable(encoder); | ||
| 963 | } | ||
| 964 | |||
| 965 | static void g4x_enable_hdmi(struct intel_encoder *encoder) | ||
| 818 | { | 966 | { |
| 819 | struct drm_device *dev = encoder->base.dev; | 967 | struct drm_device *dev = encoder->base.dev; |
| 820 | struct drm_i915_private *dev_priv = dev->dev_private; | 968 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 821 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); | 969 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); |
| 822 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); | 970 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); |
| 823 | u32 temp; | 971 | u32 temp; |
| 824 | u32 enable_bits = SDVO_ENABLE; | ||
| 825 | 972 | ||
| 826 | if (intel_crtc->config->has_audio) | 973 | temp = I915_READ(intel_hdmi->hdmi_reg); |
| 827 | enable_bits |= SDVO_AUDIO_ENABLE; | 974 | |
| 975 | temp |= SDVO_ENABLE; | ||
| 976 | if (crtc->config->has_audio) | ||
| 977 | temp |= SDVO_AUDIO_ENABLE; | ||
| 978 | |||
| 979 | I915_WRITE(intel_hdmi->hdmi_reg, temp); | ||
| 980 | POSTING_READ(intel_hdmi->hdmi_reg); | ||
| 981 | |||
| 982 | if (crtc->config->has_audio) | ||
| 983 | intel_enable_hdmi_audio(encoder); | ||
| 984 | } | ||
| 985 | |||
| 986 | static void ibx_enable_hdmi(struct intel_encoder *encoder) | ||
| 987 | { | ||
| 988 | struct drm_device *dev = encoder->base.dev; | ||
| 989 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 990 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); | ||
| 991 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); | ||
| 992 | u32 temp; | ||
| 828 | 993 | ||
| 829 | temp = I915_READ(intel_hdmi->hdmi_reg); | 994 | temp = I915_READ(intel_hdmi->hdmi_reg); |
| 830 | 995 | ||
| 831 | /* HW workaround for IBX, we need to move the port to transcoder A | 996 | temp |= SDVO_ENABLE; |
| 832 | * before disabling it, so restore the transcoder select bit here. */ | 997 | if (crtc->config->has_audio) |
| 833 | if (HAS_PCH_IBX(dev)) | 998 | temp |= SDVO_AUDIO_ENABLE; |
| 834 | enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe); | 999 | |
| 1000 | /* | ||
| 1001 | * HW workaround, need to write this twice for issue | ||
| 1002 | * that may result in first write getting masked. | ||
| 1003 | */ | ||
| 1004 | I915_WRITE(intel_hdmi->hdmi_reg, temp); | ||
| 1005 | POSTING_READ(intel_hdmi->hdmi_reg); | ||
| 1006 | I915_WRITE(intel_hdmi->hdmi_reg, temp); | ||
| 1007 | POSTING_READ(intel_hdmi->hdmi_reg); | ||
| 835 | 1008 | ||
| 836 | /* HW workaround, need to toggle enable bit off and on for 12bpc, but | 1009 | /* |
| 837 | * we do this anyway which shows more stable in testing. | 1010 | * HW workaround, need to toggle enable bit off and on |
| 1011 | * for 12bpc with pixel repeat. | ||
| 1012 | * | ||
| 1013 | * FIXME: BSpec says this should be done at the end of | ||
| 1014 | * of the modeset sequence, so not sure if this isn't too soon. | ||
| 838 | */ | 1015 | */ |
| 839 | if (HAS_PCH_SPLIT(dev)) { | 1016 | if (crtc->config->pipe_bpp > 24 && |
| 1017 | crtc->config->pixel_multiplier > 1) { | ||
| 840 | I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE); | 1018 | I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE); |
| 841 | POSTING_READ(intel_hdmi->hdmi_reg); | 1019 | POSTING_READ(intel_hdmi->hdmi_reg); |
| 1020 | |||
| 1021 | /* | ||
| 1022 | * HW workaround, need to write this twice for issue | ||
| 1023 | * that may result in first write getting masked. | ||
| 1024 | */ | ||
| 1025 | I915_WRITE(intel_hdmi->hdmi_reg, temp); | ||
| 1026 | POSTING_READ(intel_hdmi->hdmi_reg); | ||
| 1027 | I915_WRITE(intel_hdmi->hdmi_reg, temp); | ||
| 1028 | POSTING_READ(intel_hdmi->hdmi_reg); | ||
| 842 | } | 1029 | } |
| 843 | 1030 | ||
| 844 | temp |= enable_bits; | 1031 | if (crtc->config->has_audio) |
| 1032 | intel_enable_hdmi_audio(encoder); | ||
| 1033 | } | ||
| 1034 | |||
| 1035 | static void cpt_enable_hdmi(struct intel_encoder *encoder) | ||
| 1036 | { | ||
| 1037 | struct drm_device *dev = encoder->base.dev; | ||
| 1038 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 1039 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); | ||
| 1040 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); | ||
| 1041 | enum pipe pipe = crtc->pipe; | ||
| 1042 | u32 temp; | ||
| 1043 | |||
| 1044 | temp = I915_READ(intel_hdmi->hdmi_reg); | ||
| 1045 | |||
| 1046 | temp |= SDVO_ENABLE; | ||
| 1047 | if (crtc->config->has_audio) | ||
| 1048 | temp |= SDVO_AUDIO_ENABLE; | ||
| 1049 | |||
| 1050 | /* | ||
| 1051 | * WaEnableHDMI8bpcBefore12bpc:snb,ivb | ||
| 1052 | * | ||
| 1053 | * The procedure for 12bpc is as follows: | ||
| 1054 | * 1. disable HDMI clock gating | ||
| 1055 | * 2. enable HDMI with 8bpc | ||
| 1056 | * 3. enable HDMI with 12bpc | ||
| 1057 | * 4. enable HDMI clock gating | ||
| 1058 | */ | ||
| 1059 | |||
| 1060 | if (crtc->config->pipe_bpp > 24) { | ||
| 1061 | I915_WRITE(TRANS_CHICKEN1(pipe), | ||
| 1062 | I915_READ(TRANS_CHICKEN1(pipe)) | | ||
| 1063 | TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE); | ||
| 1064 | |||
| 1065 | temp &= ~SDVO_COLOR_FORMAT_MASK; | ||
| 1066 | temp |= SDVO_COLOR_FORMAT_8bpc; | ||
| 1067 | } | ||
| 845 | 1068 | ||
| 846 | I915_WRITE(intel_hdmi->hdmi_reg, temp); | 1069 | I915_WRITE(intel_hdmi->hdmi_reg, temp); |
| 847 | POSTING_READ(intel_hdmi->hdmi_reg); | 1070 | POSTING_READ(intel_hdmi->hdmi_reg); |
| 848 | 1071 | ||
| 849 | /* HW workaround, need to write this twice for issue that may result | 1072 | if (crtc->config->pipe_bpp > 24) { |
| 850 | * in first write getting masked. | 1073 | temp &= ~SDVO_COLOR_FORMAT_MASK; |
| 851 | */ | 1074 | temp |= HDMI_COLOR_FORMAT_12bpc; |
| 852 | if (HAS_PCH_SPLIT(dev)) { | 1075 | |
| 853 | I915_WRITE(intel_hdmi->hdmi_reg, temp); | 1076 | I915_WRITE(intel_hdmi->hdmi_reg, temp); |
| 854 | POSTING_READ(intel_hdmi->hdmi_reg); | 1077 | POSTING_READ(intel_hdmi->hdmi_reg); |
| 855 | } | ||
| 856 | 1078 | ||
| 857 | if (intel_crtc->config->has_audio) { | 1079 | I915_WRITE(TRANS_CHICKEN1(pipe), |
| 858 | WARN_ON(!intel_crtc->config->has_hdmi_sink); | 1080 | I915_READ(TRANS_CHICKEN1(pipe)) & |
| 859 | DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n", | 1081 | ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE); |
| 860 | pipe_name(intel_crtc->pipe)); | ||
| 861 | intel_audio_codec_enable(encoder); | ||
| 862 | } | 1082 | } |
| 1083 | |||
| 1084 | if (crtc->config->has_audio) | ||
| 1085 | intel_enable_hdmi_audio(encoder); | ||
| 863 | } | 1086 | } |
| 864 | 1087 | ||
| 865 | static void vlv_enable_hdmi(struct intel_encoder *encoder) | 1088 | static void vlv_enable_hdmi(struct intel_encoder *encoder) |
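
Splitting intel_enable_hdmi() into g4x/ibx/cpt variants (alongside the existing vlv one) keeps each platform's workaround sequence self-contained: g4x just sets SDVO_ENABLE plus audio, ibx adds the double-write and the 12bpc-with-pixel-repeat toggle, and cpt implements WaEnableHDMI8bpcBefore12bpc around the transcoder clock-gating chicken bit. The hookup is not part of this excerpt; the natural wiring (an assumption about code elsewhere in the patch) would be a per-PCH selection of the encoder enable vfunc, roughly:

    /* Assumed hookup outside this excerpt, shown only as a sketch;
     * VLV/CHV would keep vlv_enable_hdmi. */
    if (HAS_PCH_CPT(dev))
            intel_encoder->enable = cpt_enable_hdmi;
    else if (HAS_PCH_IBX(dev))
            intel_encoder->enable = ibx_enable_hdmi;
    else
            intel_encoder->enable = g4x_enable_hdmi;
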
| @@ -901,6 +1124,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) | |||
| 901 | I915_WRITE(intel_hdmi->hdmi_reg, temp); | 1124 | I915_WRITE(intel_hdmi->hdmi_reg, temp); |
| 902 | POSTING_READ(intel_hdmi->hdmi_reg); | 1125 | POSTING_READ(intel_hdmi->hdmi_reg); |
| 903 | } | 1126 | } |
| 1127 | |||
| 1128 | intel_hdmi->set_infoframes(&encoder->base, false, NULL); | ||
| 904 | } | 1129 | } |
| 905 | 1130 | ||
| 906 | static void g4x_disable_hdmi(struct intel_encoder *encoder) | 1131 | static void g4x_disable_hdmi(struct intel_encoder *encoder) |
| @@ -926,7 +1151,7 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder) | |||
| 926 | intel_disable_hdmi(encoder); | 1151 | intel_disable_hdmi(encoder); |
| 927 | } | 1152 | } |
| 928 | 1153 | ||
| 929 | static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit) | 1154 | static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit) |
| 930 | { | 1155 | { |
| 931 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); | 1156 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); |
| 932 | 1157 | ||
| @@ -939,24 +1164,51 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit) | |||
| 939 | } | 1164 | } |
| 940 | 1165 | ||
| 941 | static enum drm_mode_status | 1166 | static enum drm_mode_status |
| 1167 | hdmi_port_clock_valid(struct intel_hdmi *hdmi, | ||
| 1168 | int clock, bool respect_dvi_limit) | ||
| 1169 | { | ||
| 1170 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); | ||
| 1171 | |||
| 1172 | if (clock < 25000) | ||
| 1173 | return MODE_CLOCK_LOW; | ||
| 1174 | if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit)) | ||
| 1175 | return MODE_CLOCK_HIGH; | ||
| 1176 | |||
| 1177 | /* BXT DPLL can't generate 223-240 MHz */ | ||
| 1178 | if (IS_BROXTON(dev) && clock > 223333 && clock < 240000) | ||
| 1179 | return MODE_CLOCK_RANGE; | ||
| 1180 | |||
| 1181 | /* CHV DPLL can't generate 216-240 MHz */ | ||
| 1182 | if (IS_CHERRYVIEW(dev) && clock > 216000 && clock < 240000) | ||
| 1183 | return MODE_CLOCK_RANGE; | ||
| 1184 | |||
| 1185 | return MODE_OK; | ||
| 1186 | } | ||
| 1187 | |||
| 1188 | static enum drm_mode_status | ||
| 942 | intel_hdmi_mode_valid(struct drm_connector *connector, | 1189 | intel_hdmi_mode_valid(struct drm_connector *connector, |
| 943 | struct drm_display_mode *mode) | 1190 | struct drm_display_mode *mode) |
| 944 | { | 1191 | { |
| 945 | int clock = mode->clock; | 1192 | struct intel_hdmi *hdmi = intel_attached_hdmi(connector); |
| 1193 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); | ||
| 1194 | enum drm_mode_status status; | ||
| 1195 | int clock; | ||
| 946 | 1196 | ||
| 1197 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 1198 | return MODE_NO_DBLESCAN; | ||
| 1199 | |||
| 1200 | clock = mode->clock; | ||
| 947 | if (mode->flags & DRM_MODE_FLAG_DBLCLK) | 1201 | if (mode->flags & DRM_MODE_FLAG_DBLCLK) |
| 948 | clock *= 2; | 1202 | clock *= 2; |
| 949 | 1203 | ||
| 950 | if (clock > hdmi_portclock_limit(intel_attached_hdmi(connector), | 1204 | /* check if we can do 8bpc */ |
| 951 | true)) | 1205 | status = hdmi_port_clock_valid(hdmi, clock, true); |
| 952 | return MODE_CLOCK_HIGH; | ||
| 953 | if (clock < 20000) | ||
| 954 | return MODE_CLOCK_LOW; | ||
| 955 | 1206 | ||
| 956 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 1207 | /* if we can't do 8bpc we may still be able to do 12bpc */ |
| 957 | return MODE_NO_DBLESCAN; | 1208 | if (!HAS_GMCH_DISPLAY(dev) && status != MODE_OK) |
| 1209 | status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true); | ||
| 958 | 1210 | ||
| 959 | return MODE_OK; | 1211 | return status; |
| 960 | } | 1212 | } |
| 961 | 1213 | ||
| 962 | static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state) | 1214 | static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state) |
| @@ -997,8 +1249,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, | |||
| 997 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); | 1249 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); |
| 998 | struct drm_device *dev = encoder->base.dev; | 1250 | struct drm_device *dev = encoder->base.dev; |
| 999 | struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; | 1251 | struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; |
| 1000 | int clock_12bpc = pipe_config->base.adjusted_mode.crtc_clock * 3 / 2; | 1252 | int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock; |
| 1001 | int portclock_limit = hdmi_portclock_limit(intel_hdmi, false); | 1253 | int clock_12bpc = clock_8bpc * 3 / 2; |
| 1002 | int desired_bpp; | 1254 | int desired_bpp; |
| 1003 | 1255 | ||
| 1004 | pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink; | 1256 | pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink; |
| @@ -1017,6 +1269,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, | |||
| 1017 | 1269 | ||
| 1018 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) { | 1270 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) { |
| 1019 | pipe_config->pixel_multiplier = 2; | 1271 | pipe_config->pixel_multiplier = 2; |
| 1272 | clock_8bpc *= 2; | ||
| 1273 | clock_12bpc *= 2; | ||
| 1020 | } | 1274 | } |
| 1021 | 1275 | ||
| 1022 | if (intel_hdmi->color_range) | 1276 | if (intel_hdmi->color_range) |
| @@ -1035,9 +1289,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, | |||
| 1035 | * within limits. | 1289 | * within limits. |
| 1036 | */ | 1290 | */ |
| 1037 | if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink && | 1291 | if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink && |
| 1038 | clock_12bpc <= portclock_limit && | 1292 | hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK && |
| 1039 | hdmi_12bpc_possible(pipe_config) && | 1293 | hdmi_12bpc_possible(pipe_config)) { |
| 1040 | 0 /* FIXME 12bpc support totally broken */) { | ||
| 1041 | DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); | 1294 | DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); |
| 1042 | desired_bpp = 12*3; | 1295 | desired_bpp = 12*3; |
| 1043 | 1296 | ||
| @@ -1046,6 +1299,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, | |||
| 1046 | } else { | 1299 | } else { |
| 1047 | DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n"); | 1300 | DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n"); |
| 1048 | desired_bpp = 8*3; | 1301 | desired_bpp = 8*3; |
| 1302 | |||
| 1303 | pipe_config->port_clock = clock_8bpc; | ||
| 1049 | } | 1304 | } |
| 1050 | 1305 | ||
| 1051 | if (!pipe_config->bw_constrained) { | 1306 | if (!pipe_config->bw_constrained) { |
| @@ -1053,8 +1308,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, | |||
| 1053 | pipe_config->pipe_bpp = desired_bpp; | 1308 | pipe_config->pipe_bpp = desired_bpp; |
| 1054 | } | 1309 | } |
| 1055 | 1310 | ||
| 1056 | if (adjusted_mode->crtc_clock > portclock_limit) { | 1311 | if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock, |
| 1057 | DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n"); | 1312 | false) != MODE_OK) { |
| 1313 | DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n"); | ||
| 1058 | return false; | 1314 | return false; |
| 1059 | } | 1315 | } |
| 1060 | 1316 | ||
| @@ -1323,7 +1579,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder) | |||
| 1323 | intel_crtc->config->has_hdmi_sink, | 1579 | intel_crtc->config->has_hdmi_sink, |
| 1324 | adjusted_mode); | 1580 | adjusted_mode); |
| 1325 | 1581 | ||
| 1326 | intel_enable_hdmi(encoder); | 1582 | g4x_enable_hdmi(encoder); |
| 1327 | 1583 | ||
| 1328 | vlv_wait_port_ready(dev_priv, dport, 0x0); | 1584 | vlv_wait_port_ready(dev_priv, dport, 0x0); |
| 1329 | } | 1585 | } |
| @@ -1640,7 +1896,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder) | |||
| 1640 | intel_crtc->config->has_hdmi_sink, | 1896 | intel_crtc->config->has_hdmi_sink, |
| 1641 | adjusted_mode); | 1897 | adjusted_mode); |
| 1642 | 1898 | ||
| 1643 | intel_enable_hdmi(encoder); | 1899 | g4x_enable_hdmi(encoder); |
| 1644 | 1900 | ||
| 1645 | vlv_wait_port_ready(dev_priv, dport, 0x0); | 1901 | vlv_wait_port_ready(dev_priv, dport, 0x0); |
| 1646 | } | 1902 | } |
| @@ -1827,7 +2083,12 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) | |||
| 1827 | intel_encoder->post_disable = vlv_hdmi_post_disable; | 2083 | intel_encoder->post_disable = vlv_hdmi_post_disable; |
| 1828 | } else { | 2084 | } else { |
| 1829 | intel_encoder->pre_enable = intel_hdmi_pre_enable; | 2085 | intel_encoder->pre_enable = intel_hdmi_pre_enable; |
| 1830 | intel_encoder->enable = intel_enable_hdmi; | 2086 | if (HAS_PCH_CPT(dev)) |
| 2087 | intel_encoder->enable = cpt_enable_hdmi; | ||
| 2088 | else if (HAS_PCH_IBX(dev)) | ||
| 2089 | intel_encoder->enable = ibx_enable_hdmi; | ||
| 2090 | else | ||
| 2091 | intel_encoder->enable = g4x_enable_hdmi; | ||
| 1831 | } | 2092 | } |
| 1832 | 2093 | ||
| 1833 | intel_encoder->type = INTEL_OUTPUT_HDMI; | 2094 | intel_encoder->type = INTEL_OUTPUT_HDMI; |
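A worked number helps when reading the 12bpc handling above: a 12bpc stream carries 36 bits per pixel instead of 24, so the port must run at 1.5x the nominal pixel clock, which is exactly the clock_12bpc = clock_8bpc * 3 / 2 computation in intel_hdmi_compute_config(). A minimal sketch of that relationship, not driver code (the standalone helper name is made up for illustration):

	/* Illustrative only: 12bpc needs 1.5x the 8bpc port clock (36 vs 24 bpp). */
	static int hdmi_12bpc_port_clock(int clock_8bpc_khz)
	{
		/* e.g. 1080p at 148500 kHz becomes 222750 kHz at 12bpc */
		return clock_8bpc_khz * 3 / 2;
	}

The same factor is what intel_hdmi_mode_valid() applies above when it retries hdmi_port_clock_valid() at clock * 3 / 2, to see whether a mode that fails the 8bpc check could still be driven as 12bpc.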
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c new file mode 100644 index 000000000000..3c9171f11531 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_hotplug.c | |||
| @@ -0,0 +1,499 @@ | |||
| 1 | /* | ||
| 2 | * Copyright © 2015 Intel Corporation | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice (including the next | ||
| 12 | * paragraph) shall be included in all copies or substantial portions of the | ||
| 13 | * Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
| 21 | * IN THE SOFTWARE. | ||
| 22 | */ | ||
| 23 | |||
| 24 | #include <linux/kernel.h> | ||
| 25 | |||
| 26 | #include <drm/drmP.h> | ||
| 27 | #include <drm/i915_drm.h> | ||
| 28 | |||
| 29 | #include "i915_drv.h" | ||
| 30 | #include "intel_drv.h" | ||
| 31 | |||
| 32 | /** | ||
| 33 | * DOC: Hotplug | ||
| 34 | * | ||
| 35 | * Simply put, hotplug occurs when a display is connected to or disconnected | ||
| 36 | * from the system. However, there may be adapters and docking stations and | ||
| 37 | * Display Port short pulses and MST devices involved, complicating matters. | ||
| 38 | * | ||
| 39 | * Hotplug in i915 is handled at many different levels of abstraction. | ||
| 40 | * | ||
| 41 | * The platform dependent interrupt handling code in i915_irq.c enables, | ||
| 42 | * disables, and does preliminary handling of the interrupts. The interrupt | ||
| 43 | * handlers gather the hotplug detect (HPD) information from relevant registers | ||
| 44 | * into a platform independent mask of hotplug pins that have fired. | ||
| 45 | * | ||
| 46 | * The platform independent interrupt handler intel_hpd_irq_handler() in | ||
| 47 | * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes | ||
| 48 | * further processing to appropriate bottom halves (Display Port specific and | ||
| 49 | * regular hotplug). | ||
| 50 | * | ||
| 51 | * The Display Port work function i915_digport_work_func() calls into | ||
| 52 | * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long | ||
| 53 | * pulses, with failures and non-MST long pulses triggering regular hotplug | ||
| 54 | * processing on the connector. | ||
| 55 | * | ||
| 56 | * The regular hotplug work function i915_hotplug_work_func() calls connector | ||
| 57 | * detect hooks, and, if connector status changes, triggers sending of hotplug | ||
| 58 | * uevent to userspace via drm_kms_helper_hotplug_event(). | ||
| 59 | * | ||
| 60 | * Finally, userspace is responsible for triggering a modeset upon receiving | ||
| 61 | * the hotplug uevent, disabling or enabling the crtc as needed. | ||
| 62 | * | ||
| 63 | * The hotplug interrupt storm detection and mitigation code keeps track of the | ||
| 64 | * number of interrupts per hotplug pin over a period of time, and if the number | ||
| 65 | * of interrupts exceeds a certain threshold, the interrupt is disabled for a | ||
| 66 | * while before being re-enabled. The intention is to mitigate issues arising | ||
| 67 | * from broken hardware triggering massive amounts of interrupts and grinding | ||
| 68 | * the system to a halt. | ||
| 69 | * | ||
| 70 | * The current implementation expects that hotplug interrupt storms will not be | ||
| 71 | * seen when a display port sink is connected, so on platforms whose DP hotplug | ||
| 72 | * callback is handled by i915_digport_work_func() the HPD interrupt is never | ||
| 73 | * re-enabled there (it was never expected to be disabled in the first place). | ||
| 74 | * This is specific to DP sinks handled by that routine; any other display such | ||
| 75 | * as HDMI or DVI enabled on the same port will use i915_hotplug_work_func(), | ||
| 76 | * where the re-enable logic is handled. | ||
| 77 | */ | ||
| 78 | |||
| 79 | enum port intel_hpd_pin_to_port(enum hpd_pin pin) | ||
| 80 | { | ||
| 81 | switch (pin) { | ||
| 82 | case HPD_PORT_B: | ||
| 83 | return PORT_B; | ||
| 84 | case HPD_PORT_C: | ||
| 85 | return PORT_C; | ||
| 86 | case HPD_PORT_D: | ||
| 87 | return PORT_D; | ||
| 88 | default: | ||
| 89 | return PORT_A; /* no hpd */ | ||
| 90 | } | ||
| 91 | } | ||
| 92 | |||
| 93 | #define HPD_STORM_DETECT_PERIOD 1000 | ||
| 94 | #define HPD_STORM_THRESHOLD 5 | ||
| 95 | #define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000) | ||
| 96 | |||
| 97 | /** | ||
| 98 | * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin | ||
| 99 | * @dev_priv: private driver data pointer | ||
| 100 | * @pin: the pin to gather stats on | ||
| 101 | * | ||
| 102 | * Gather stats about HPD irqs from the specified @pin, and detect irq | ||
| 103 | * storms. Only the pin specific stats and state are changed, the caller is | ||
| 104 | * responsible for further action. | ||
| 105 | * | ||
| 106 | * @HPD_STORM_THRESHOLD irqs are allowed within @HPD_STORM_DETECT_PERIOD ms, | ||
| 107 | * otherwise it's considered an irq storm, and the irq state is set to | ||
| 108 | * @HPD_MARK_DISABLED. | ||
| 109 | * | ||
| 110 | * Return true if an irq storm was detected on @pin. | ||
| 111 | */ | ||
| 112 | static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, | ||
| 113 | enum hpd_pin pin) | ||
| 114 | { | ||
| 115 | unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies; | ||
| 116 | unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD); | ||
| 117 | bool storm = false; | ||
| 118 | |||
| 119 | if (!time_in_range(jiffies, start, end)) { | ||
| 120 | dev_priv->hotplug.stats[pin].last_jiffies = jiffies; | ||
| 121 | dev_priv->hotplug.stats[pin].count = 0; | ||
| 122 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin); | ||
| 123 | } else if (dev_priv->hotplug.stats[pin].count > HPD_STORM_THRESHOLD) { | ||
| 124 | dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED; | ||
| 125 | DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin); | ||
| 126 | storm = true; | ||
| 127 | } else { | ||
| 128 | dev_priv->hotplug.stats[pin].count++; | ||
| 129 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin, | ||
| 130 | dev_priv->hotplug.stats[pin].count); | ||
| 131 | } | ||
| 132 | |||
| 133 | return storm; | ||
| 134 | } | ||
| 135 | |||
| 136 | static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) | ||
| 137 | { | ||
| 138 | struct drm_device *dev = dev_priv->dev; | ||
| 139 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
| 140 | struct intel_connector *intel_connector; | ||
| 141 | struct intel_encoder *intel_encoder; | ||
| 142 | struct drm_connector *connector; | ||
| 143 | enum hpd_pin pin; | ||
| 144 | bool hpd_disabled = false; | ||
| 145 | |||
| 146 | assert_spin_locked(&dev_priv->irq_lock); | ||
| 147 | |||
| 148 | list_for_each_entry(connector, &mode_config->connector_list, head) { | ||
| 149 | if (connector->polled != DRM_CONNECTOR_POLL_HPD) | ||
| 150 | continue; | ||
| 151 | |||
| 152 | intel_connector = to_intel_connector(connector); | ||
| 153 | intel_encoder = intel_connector->encoder; | ||
| 154 | if (!intel_encoder) | ||
| 155 | continue; | ||
| 156 | |||
| 157 | pin = intel_encoder->hpd_pin; | ||
| 158 | if (pin == HPD_NONE || | ||
| 159 | dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED) | ||
| 160 | continue; | ||
| 161 | |||
| 162 | DRM_INFO("HPD interrupt storm detected on connector %s: " | ||
| 163 | "switching from hotplug detection to polling\n", | ||
| 164 | connector->name); | ||
| 165 | |||
| 166 | dev_priv->hotplug.stats[pin].state = HPD_DISABLED; | ||
| 167 | connector->polled = DRM_CONNECTOR_POLL_CONNECT | ||
| 168 | | DRM_CONNECTOR_POLL_DISCONNECT; | ||
| 169 | hpd_disabled = true; | ||
| 170 | } | ||
| 171 | |||
| 172 | /* Enable polling and queue hotplug re-enabling. */ | ||
| 173 | if (hpd_disabled) { | ||
| 174 | drm_kms_helper_poll_enable(dev); | ||
| 175 | mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, | ||
| 176 | msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); | ||
| 177 | } | ||
| 178 | } | ||
| 179 | |||
| 180 | static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) | ||
| 181 | { | ||
| 182 | struct drm_i915_private *dev_priv = | ||
| 183 | container_of(work, typeof(*dev_priv), | ||
| 184 | hotplug.reenable_work.work); | ||
| 185 | struct drm_device *dev = dev_priv->dev; | ||
| 186 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
| 187 | int i; | ||
| 188 | |||
| 189 | intel_runtime_pm_get(dev_priv); | ||
| 190 | |||
| 191 | spin_lock_irq(&dev_priv->irq_lock); | ||
| 192 | for_each_hpd_pin(i) { | ||
| 193 | struct drm_connector *connector; | ||
| 194 | |||
| 195 | if (dev_priv->hotplug.stats[i].state != HPD_DISABLED) | ||
| 196 | continue; | ||
| 197 | |||
| 198 | dev_priv->hotplug.stats[i].state = HPD_ENABLED; | ||
| 199 | |||
| 200 | list_for_each_entry(connector, &mode_config->connector_list, head) { | ||
| 201 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
| 202 | |||
| 203 | if (intel_connector->encoder->hpd_pin == i) { | ||
| 204 | if (connector->polled != intel_connector->polled) | ||
| 205 | DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", | ||
| 206 | connector->name); | ||
| 207 | connector->polled = intel_connector->polled; | ||
| 208 | if (!connector->polled) | ||
| 209 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
| 210 | } | ||
| 211 | } | ||
| 212 | } | ||
| 213 | if (dev_priv->display.hpd_irq_setup) | ||
| 214 | dev_priv->display.hpd_irq_setup(dev); | ||
| 215 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 216 | |||
| 217 | intel_runtime_pm_put(dev_priv); | ||
| 218 | } | ||
| 219 | |||
| 220 | static bool intel_hpd_irq_event(struct drm_device *dev, | ||
| 221 | struct drm_connector *connector) | ||
| 222 | { | ||
| 223 | enum drm_connector_status old_status; | ||
| 224 | |||
| 225 | WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); | ||
| 226 | old_status = connector->status; | ||
| 227 | |||
| 228 | connector->status = connector->funcs->detect(connector, false); | ||
| 229 | if (old_status == connector->status) | ||
| 230 | return false; | ||
| 231 | |||
| 232 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", | ||
| 233 | connector->base.id, | ||
| 234 | connector->name, | ||
| 235 | drm_get_connector_status_name(old_status), | ||
| 236 | drm_get_connector_status_name(connector->status)); | ||
| 237 | |||
| 238 | return true; | ||
| 239 | } | ||
| 240 | |||
| 241 | static void i915_digport_work_func(struct work_struct *work) | ||
| 242 | { | ||
| 243 | struct drm_i915_private *dev_priv = | ||
| 244 | container_of(work, struct drm_i915_private, hotplug.dig_port_work); | ||
| 245 | u32 long_port_mask, short_port_mask; | ||
| 246 | struct intel_digital_port *intel_dig_port; | ||
| 247 | int i; | ||
| 248 | u32 old_bits = 0; | ||
| 249 | |||
| 250 | spin_lock_irq(&dev_priv->irq_lock); | ||
| 251 | long_port_mask = dev_priv->hotplug.long_port_mask; | ||
| 252 | dev_priv->hotplug.long_port_mask = 0; | ||
| 253 | short_port_mask = dev_priv->hotplug.short_port_mask; | ||
| 254 | dev_priv->hotplug.short_port_mask = 0; | ||
| 255 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 256 | |||
| 257 | for (i = 0; i < I915_MAX_PORTS; i++) { | ||
| 258 | bool valid = false; | ||
| 259 | bool long_hpd = false; | ||
| 260 | intel_dig_port = dev_priv->hotplug.irq_port[i]; | ||
| 261 | if (!intel_dig_port || !intel_dig_port->hpd_pulse) | ||
| 262 | continue; | ||
| 263 | |||
| 264 | if (long_port_mask & (1 << i)) { | ||
| 265 | valid = true; | ||
| 266 | long_hpd = true; | ||
| 267 | } else if (short_port_mask & (1 << i)) | ||
| 268 | valid = true; | ||
| 269 | |||
| 270 | if (valid) { | ||
| 271 | enum irqreturn ret; | ||
| 272 | |||
| 273 | ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd); | ||
| 274 | if (ret == IRQ_NONE) { | ||
| 275 | /* fall back to old school hpd */ | ||
| 276 | old_bits |= (1 << intel_dig_port->base.hpd_pin); | ||
| 277 | } | ||
| 278 | } | ||
| 279 | } | ||
| 280 | |||
| 281 | if (old_bits) { | ||
| 282 | spin_lock_irq(&dev_priv->irq_lock); | ||
| 283 | dev_priv->hotplug.event_bits |= old_bits; | ||
| 284 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 285 | schedule_work(&dev_priv->hotplug.hotplug_work); | ||
| 286 | } | ||
| 287 | } | ||
| 288 | |||
| 289 | /* | ||
| 290 | * Handle hotplug events outside the interrupt handler proper. | ||
| 291 | */ | ||
| 292 | static void i915_hotplug_work_func(struct work_struct *work) | ||
| 293 | { | ||
| 294 | struct drm_i915_private *dev_priv = | ||
| 295 | container_of(work, struct drm_i915_private, hotplug.hotplug_work); | ||
| 296 | struct drm_device *dev = dev_priv->dev; | ||
| 297 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
| 298 | struct intel_connector *intel_connector; | ||
| 299 | struct intel_encoder *intel_encoder; | ||
| 300 | struct drm_connector *connector; | ||
| 301 | bool changed = false; | ||
| 302 | u32 hpd_event_bits; | ||
| 303 | |||
| 304 | mutex_lock(&mode_config->mutex); | ||
| 305 | DRM_DEBUG_KMS("running encoder hotplug functions\n"); | ||
| 306 | |||
| 307 | spin_lock_irq(&dev_priv->irq_lock); | ||
| 308 | |||
| 309 | hpd_event_bits = dev_priv->hotplug.event_bits; | ||
| 310 | dev_priv->hotplug.event_bits = 0; | ||
| 311 | |||
| 312 | /* Disable hotplug on connectors that hit an irq storm. */ | ||
| 313 | intel_hpd_irq_storm_disable(dev_priv); | ||
| 314 | |||
| 315 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 316 | |||
| 317 | list_for_each_entry(connector, &mode_config->connector_list, head) { | ||
| 318 | intel_connector = to_intel_connector(connector); | ||
| 319 | if (!intel_connector->encoder) | ||
| 320 | continue; | ||
| 321 | intel_encoder = intel_connector->encoder; | ||
| 322 | if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { | ||
| 323 | DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", | ||
| 324 | connector->name, intel_encoder->hpd_pin); | ||
| 325 | if (intel_encoder->hot_plug) | ||
| 326 | intel_encoder->hot_plug(intel_encoder); | ||
| 327 | if (intel_hpd_irq_event(dev, connector)) | ||
| 328 | changed = true; | ||
| 329 | } | ||
| 330 | } | ||
| 331 | mutex_unlock(&mode_config->mutex); | ||
| 332 | |||
| 333 | if (changed) | ||
| 334 | drm_kms_helper_hotplug_event(dev); | ||
| 335 | } | ||
| 336 | |||
| 337 | |||
| 338 | /** | ||
| 339 | * intel_hpd_irq_handler - main hotplug irq handler | ||
| 340 | * @dev: drm device | ||
| 341 | * @pin_mask: a mask of hpd pins that have triggered the irq | ||
| 342 | * @long_mask: a mask of hpd pins that may be long hpd pulses | ||
| 343 | * | ||
| 344 | * This is the main hotplug irq handler for all platforms. The platform specific | ||
| 345 | * irq handlers call the platform specific hotplug irq handlers, which read and | ||
| 346 | * decode the appropriate registers into bitmasks about hpd pins that have | ||
| 347 | * triggered (@pin_mask), and which of those pins may be long pulses | ||
| 348 | * (@long_mask). The @long_mask is ignored if the port corresponding to the pin | ||
| 349 | * is not a digital port. | ||
| 350 | * | ||
| 351 | * Here, we do hotplug irq storm detection and mitigation, and pass further | ||
| 352 | * processing to appropriate bottom halves. | ||
| 353 | */ | ||
| 354 | void intel_hpd_irq_handler(struct drm_device *dev, | ||
| 355 | u32 pin_mask, u32 long_mask) | ||
| 356 | { | ||
| 357 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 358 | int i; | ||
| 359 | enum port port; | ||
| 360 | bool storm_detected = false; | ||
| 361 | bool queue_dig = false, queue_hp = false; | ||
| 362 | bool is_dig_port; | ||
| 363 | |||
| 364 | if (!pin_mask) | ||
| 365 | return; | ||
| 366 | |||
| 367 | spin_lock(&dev_priv->irq_lock); | ||
| 368 | for_each_hpd_pin(i) { | ||
| 369 | if (!(BIT(i) & pin_mask)) | ||
| 370 | continue; | ||
| 371 | |||
| 372 | port = intel_hpd_pin_to_port(i); | ||
| 373 | is_dig_port = port && dev_priv->hotplug.irq_port[port]; | ||
| 374 | |||
| 375 | if (is_dig_port) { | ||
| 376 | bool long_hpd = long_mask & BIT(i); | ||
| 377 | |||
| 378 | DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), | ||
| 379 | long_hpd ? "long" : "short"); | ||
| 380 | /* | ||
| 381 | * For long HPD pulses we want to have the digital queue happen, | ||
| 382 | * but we still want HPD storm detection to function. | ||
| 383 | */ | ||
| 384 | queue_dig = true; | ||
| 385 | if (long_hpd) { | ||
| 386 | dev_priv->hotplug.long_port_mask |= (1 << port); | ||
| 387 | } else { | ||
| 388 | /* for short HPD just trigger the digital queue */ | ||
| 389 | dev_priv->hotplug.short_port_mask |= (1 << port); | ||
| 390 | continue; | ||
| 391 | } | ||
| 392 | } | ||
| 393 | |||
| 394 | if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) { | ||
| 395 | /* | ||
| 396 | * On GMCH platforms the interrupt mask bits only | ||
| 397 | * prevent irq generation, not the setting of the | ||
| 398 | * hotplug bits itself. So only WARN about unexpected | ||
| 399 | * interrupts on saner platforms. | ||
| 400 | */ | ||
| 401 | WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), | ||
| 402 | "Received HPD interrupt on pin %d although disabled\n", i); | ||
| 403 | continue; | ||
| 404 | } | ||
| 405 | |||
| 406 | if (dev_priv->hotplug.stats[i].state != HPD_ENABLED) | ||
| 407 | continue; | ||
| 408 | |||
| 409 | if (!is_dig_port) { | ||
| 410 | dev_priv->hotplug.event_bits |= BIT(i); | ||
| 411 | queue_hp = true; | ||
| 412 | } | ||
| 413 | |||
| 414 | if (intel_hpd_irq_storm_detect(dev_priv, i)) { | ||
| 415 | dev_priv->hotplug.event_bits &= ~BIT(i); | ||
| 416 | storm_detected = true; | ||
| 417 | } | ||
| 418 | } | ||
| 419 | |||
| 420 | if (storm_detected) | ||
| 421 | dev_priv->display.hpd_irq_setup(dev); | ||
| 422 | spin_unlock(&dev_priv->irq_lock); | ||
| 423 | |||
| 424 | /* | ||
| 425 | * Our hotplug handler can grab modeset locks (by calling down into the | ||
| 426 | * fb helpers). Hence it must not be run on our own dev_priv->wq work | ||
| 427 | * queue, otherwise the flush_work in the pageflip code will | ||
| 428 | * deadlock. | ||
| 429 | */ | ||
| 430 | if (queue_dig) | ||
| 431 | queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work); | ||
| 432 | if (queue_hp) | ||
| 433 | schedule_work(&dev_priv->hotplug.hotplug_work); | ||
| 434 | } | ||
| 435 | |||
| 436 | /** | ||
| 437 | * intel_hpd_init - initializes and enables hpd support | ||
| 438 | * @dev_priv: i915 device instance | ||
| 439 | * | ||
| 440 | * This function enables the hotplug support. It requires that interrupts have | ||
| 441 | * already been enabled with intel_irq_init_hw(). From this point on hotplug and | ||
| 442 | * poll requests can run concurrently with other code, so locking rules must be | ||
| 443 | * obeyed. | ||
| 444 | * | ||
| 445 | * This is a separate step from interrupt enabling to simplify the locking rules | ||
| 446 | * in the driver load and resume code. | ||
| 447 | */ | ||
| 448 | void intel_hpd_init(struct drm_i915_private *dev_priv) | ||
| 449 | { | ||
| 450 | struct drm_device *dev = dev_priv->dev; | ||
| 451 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
| 452 | struct drm_connector *connector; | ||
| 453 | int i; | ||
| 454 | |||
| 455 | for_each_hpd_pin(i) { | ||
| 456 | dev_priv->hotplug.stats[i].count = 0; | ||
| 457 | dev_priv->hotplug.stats[i].state = HPD_ENABLED; | ||
| 458 | } | ||
| 459 | list_for_each_entry(connector, &mode_config->connector_list, head) { | ||
| 460 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
| 461 | connector->polled = intel_connector->polled; | ||
| 462 | if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) | ||
| 463 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
| 464 | if (intel_connector->mst_port) | ||
| 465 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
| 466 | } | ||
| 467 | |||
| 468 | /* | ||
| 469 | * Interrupt setup is already guaranteed to be single-threaded, this is | ||
| 470 | * just to make the assert_spin_locked checks happy. | ||
| 471 | */ | ||
| 472 | spin_lock_irq(&dev_priv->irq_lock); | ||
| 473 | if (dev_priv->display.hpd_irq_setup) | ||
| 474 | dev_priv->display.hpd_irq_setup(dev); | ||
| 475 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 476 | } | ||
| 477 | |||
| 478 | void intel_hpd_init_work(struct drm_i915_private *dev_priv) | ||
| 479 | { | ||
| 480 | INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func); | ||
| 481 | INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func); | ||
| 482 | INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work, | ||
| 483 | intel_hpd_irq_storm_reenable_work); | ||
| 484 | } | ||
| 485 | |||
| 486 | void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) | ||
| 487 | { | ||
| 488 | spin_lock_irq(&dev_priv->irq_lock); | ||
| 489 | |||
| 490 | dev_priv->hotplug.long_port_mask = 0; | ||
| 491 | dev_priv->hotplug.short_port_mask = 0; | ||
| 492 | dev_priv->hotplug.event_bits = 0; | ||
| 493 | |||
| 494 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 495 | |||
| 496 | cancel_work_sync(&dev_priv->hotplug.dig_port_work); | ||
| 497 | cancel_work_sync(&dev_priv->hotplug.hotplug_work); | ||
| 498 | cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work); | ||
| 499 | } | ||
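As a reading aid for the storm detection described in the DOC comment above, here is a toy model of intel_hpd_irq_storm_detect() with the constants inlined and plain millisecond timestamps instead of jiffies; it is not driver code and ignores timestamp wraparound:

	#include <stdbool.h>

	/* Reset the window after 1000 ms (HPD_STORM_DETECT_PERIOD); flag a storm
	 * once more than 5 interrupts (HPD_STORM_THRESHOLD) land in one window. */
	static bool hpd_storm_model(unsigned long *win_start_ms, int *count,
				    unsigned long now_ms)
	{
		if (now_ms - *win_start_ms > 1000) {
			*win_start_ms = now_ms;	/* start a fresh window */
			*count = 0;
		} else if (*count > 5) {
			return true;		/* storm: caller disables the pin */
		} else {
			(*count)++;
		}
		return false;
	}

When a storm is flagged, intel_hpd_irq_storm_disable() switches the affected connector to polling, and a delayed work re-enables HPD after HPD_STORM_REENABLE_DELAY (two minutes), as intel_hpd_irq_storm_reenable_work() above does.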
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 9b74ffae5f5a..9faad82c42ec 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
| @@ -135,6 +135,7 @@ | |||
| 135 | #include <drm/drmP.h> | 135 | #include <drm/drmP.h> |
| 136 | #include <drm/i915_drm.h> | 136 | #include <drm/i915_drm.h> |
| 137 | #include "i915_drv.h" | 137 | #include "i915_drv.h" |
| 138 | #include "intel_mocs.h" | ||
| 138 | 139 | ||
| 139 | #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE) | 140 | #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE) |
| 140 | #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE) | 141 | #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE) |
| @@ -190,9 +191,7 @@ | |||
| 190 | #define GEN8_CTX_PRIVILEGE (1<<8) | 191 | #define GEN8_CTX_PRIVILEGE (1<<8) |
| 191 | 192 | ||
| 192 | #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) { \ | 193 | #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) { \ |
| 193 | const u64 _addr = test_bit(n, ppgtt->pdp.used_pdpes) ? \ | 194 | const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \ |
| 194 | ppgtt->pdp.page_directory[n]->daddr : \ | ||
| 195 | ppgtt->scratch_pd->daddr; \ | ||
| 196 | reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \ | 195 | reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \ |
| 197 | reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \ | 196 | reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \ |
| 198 | } | 197 | } |
| @@ -211,9 +210,9 @@ enum { | |||
| 211 | FAULT_AND_CONTINUE /* Unsupported */ | 210 | FAULT_AND_CONTINUE /* Unsupported */ |
| 212 | }; | 211 | }; |
| 213 | #define GEN8_CTX_ID_SHIFT 32 | 212 | #define GEN8_CTX_ID_SHIFT 32 |
| 213 | #define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 | ||
| 214 | 214 | ||
| 215 | static int intel_lr_context_pin(struct intel_engine_cs *ring, | 215 | static int intel_lr_context_pin(struct drm_i915_gem_request *rq); |
| 216 | struct intel_context *ctx); | ||
| 217 | 216 | ||
| 218 | /** | 217 | /** |
| 219 | * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists | 218 | * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists |
| @@ -263,10 +262,11 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj) | |||
| 263 | return lrca >> 12; | 262 | return lrca >> 12; |
| 264 | } | 263 | } |
| 265 | 264 | ||
| 266 | static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring, | 265 | static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq) |
| 267 | struct drm_i915_gem_object *ctx_obj) | ||
| 268 | { | 266 | { |
| 267 | struct intel_engine_cs *ring = rq->ring; | ||
| 269 | struct drm_device *dev = ring->dev; | 268 | struct drm_device *dev = ring->dev; |
| 269 | struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; | ||
| 270 | uint64_t desc; | 270 | uint64_t desc; |
| 271 | uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj); | 271 | uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj); |
| 272 | 272 | ||
| @@ -294,55 +294,59 @@ static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring, | |||
| 294 | return desc; | 294 | return desc; |
| 295 | } | 295 | } |
| 296 | 296 | ||
| 297 | static void execlists_elsp_write(struct intel_engine_cs *ring, | 297 | static void execlists_elsp_write(struct drm_i915_gem_request *rq0, |
| 298 | struct drm_i915_gem_object *ctx_obj0, | 298 | struct drm_i915_gem_request *rq1) |
| 299 | struct drm_i915_gem_object *ctx_obj1) | ||
| 300 | { | 299 | { |
| 300 | |||
| 301 | struct intel_engine_cs *ring = rq0->ring; | ||
| 301 | struct drm_device *dev = ring->dev; | 302 | struct drm_device *dev = ring->dev; |
| 302 | struct drm_i915_private *dev_priv = dev->dev_private; | 303 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 303 | uint64_t temp = 0; | 304 | uint64_t desc[2]; |
| 304 | uint32_t desc[4]; | ||
| 305 | 305 | ||
| 306 | /* XXX: You must always write both descriptors in the order below. */ | 306 | if (rq1) { |
| 307 | if (ctx_obj1) | 307 | desc[1] = execlists_ctx_descriptor(rq1); |
| 308 | temp = execlists_ctx_descriptor(ring, ctx_obj1); | 308 | rq1->elsp_submitted++; |
| 309 | else | 309 | } else { |
| 310 | temp = 0; | 310 | desc[1] = 0; |
| 311 | desc[1] = (u32)(temp >> 32); | 311 | } |
| 312 | desc[0] = (u32)temp; | ||
| 313 | 312 | ||
| 314 | temp = execlists_ctx_descriptor(ring, ctx_obj0); | 313 | desc[0] = execlists_ctx_descriptor(rq0); |
| 315 | desc[3] = (u32)(temp >> 32); | 314 | rq0->elsp_submitted++; |
| 316 | desc[2] = (u32)temp; | ||
| 317 | 315 | ||
| 316 | /* You must always write both descriptors in the order below. */ | ||
| 318 | spin_lock(&dev_priv->uncore.lock); | 317 | spin_lock(&dev_priv->uncore.lock); |
| 319 | intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL); | 318 | intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL); |
| 320 | I915_WRITE_FW(RING_ELSP(ring), desc[1]); | 319 | I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1])); |
| 321 | I915_WRITE_FW(RING_ELSP(ring), desc[0]); | 320 | I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1])); |
| 322 | I915_WRITE_FW(RING_ELSP(ring), desc[3]); | ||
| 323 | 321 | ||
| 322 | I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0])); | ||
| 324 | /* The context is automatically loaded after the following */ | 323 | /* The context is automatically loaded after the following */ |
| 325 | I915_WRITE_FW(RING_ELSP(ring), desc[2]); | 324 | I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0])); |
| 326 | 325 | ||
| 327 | /* ELSP is a wo register, so use another nearby reg for posting instead */ | 326 | /* ELSP is a wo register, use another nearby reg for posting */ |
| 328 | POSTING_READ_FW(RING_EXECLIST_STATUS(ring)); | 327 | POSTING_READ_FW(RING_EXECLIST_STATUS(ring)); |
| 329 | intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL); | 328 | intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL); |
| 330 | spin_unlock(&dev_priv->uncore.lock); | 329 | spin_unlock(&dev_priv->uncore.lock); |
| 331 | } | 330 | } |
| 332 | 331 | ||
| 333 | static int execlists_update_context(struct drm_i915_gem_object *ctx_obj, | 332 | static int execlists_update_context(struct drm_i915_gem_request *rq) |
| 334 | struct drm_i915_gem_object *ring_obj, | ||
| 335 | struct i915_hw_ppgtt *ppgtt, | ||
| 336 | u32 tail) | ||
| 337 | { | 333 | { |
| 334 | struct intel_engine_cs *ring = rq->ring; | ||
| 335 | struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; | ||
| 336 | struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; | ||
| 337 | struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj; | ||
| 338 | struct page *page; | 338 | struct page *page; |
| 339 | uint32_t *reg_state; | 339 | uint32_t *reg_state; |
| 340 | 340 | ||
| 341 | BUG_ON(!ctx_obj); | ||
| 342 | WARN_ON(!i915_gem_obj_is_pinned(ctx_obj)); | ||
| 343 | WARN_ON(!i915_gem_obj_is_pinned(rb_obj)); | ||
| 344 | |||
| 341 | page = i915_gem_object_get_page(ctx_obj, 1); | 345 | page = i915_gem_object_get_page(ctx_obj, 1); |
| 342 | reg_state = kmap_atomic(page); | 346 | reg_state = kmap_atomic(page); |
| 343 | 347 | ||
| 344 | reg_state[CTX_RING_TAIL+1] = tail; | 348 | reg_state[CTX_RING_TAIL+1] = rq->tail; |
| 345 | reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj); | 349 | reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj); |
| 346 | 350 | ||
| 347 | /* True PPGTT with dynamic page allocation: update PDP registers and | 351 | /* True PPGTT with dynamic page allocation: update PDP registers and |
| 348 | * point the unallocated PDPs to the scratch page | 352 | * point the unallocated PDPs to the scratch page |
| @@ -359,32 +363,15 @@ static int execlists_update_context(struct drm_i915_gem_object *ctx_obj, | |||
| 359 | return 0; | 363 | return 0; |
| 360 | } | 364 | } |
| 361 | 365 | ||
| 362 | static void execlists_submit_contexts(struct intel_engine_cs *ring, | 366 | static void execlists_submit_requests(struct drm_i915_gem_request *rq0, |
| 363 | struct intel_context *to0, u32 tail0, | 367 | struct drm_i915_gem_request *rq1) |
| 364 | struct intel_context *to1, u32 tail1) | ||
| 365 | { | 368 | { |
| 366 | struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state; | 369 | execlists_update_context(rq0); |
| 367 | struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf; | ||
| 368 | struct drm_i915_gem_object *ctx_obj1 = NULL; | ||
| 369 | struct intel_ringbuffer *ringbuf1 = NULL; | ||
| 370 | |||
| 371 | BUG_ON(!ctx_obj0); | ||
| 372 | WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0)); | ||
| 373 | WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj)); | ||
| 374 | |||
| 375 | execlists_update_context(ctx_obj0, ringbuf0->obj, to0->ppgtt, tail0); | ||
| 376 | 370 | ||
| 377 | if (to1) { | 371 | if (rq1) |
| 378 | ringbuf1 = to1->engine[ring->id].ringbuf; | 372 | execlists_update_context(rq1); |
| 379 | ctx_obj1 = to1->engine[ring->id].state; | ||
| 380 | BUG_ON(!ctx_obj1); | ||
| 381 | WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1)); | ||
| 382 | WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj)); | ||
| 383 | 373 | ||
| 384 | execlists_update_context(ctx_obj1, ringbuf1->obj, to1->ppgtt, tail1); | 374 | execlists_elsp_write(rq0, rq1); |
| 385 | } | ||
| 386 | |||
| 387 | execlists_elsp_write(ring, ctx_obj0, ctx_obj1); | ||
| 388 | } | 375 | } |
| 389 | 376 | ||
| 390 | static void execlists_context_unqueue(struct intel_engine_cs *ring) | 377 | static void execlists_context_unqueue(struct intel_engine_cs *ring) |
| @@ -444,13 +431,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring) | |||
| 444 | 431 | ||
| 445 | WARN_ON(req1 && req1->elsp_submitted); | 432 | WARN_ON(req1 && req1->elsp_submitted); |
| 446 | 433 | ||
| 447 | execlists_submit_contexts(ring, req0->ctx, req0->tail, | 434 | execlists_submit_requests(req0, req1); |
| 448 | req1 ? req1->ctx : NULL, | ||
| 449 | req1 ? req1->tail : 0); | ||
| 450 | |||
| 451 | req0->elsp_submitted++; | ||
| 452 | if (req1) | ||
| 453 | req1->elsp_submitted++; | ||
| 454 | } | 435 | } |
| 455 | 436 | ||
| 456 | static bool execlists_check_remove_request(struct intel_engine_cs *ring, | 437 | static bool execlists_check_remove_request(struct intel_engine_cs *ring, |
| @@ -543,34 +524,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) | |||
| 543 | ((u32)ring->next_context_status_buffer & 0x07) << 8); | 524 | ((u32)ring->next_context_status_buffer & 0x07) << 8); |
| 544 | } | 525 | } |
| 545 | 526 | ||
| 546 | static int execlists_context_queue(struct intel_engine_cs *ring, | 527 | static int execlists_context_queue(struct drm_i915_gem_request *request) |
| 547 | struct intel_context *to, | ||
| 548 | u32 tail, | ||
| 549 | struct drm_i915_gem_request *request) | ||
| 550 | { | 528 | { |
| 529 | struct intel_engine_cs *ring = request->ring; | ||
| 551 | struct drm_i915_gem_request *cursor; | 530 | struct drm_i915_gem_request *cursor; |
| 552 | int num_elements = 0; | 531 | int num_elements = 0; |
| 553 | 532 | ||
| 554 | if (to != ring->default_context) | 533 | if (request->ctx != ring->default_context) |
| 555 | intel_lr_context_pin(ring, to); | 534 | intel_lr_context_pin(request); |
| 556 | 535 | ||
| 557 | if (!request) { | 536 | i915_gem_request_reference(request); |
| 558 | /* | 537 | |
| 559 | * If there isn't a request associated with this submission, | 538 | request->tail = request->ringbuf->tail; |
| 560 | * create one as a temporary holder. | ||
| 561 | */ | ||
| 562 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
| 563 | if (request == NULL) | ||
| 564 | return -ENOMEM; | ||
| 565 | request->ring = ring; | ||
| 566 | request->ctx = to; | ||
| 567 | kref_init(&request->ref); | ||
| 568 | i915_gem_context_reference(request->ctx); | ||
| 569 | } else { | ||
| 570 | i915_gem_request_reference(request); | ||
| 571 | WARN_ON(to != request->ctx); | ||
| 572 | } | ||
| 573 | request->tail = tail; | ||
| 574 | 539 | ||
| 575 | spin_lock_irq(&ring->execlist_lock); | 540 | spin_lock_irq(&ring->execlist_lock); |
| 576 | 541 | ||
| @@ -585,7 +550,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring, | |||
| 585 | struct drm_i915_gem_request, | 550 | struct drm_i915_gem_request, |
| 586 | execlist_link); | 551 | execlist_link); |
| 587 | 552 | ||
| 588 | if (to == tail_req->ctx) { | 553 | if (request->ctx == tail_req->ctx) { |
| 589 | WARN(tail_req->elsp_submitted != 0, | 554 | WARN(tail_req->elsp_submitted != 0, |
| 590 | "More than 2 already-submitted reqs queued\n"); | 555 | "More than 2 already-submitted reqs queued\n"); |
| 591 | list_del(&tail_req->execlist_link); | 556 | list_del(&tail_req->execlist_link); |
| @@ -603,10 +568,9 @@ static int execlists_context_queue(struct intel_engine_cs *ring, | |||
| 603 | return 0; | 568 | return 0; |
| 604 | } | 569 | } |
| 605 | 570 | ||
| 606 | static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf, | 571 | static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req) |
| 607 | struct intel_context *ctx) | ||
| 608 | { | 572 | { |
| 609 | struct intel_engine_cs *ring = ringbuf->ring; | 573 | struct intel_engine_cs *ring = req->ring; |
| 610 | uint32_t flush_domains; | 574 | uint32_t flush_domains; |
| 611 | int ret; | 575 | int ret; |
| 612 | 576 | ||
| @@ -614,8 +578,7 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf, | |||
| 614 | if (ring->gpu_caches_dirty) | 578 | if (ring->gpu_caches_dirty) |
| 615 | flush_domains = I915_GEM_GPU_DOMAINS; | 579 | flush_domains = I915_GEM_GPU_DOMAINS; |
| 616 | 580 | ||
| 617 | ret = ring->emit_flush(ringbuf, ctx, | 581 | ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains); |
| 618 | I915_GEM_GPU_DOMAINS, flush_domains); | ||
| 619 | if (ret) | 582 | if (ret) |
| 620 | return ret; | 583 | return ret; |
| 621 | 584 | ||
| @@ -623,12 +586,10 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf, | |||
| 623 | return 0; | 586 | return 0; |
| 624 | } | 587 | } |
| 625 | 588 | ||
| 626 | static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf, | 589 | static int execlists_move_to_gpu(struct drm_i915_gem_request *req, |
| 627 | struct intel_context *ctx, | ||
| 628 | struct list_head *vmas) | 590 | struct list_head *vmas) |
| 629 | { | 591 | { |
| 630 | struct intel_engine_cs *ring = ringbuf->ring; | 592 | const unsigned other_rings = ~intel_ring_flag(req->ring); |
| 631 | const unsigned other_rings = ~intel_ring_flag(ring); | ||
| 632 | struct i915_vma *vma; | 593 | struct i915_vma *vma; |
| 633 | uint32_t flush_domains = 0; | 594 | uint32_t flush_domains = 0; |
| 634 | bool flush_chipset = false; | 595 | bool flush_chipset = false; |
| @@ -638,7 +599,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf, | |||
| 638 | struct drm_i915_gem_object *obj = vma->obj; | 599 | struct drm_i915_gem_object *obj = vma->obj; |
| 639 | 600 | ||
| 640 | if (obj->active & other_rings) { | 601 | if (obj->active & other_rings) { |
| 641 | ret = i915_gem_object_sync(obj, ring); | 602 | ret = i915_gem_object_sync(obj, req->ring, &req); |
| 642 | if (ret) | 603 | if (ret) |
| 643 | return ret; | 604 | return ret; |
| 644 | } | 605 | } |
| @@ -655,59 +616,59 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf, | |||
| 655 | /* Unconditionally invalidate gpu caches and ensure that we do flush | 616 | /* Unconditionally invalidate gpu caches and ensure that we do flush |
| 656 | * any residual writes from the previous batch. | 617 | * any residual writes from the previous batch. |
| 657 | */ | 618 | */ |
| 658 | return logical_ring_invalidate_all_caches(ringbuf, ctx); | 619 | return logical_ring_invalidate_all_caches(req); |
| 659 | } | 620 | } |
| 660 | 621 | ||
| 661 | int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request, | 622 | int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request) |
| 662 | struct intel_context *ctx) | ||
| 663 | { | 623 | { |
| 664 | int ret; | 624 | int ret; |
| 665 | 625 | ||
| 666 | if (ctx != request->ring->default_context) { | 626 | request->ringbuf = request->ctx->engine[request->ring->id].ringbuf; |
| 667 | ret = intel_lr_context_pin(request->ring, ctx); | 627 | |
| 628 | if (request->ctx != request->ring->default_context) { | ||
| 629 | ret = intel_lr_context_pin(request); | ||
| 668 | if (ret) | 630 | if (ret) |
| 669 | return ret; | 631 | return ret; |
| 670 | } | 632 | } |
| 671 | 633 | ||
| 672 | request->ringbuf = ctx->engine[request->ring->id].ringbuf; | ||
| 673 | request->ctx = ctx; | ||
| 674 | i915_gem_context_reference(request->ctx); | ||
| 675 | |||
| 676 | return 0; | 634 | return 0; |
| 677 | } | 635 | } |
| 678 | 636 | ||
| 679 | static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf, | 637 | static int logical_ring_wait_for_space(struct drm_i915_gem_request *req, |
| 680 | struct intel_context *ctx, | ||
| 681 | int bytes) | 638 | int bytes) |
| 682 | { | 639 | { |
| 683 | struct intel_engine_cs *ring = ringbuf->ring; | 640 | struct intel_ringbuffer *ringbuf = req->ringbuf; |
| 684 | struct drm_i915_gem_request *request; | 641 | struct intel_engine_cs *ring = req->ring; |
| 642 | struct drm_i915_gem_request *target; | ||
| 685 | unsigned space; | 643 | unsigned space; |
| 686 | int ret; | 644 | int ret; |
| 687 | 645 | ||
| 688 | if (intel_ring_space(ringbuf) >= bytes) | 646 | if (intel_ring_space(ringbuf) >= bytes) |
| 689 | return 0; | 647 | return 0; |
| 690 | 648 | ||
| 691 | list_for_each_entry(request, &ring->request_list, list) { | 649 | /* The whole point of reserving space is to not wait! */ |
| 650 | WARN_ON(ringbuf->reserved_in_use); | ||
| 651 | |||
| 652 | list_for_each_entry(target, &ring->request_list, list) { | ||
| 692 | /* | 653 | /* |
| 693 | * The request queue is per-engine, so can contain requests | 654 | * The request queue is per-engine, so can contain requests |
| 694 | * from multiple ringbuffers. Here, we must ignore any that | 655 | * from multiple ringbuffers. Here, we must ignore any that |
| 695 | * aren't from the ringbuffer we're considering. | 656 | * aren't from the ringbuffer we're considering. |
| 696 | */ | 657 | */ |
| 697 | if (request->ringbuf != ringbuf) | 658 | if (target->ringbuf != ringbuf) |
| 698 | continue; | 659 | continue; |
| 699 | 660 | ||
| 700 | /* Would completion of this request free enough space? */ | 661 | /* Would completion of this request free enough space? */ |
| 701 | space = __intel_ring_space(request->postfix, ringbuf->tail, | 662 | space = __intel_ring_space(target->postfix, ringbuf->tail, |
| 702 | ringbuf->size); | 663 | ringbuf->size); |
| 703 | if (space >= bytes) | 664 | if (space >= bytes) |
| 704 | break; | 665 | break; |
| 705 | } | 666 | } |
| 706 | 667 | ||
| 707 | if (WARN_ON(&request->list == &ring->request_list)) | 668 | if (WARN_ON(&target->list == &ring->request_list)) |
| 708 | return -ENOSPC; | 669 | return -ENOSPC; |
| 709 | 670 | ||
| 710 | ret = i915_wait_request(request); | 671 | ret = i915_wait_request(target); |
| 711 | if (ret) | 672 | if (ret) |
| 712 | return ret; | 673 | return ret; |
| 713 | 674 | ||
| @@ -717,7 +678,7 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf, | |||
| 717 | 678 | ||
| 718 | /* | 679 | /* |
| 719 | * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload | 680 | * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload |
| 720 | * @ringbuf: Logical Ringbuffer to advance. | 681 | * @request: Request to advance the logical ringbuffer of. |
| 721 | * | 682 | * |
| 722 | * The tail is updated in our logical ringbuffer struct, not in the actual context. What | 683 | * The tail is updated in our logical ringbuffer struct, not in the actual context. What |
| 723 | * really happens during submission is that the context and current tail will be placed | 684 | * really happens during submission is that the context and current tail will be placed |
| @@ -725,33 +686,23 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf, | |||
| 725 | * point, the tail *inside* the context is updated and the ELSP written to. | 686 | * point, the tail *inside* the context is updated and the ELSP written to. |
| 726 | */ | 687 | */ |
| 727 | static void | 688 | static void |
| 728 | intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf, | 689 | intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) |
| 729 | struct intel_context *ctx, | ||
| 730 | struct drm_i915_gem_request *request) | ||
| 731 | { | 690 | { |
| 732 | struct intel_engine_cs *ring = ringbuf->ring; | 691 | struct intel_engine_cs *ring = request->ring; |
| 733 | 692 | ||
| 734 | intel_logical_ring_advance(ringbuf); | 693 | intel_logical_ring_advance(request->ringbuf); |
| 735 | 694 | ||
| 736 | if (intel_ring_stopped(ring)) | 695 | if (intel_ring_stopped(ring)) |
| 737 | return; | 696 | return; |
| 738 | 697 | ||
| 739 | execlists_context_queue(ring, ctx, ringbuf->tail, request); | 698 | execlists_context_queue(request); |
| 740 | } | 699 | } |
| 741 | 700 | ||
| 742 | static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf, | 701 | static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf) |
| 743 | struct intel_context *ctx) | ||
| 744 | { | 702 | { |
| 745 | uint32_t __iomem *virt; | 703 | uint32_t __iomem *virt; |
| 746 | int rem = ringbuf->size - ringbuf->tail; | 704 | int rem = ringbuf->size - ringbuf->tail; |
| 747 | 705 | ||
| 748 | if (ringbuf->space < rem) { | ||
| 749 | int ret = logical_ring_wait_for_space(ringbuf, ctx, rem); | ||
| 750 | |||
| 751 | if (ret) | ||
| 752 | return ret; | ||
| 753 | } | ||
| 754 | |||
| 755 | virt = ringbuf->virtual_start + ringbuf->tail; | 706 | virt = ringbuf->virtual_start + ringbuf->tail; |
| 756 | rem /= 4; | 707 | rem /= 4; |
| 757 | while (rem--) | 708 | while (rem--) |
| @@ -759,25 +710,50 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf, | |||
| 759 | 710 | ||
| 760 | ringbuf->tail = 0; | 711 | ringbuf->tail = 0; |
| 761 | intel_ring_update_space(ringbuf); | 712 | intel_ring_update_space(ringbuf); |
| 762 | |||
| 763 | return 0; | ||
| 764 | } | 713 | } |
| 765 | 714 | ||
| 766 | static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, | 715 | static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes) |
| 767 | struct intel_context *ctx, int bytes) | ||
| 768 | { | 716 | { |
| 769 | int ret; | 717 | struct intel_ringbuffer *ringbuf = req->ringbuf; |
| 718 | int remain_usable = ringbuf->effective_size - ringbuf->tail; | ||
| 719 | int remain_actual = ringbuf->size - ringbuf->tail; | ||
| 720 | int ret, total_bytes, wait_bytes = 0; | ||
| 721 | bool need_wrap = false; | ||
| 722 | |||
| 723 | if (ringbuf->reserved_in_use) | ||
| 724 | total_bytes = bytes; | ||
| 725 | else | ||
| 726 | total_bytes = bytes + ringbuf->reserved_size; | ||
| 770 | 727 | ||
| 771 | if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) { | 728 | if (unlikely(bytes > remain_usable)) { |
| 772 | ret = logical_ring_wrap_buffer(ringbuf, ctx); | 729 | /* |
| 773 | if (unlikely(ret)) | 730 | * Not enough space for the basic request. So need to flush |
| 774 | return ret; | 731 | * out the remainder and then wait for base + reserved. |
| 732 | */ | ||
| 733 | wait_bytes = remain_actual + total_bytes; | ||
| 734 | need_wrap = true; | ||
| 735 | } else { | ||
| 736 | if (unlikely(total_bytes > remain_usable)) { | ||
| 737 | /* | ||
| 738 | * The base request will fit but the reserved space | ||
| 739 | * falls off the end. So only need to wait for the | ||
| 740 | * reserved size after flushing out the remainder. | ||
| 741 | */ | ||
| 742 | wait_bytes = remain_actual + ringbuf->reserved_size; | ||
| 743 | need_wrap = true; | ||
| 744 | } else if (total_bytes > ringbuf->space) { | ||
| 745 | /* No wrapping required, just waiting. */ | ||
| 746 | wait_bytes = total_bytes; | ||
| 747 | } | ||
| 775 | } | 748 | } |
| 776 | 749 | ||
| 777 | if (unlikely(ringbuf->space < bytes)) { | 750 | if (wait_bytes) { |
| 778 | ret = logical_ring_wait_for_space(ringbuf, ctx, bytes); | 751 | ret = logical_ring_wait_for_space(req, wait_bytes); |
| 779 | if (unlikely(ret)) | 752 | if (unlikely(ret)) |
| 780 | return ret; | 753 | return ret; |
| 754 | |||
| 755 | if (need_wrap) | ||
| 756 | __wrap_ring_buffer(ringbuf); | ||
| 781 | } | 757 | } |
| 782 | 758 | ||
| 783 | return 0; | 759 | return 0; |
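A concrete example may help with the reserved-space accounting introduced in logical_ring_prepare() above (the reservation itself is set up by intel_logical_ring_reserve_space(), added further down in this patch). This is a minimal sketch with made-up sizes, assuming reserved_in_use is false; the standalone function is illustrative only:

	#include <stdbool.h>

	/* Sketch of the wait/wrap decision in logical_ring_prepare(). */
	static int sketch_wait_bytes(int size, int effective_size, int tail,
				     int reserved_size, int bytes, bool *need_wrap)
	{
		int remain_usable = effective_size - tail;
		int remain_actual = size - tail;
		int total_bytes = bytes + reserved_size;	/* reserved_in_use == false */

		*need_wrap = false;
		if (bytes > remain_usable) {
			/* the request itself does not fit before the wrap point */
			*need_wrap = true;
			return remain_actual + total_bytes;
		}
		if (total_bytes > remain_usable) {
			/* the request fits but the reservation falls off the end */
			*need_wrap = true;
			return remain_actual + reserved_size;
		}
		return total_bytes;	/* wait only if this exceeds ringbuf->space */
	}

For example, with size 4096, effective_size 4088, tail 4000, reserved_size 160 and bytes 64: the request fits (64 <= 88 usable bytes) but the reservation does not (224 > 88), so the caller waits for 96 + 160 = 256 bytes and wraps the buffer before writing.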
| @@ -786,7 +762,8 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, | |||
| 786 | /** | 762 | /** |
| 787 | * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands | 763 | * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands |
| 788 | * | 764 | * |
| 789 | * @ringbuf: Logical ringbuffer. | 765 | * @request: The request to start some new work for |
| 766 | * @ctx: Logical ring context whose ringbuffer is being prepared. | ||
| 790 | * @num_dwords: number of DWORDs that we plan to write to the ringbuffer. | 767 | * @num_dwords: number of DWORDs that we plan to write to the ringbuffer. |
| 791 | * | 768 | * |
| 792 | * The ringbuffer might not be ready to accept the commands right away (maybe it needs to | 769 | * The ringbuffer might not be ready to accept the commands right away (maybe it needs to |
| @@ -796,32 +773,42 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, | |||
| 796 | * | 773 | * |
| 797 | * Return: non-zero if the ringbuffer is not ready to be written to. | 774 | * Return: non-zero if the ringbuffer is not ready to be written to. |
| 798 | */ | 775 | */ |
| 799 | static int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, | 776 | int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords) |
| 800 | struct intel_context *ctx, int num_dwords) | ||
| 801 | { | 777 | { |
| 802 | struct intel_engine_cs *ring = ringbuf->ring; | 778 | struct drm_i915_private *dev_priv; |
| 803 | struct drm_device *dev = ring->dev; | ||
| 804 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 805 | int ret; | 779 | int ret; |
| 806 | 780 | ||
| 781 | WARN_ON(req == NULL); | ||
| 782 | dev_priv = req->ring->dev->dev_private; | ||
| 783 | |||
| 807 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, | 784 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, |
| 808 | dev_priv->mm.interruptible); | 785 | dev_priv->mm.interruptible); |
| 809 | if (ret) | 786 | if (ret) |
| 810 | return ret; | 787 | return ret; |
| 811 | 788 | ||
| 812 | ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t)); | 789 | ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t)); |
| 813 | if (ret) | ||
| 814 | return ret; | ||
| 815 | |||
| 816 | /* Preallocate the olr before touching the ring */ | ||
| 817 | ret = i915_gem_request_alloc(ring, ctx); | ||
| 818 | if (ret) | 790 | if (ret) |
| 819 | return ret; | 791 | return ret; |
| 820 | 792 | ||
| 821 | ringbuf->space -= num_dwords * sizeof(uint32_t); | 793 | req->ringbuf->space -= num_dwords * sizeof(uint32_t); |
| 822 | return 0; | 794 | return 0; |
| 823 | } | 795 | } |
| 824 | 796 | ||
| 797 | int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request) | ||
| 798 | { | ||
| 799 | /* | ||
| 800 | * The first call merely notes the reserve request and is common for | ||
| 801 | * all back ends. The subsequent localised _begin() call actually | ||
| 802 | * ensures that the reservation is available. Without the begin, if | ||
| 803 | * the request creator immediately submitted the request without | ||
| 804 | * adding any commands to it then there might not actually be | ||
| 805 | * sufficient room for the submission commands. | ||
| 806 | */ | ||
| 807 | intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); | ||
| 808 | |||
| 809 | return intel_logical_ring_begin(request, 0); | ||
| 810 | } | ||
| 811 | |||
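The reservation above is deliberately two-phase: intel_logical_ring_reserve_space() only records the reserved size and checks that room for it exists, while the actual commands still go through the usual begin/emit/advance calls. A minimal sketch of a caller under this scheme (the wrapper function and the two NOOPs are invented for illustration; only the reserve/begin/emit/advance interfaces come from this patch):

static int example_emit_two_noops(struct drm_i915_gem_request *req)
{
	struct intel_ringbuffer *ringbuf = req->ringbuf;
	int ret;

	/* Note the reservation needed to close out the request later. */
	ret = intel_logical_ring_reserve_space(req);
	if (ret)
		return ret;

	/* Wait (and wrap, if necessary) until two dwords of space exist. */
	ret = intel_logical_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance(ringbuf);

	return 0;
}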
| 825 | /** | 812 | /** |
| 826 | * execlists_submission() - submit a batchbuffer for execution, Execlists style | 813 | * execlists_submission() - submit a batchbuffer for execution, Execlists style |
| 827 | * @dev: DRM device. | 814 | * @dev: DRM device. |
| @@ -839,16 +826,15 @@ static int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, | |||
| 839 | * | 826 | * |
| 840 | * Return: non-zero if the submission fails. | 827 | * Return: non-zero if the submission fails. |
| 841 | */ | 828 | */ |
| 842 | int intel_execlists_submission(struct drm_device *dev, struct drm_file *file, | 829 | int intel_execlists_submission(struct i915_execbuffer_params *params, |
| 843 | struct intel_engine_cs *ring, | ||
| 844 | struct intel_context *ctx, | ||
| 845 | struct drm_i915_gem_execbuffer2 *args, | 830 | struct drm_i915_gem_execbuffer2 *args, |
| 846 | struct list_head *vmas, | 831 | struct list_head *vmas) |
| 847 | struct drm_i915_gem_object *batch_obj, | ||
| 848 | u64 exec_start, u32 dispatch_flags) | ||
| 849 | { | 832 | { |
| 833 | struct drm_device *dev = params->dev; | ||
| 834 | struct intel_engine_cs *ring = params->ring; | ||
| 850 | struct drm_i915_private *dev_priv = dev->dev_private; | 835 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 851 | struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; | 836 | struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf; |
| 837 | u64 exec_start; | ||
| 852 | int instp_mode; | 838 | int instp_mode; |
| 853 | u32 instp_mask; | 839 | u32 instp_mask; |
| 854 | int ret; | 840 | int ret; |
| @@ -899,13 +885,13 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file, | |||
| 899 | return -EINVAL; | 885 | return -EINVAL; |
| 900 | } | 886 | } |
| 901 | 887 | ||
| 902 | ret = execlists_move_to_gpu(ringbuf, ctx, vmas); | 888 | ret = execlists_move_to_gpu(params->request, vmas); |
| 903 | if (ret) | 889 | if (ret) |
| 904 | return ret; | 890 | return ret; |
| 905 | 891 | ||
| 906 | if (ring == &dev_priv->ring[RCS] && | 892 | if (ring == &dev_priv->ring[RCS] && |
| 907 | instp_mode != dev_priv->relative_constants_mode) { | 893 | instp_mode != dev_priv->relative_constants_mode) { |
| 908 | ret = intel_logical_ring_begin(ringbuf, ctx, 4); | 894 | ret = intel_logical_ring_begin(params->request, 4); |
| 909 | if (ret) | 895 | if (ret) |
| 910 | return ret; | 896 | return ret; |
| 911 | 897 | ||
| @@ -918,14 +904,17 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file, | |||
| 918 | dev_priv->relative_constants_mode = instp_mode; | 904 | dev_priv->relative_constants_mode = instp_mode; |
| 919 | } | 905 | } |
| 920 | 906 | ||
| 921 | ret = ring->emit_bb_start(ringbuf, ctx, exec_start, dispatch_flags); | 907 | exec_start = params->batch_obj_vm_offset + |
| 908 | args->batch_start_offset; | ||
| 909 | |||
| 910 | ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags); | ||
| 922 | if (ret) | 911 | if (ret) |
| 923 | return ret; | 912 | return ret; |
| 924 | 913 | ||
| 925 | trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags); | 914 | trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags); |
| 926 | 915 | ||
| 927 | i915_gem_execbuffer_move_to_active(vmas, ring); | 916 | i915_gem_execbuffer_move_to_active(vmas, params->request); |
| 928 | i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); | 917 | i915_gem_execbuffer_retire_commands(params); |
| 929 | 918 | ||
| 930 | return 0; | 919 | return 0; |
| 931 | } | 920 | } |
| @@ -950,7 +939,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring) | |||
| 950 | ctx->engine[ring->id].state; | 939 | ctx->engine[ring->id].state; |
| 951 | 940 | ||
| 952 | if (ctx_obj && (ctx != ring->default_context)) | 941 | if (ctx_obj && (ctx != ring->default_context)) |
| 953 | intel_lr_context_unpin(ring, ctx); | 942 | intel_lr_context_unpin(req); |
| 954 | list_del(&req->execlist_link); | 943 | list_del(&req->execlist_link); |
| 955 | i915_gem_request_unreference(req); | 944 | i915_gem_request_unreference(req); |
| 956 | } | 945 | } |
| @@ -978,16 +967,15 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring) | |||
| 978 | I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING)); | 967 | I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING)); |
| 979 | } | 968 | } |
| 980 | 969 | ||
| 981 | int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf, | 970 | int logical_ring_flush_all_caches(struct drm_i915_gem_request *req) |
| 982 | struct intel_context *ctx) | ||
| 983 | { | 971 | { |
| 984 | struct intel_engine_cs *ring = ringbuf->ring; | 972 | struct intel_engine_cs *ring = req->ring; |
| 985 | int ret; | 973 | int ret; |
| 986 | 974 | ||
| 987 | if (!ring->gpu_caches_dirty) | 975 | if (!ring->gpu_caches_dirty) |
| 988 | return 0; | 976 | return 0; |
| 989 | 977 | ||
| 990 | ret = ring->emit_flush(ringbuf, ctx, 0, I915_GEM_GPU_DOMAINS); | 978 | ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS); |
| 991 | if (ret) | 979 | if (ret) |
| 992 | return ret; | 980 | return ret; |
| 993 | 981 | ||
| @@ -995,15 +983,15 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf, | |||
| 995 | return 0; | 983 | return 0; |
| 996 | } | 984 | } |
| 997 | 985 | ||
| 998 | static int intel_lr_context_pin(struct intel_engine_cs *ring, | 986 | static int intel_lr_context_pin(struct drm_i915_gem_request *rq) |
| 999 | struct intel_context *ctx) | ||
| 1000 | { | 987 | { |
| 1001 | struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; | 988 | struct intel_engine_cs *ring = rq->ring; |
| 1002 | struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; | 989 | struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; |
| 990 | struct intel_ringbuffer *ringbuf = rq->ringbuf; | ||
| 1003 | int ret = 0; | 991 | int ret = 0; |
| 1004 | 992 | ||
| 1005 | WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); | 993 | WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); |
| 1006 | if (ctx->engine[ring->id].pin_count++ == 0) { | 994 | if (rq->ctx->engine[ring->id].pin_count++ == 0) { |
| 1007 | ret = i915_gem_obj_ggtt_pin(ctx_obj, | 995 | ret = i915_gem_obj_ggtt_pin(ctx_obj, |
| 1008 | GEN8_LR_CONTEXT_ALIGN, 0); | 996 | GEN8_LR_CONTEXT_ALIGN, 0); |
| 1009 | if (ret) | 997 | if (ret) |
| @@ -1019,31 +1007,31 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring, | |||
| 1019 | unpin_ctx_obj: | 1007 | unpin_ctx_obj: |
| 1020 | i915_gem_object_ggtt_unpin(ctx_obj); | 1008 | i915_gem_object_ggtt_unpin(ctx_obj); |
| 1021 | reset_pin_count: | 1009 | reset_pin_count: |
| 1022 | ctx->engine[ring->id].pin_count = 0; | 1010 | rq->ctx->engine[ring->id].pin_count = 0; |
| 1023 | 1011 | ||
| 1024 | return ret; | 1012 | return ret; |
| 1025 | } | 1013 | } |
| 1026 | 1014 | ||
| 1027 | void intel_lr_context_unpin(struct intel_engine_cs *ring, | 1015 | void intel_lr_context_unpin(struct drm_i915_gem_request *rq) |
| 1028 | struct intel_context *ctx) | ||
| 1029 | { | 1016 | { |
| 1030 | struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; | 1017 | struct intel_engine_cs *ring = rq->ring; |
| 1031 | struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; | 1018 | struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; |
| 1019 | struct intel_ringbuffer *ringbuf = rq->ringbuf; | ||
| 1032 | 1020 | ||
| 1033 | if (ctx_obj) { | 1021 | if (ctx_obj) { |
| 1034 | WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); | 1022 | WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); |
| 1035 | if (--ctx->engine[ring->id].pin_count == 0) { | 1023 | if (--rq->ctx->engine[ring->id].pin_count == 0) { |
| 1036 | intel_unpin_ringbuffer_obj(ringbuf); | 1024 | intel_unpin_ringbuffer_obj(ringbuf); |
| 1037 | i915_gem_object_ggtt_unpin(ctx_obj); | 1025 | i915_gem_object_ggtt_unpin(ctx_obj); |
| 1038 | } | 1026 | } |
| 1039 | } | 1027 | } |
| 1040 | } | 1028 | } |
| 1041 | 1029 | ||
| 1042 | static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring, | 1030 | static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) |
| 1043 | struct intel_context *ctx) | ||
| 1044 | { | 1031 | { |
| 1045 | int ret, i; | 1032 | int ret, i; |
| 1046 | struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; | 1033 | struct intel_engine_cs *ring = req->ring; |
| 1034 | struct intel_ringbuffer *ringbuf = req->ringbuf; | ||
| 1047 | struct drm_device *dev = ring->dev; | 1035 | struct drm_device *dev = ring->dev; |
| 1048 | struct drm_i915_private *dev_priv = dev->dev_private; | 1036 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1049 | struct i915_workarounds *w = &dev_priv->workarounds; | 1037 | struct i915_workarounds *w = &dev_priv->workarounds; |
| @@ -1052,11 +1040,11 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring, | |||
| 1052 | return 0; | 1040 | return 0; |
| 1053 | 1041 | ||
| 1054 | ring->gpu_caches_dirty = true; | 1042 | ring->gpu_caches_dirty = true; |
| 1055 | ret = logical_ring_flush_all_caches(ringbuf, ctx); | 1043 | ret = logical_ring_flush_all_caches(req); |
| 1056 | if (ret) | 1044 | if (ret) |
| 1057 | return ret; | 1045 | return ret; |
| 1058 | 1046 | ||
| 1059 | ret = intel_logical_ring_begin(ringbuf, ctx, w->count * 2 + 2); | 1047 | ret = intel_logical_ring_begin(req, w->count * 2 + 2); |
| 1060 | if (ret) | 1048 | if (ret) |
| 1061 | return ret; | 1049 | return ret; |
| 1062 | 1050 | ||
| @@ -1070,13 +1058,361 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring, | |||
| 1070 | intel_logical_ring_advance(ringbuf); | 1058 | intel_logical_ring_advance(ringbuf); |
| 1071 | 1059 | ||
| 1072 | ring->gpu_caches_dirty = true; | 1060 | ring->gpu_caches_dirty = true; |
| 1073 | ret = logical_ring_flush_all_caches(ringbuf, ctx); | 1061 | ret = logical_ring_flush_all_caches(req); |
| 1074 | if (ret) | 1062 | if (ret) |
| 1075 | return ret; | 1063 | return ret; |
| 1076 | 1064 | ||
| 1077 | return 0; | 1065 | return 0; |
| 1078 | } | 1066 | } |
| 1079 | 1067 | ||
| 1068 | #define wa_ctx_emit(batch, index, cmd) \ | ||
| 1069 | do { \ | ||
| 1070 | int __index = (index)++; \ | ||
| 1071 | if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \ | ||
| 1072 | return -ENOSPC; \ | ||
| 1073 | } \ | ||
| 1074 | batch[__index] = (cmd); \ | ||
| 1075 | } while (0) | ||
| 1076 | |||
| 1077 | |||
| 1078 | /* | ||
| 1079 | * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after | ||
| 1080 | * PIPE_CONTROL instruction. This is required for the flush to happen correctly | ||
| 1081 | * but there is a slight complication as this is applied in WA batch where the | ||
| 1082 | * values are only initialized once so we cannot take register value at the | ||
| 1083 | * beginning and reuse it further; hence we save its value to memory, upload a | ||
| 1084 | * constant value with bit21 set and then we restore it back with the saved value. | ||
| 1085 | * To simplify the WA, a constant value is formed by using the default value | ||
| 1086 | * of this register. This shouldn't be a problem because we are only modifying | ||
| 1087 | * it for a short period and this batch is non-preemptible. We can of course | ||
| 1088 | * use additional instructions that read the actual value of the register | ||
| 1089 | * at that time and set our bit of interest but it makes the WA complicated. | ||
| 1090 | * | ||
| 1091 | * This WA is also required for Gen9 so extracting as a function avoids | ||
| 1092 | * code duplication. | ||
| 1093 | */ | ||
| 1094 | static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring, | ||
| 1095 | uint32_t *const batch, | ||
| 1096 | uint32_t index) | ||
| 1097 | { | ||
| 1098 | uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); | ||
| 1099 | |||
| 1100 | /* | ||
| 1101 | * WaDisableLSQCROPERFforOCL:skl | ||
| 1102 | * This WA is implemented in skl_init_clock_gating() but since | ||
| 1103 | * this batch updates GEN8_L3SQCREG4 with default value we need to | ||
| 1104 | * set this bit here to retain the WA during flush. | ||
| 1105 | */ | ||
| 1106 | if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0) | ||
| 1107 | l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; | ||
| 1108 | |||
| 1109 | wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8(1) | | ||
| 1110 | MI_SRM_LRM_GLOBAL_GTT)); | ||
| 1111 | wa_ctx_emit(batch, index, GEN8_L3SQCREG4); | ||
| 1112 | wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); | ||
| 1113 | wa_ctx_emit(batch, index, 0); | ||
| 1114 | |||
| 1115 | wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); | ||
| 1116 | wa_ctx_emit(batch, index, GEN8_L3SQCREG4); | ||
| 1117 | wa_ctx_emit(batch, index, l3sqc4_flush); | ||
| 1118 | |||
| 1119 | wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); | ||
| 1120 | wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL | | ||
| 1121 | PIPE_CONTROL_DC_FLUSH_ENABLE)); | ||
| 1122 | wa_ctx_emit(batch, index, 0); | ||
| 1123 | wa_ctx_emit(batch, index, 0); | ||
| 1124 | wa_ctx_emit(batch, index, 0); | ||
| 1125 | wa_ctx_emit(batch, index, 0); | ||
| 1126 | |||
| 1127 | wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8(1) | | ||
| 1128 | MI_SRM_LRM_GLOBAL_GTT)); | ||
| 1129 | wa_ctx_emit(batch, index, GEN8_L3SQCREG4); | ||
| 1130 | wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); | ||
| 1131 | wa_ctx_emit(batch, index, 0); | ||
| 1132 | |||
| 1133 | return index; | ||
| 1134 | } | ||
| 1135 | |||
| 1136 | static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx, | ||
| 1137 | uint32_t offset, | ||
| 1138 | uint32_t start_alignment) | ||
| 1139 | { | ||
| 1140 | return wa_ctx->offset = ALIGN(offset, start_alignment); | ||
| 1141 | } | ||
| 1142 | |||
| 1143 | static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx, | ||
| 1144 | uint32_t offset, | ||
| 1145 | uint32_t size_alignment) | ||
| 1146 | { | ||
| 1147 | wa_ctx->size = offset - wa_ctx->offset; | ||
| 1148 | |||
| 1149 | WARN(wa_ctx->size % size_alignment, | ||
| 1150 | "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n", | ||
| 1151 | wa_ctx->size, size_alignment); | ||
| 1152 | return 0; | ||
| 1153 | } | ||
| 1154 | |||
| 1155 | /** | ||
| 1156 | * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA | ||
| 1157 | * | ||
| 1158 | * @ring: only applicable for RCS | ||
| 1159 | * @wa_ctx: structure representing wa_ctx | ||
| 1160 | * offset: specifies start of the batch, should be cache-aligned. This is updated | ||
| 1161 | * with the offset value received as input. | ||
| 1162 | * size: size of the batch in DWORDS but HW expects in terms of cachelines | ||
| 1163 | * @batch: page in which WA are loaded | ||
| 1164 | * @offset: This field specifies the start of the batch, it should be | ||
| 1165 | * cache-aligned otherwise it is adjusted accordingly. | ||
| 1166 | * Typically we only have one indirect_ctx and per_ctx batch buffer which are | ||
| 1167 | * initialized at the beginning and shared across all contexts but this field | ||
| 1168 | * helps us to have multiple batches at different offsets and select them based | ||
| 1169 | * on some criterion. At the moment this batch always starts at the beginning of the page | ||
| 1170 | * and at this point we don't have multiple wa_ctx batch buffers. | ||
| 1171 | * | ||
| 1172 | * The number of WAs applied is not known at the beginning; we use this field | ||
| 1173 | * to return the number of DWORDS written. | ||
| 1174 | * | ||
| 1175 | * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END | ||
| 1176 | * so it adds NOOPs as padding to make it cacheline aligned. | ||
| 1177 | * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together | ||
| 1178 | * makes a complete batch buffer. | ||
| 1179 | * | ||
| 1180 | * Return: non-zero if we exceed the PAGE_SIZE limit. | ||
| 1181 | */ | ||
| 1182 | |||
| 1183 | static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring, | ||
| 1184 | struct i915_wa_ctx_bb *wa_ctx, | ||
| 1185 | uint32_t *const batch, | ||
| 1186 | uint32_t *offset) | ||
| 1187 | { | ||
| 1188 | uint32_t scratch_addr; | ||
| 1189 | uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); | ||
| 1190 | |||
| 1191 | /* WaDisableCtxRestoreArbitration:bdw,chv */ | ||
| 1192 | wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); | ||
| 1193 | |||
| 1194 | /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ | ||
| 1195 | if (IS_BROADWELL(ring->dev)) { | ||
| 1196 | index = gen8_emit_flush_coherentl3_wa(ring, batch, index); | ||
| 1197 | if (index < 0) | ||
| 1198 | return index; | ||
| 1199 | } | ||
| 1200 | |||
| 1201 | /* WaClearSlmSpaceAtContextSwitch:bdw,chv */ | ||
| 1202 | /* Actual scratch location is at 128 bytes offset */ | ||
| 1203 | scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES; | ||
| 1204 | |||
| 1205 | wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); | ||
| 1206 | wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 | | ||
| 1207 | PIPE_CONTROL_GLOBAL_GTT_IVB | | ||
| 1208 | PIPE_CONTROL_CS_STALL | | ||
| 1209 | PIPE_CONTROL_QW_WRITE)); | ||
| 1210 | wa_ctx_emit(batch, index, scratch_addr); | ||
| 1211 | wa_ctx_emit(batch, index, 0); | ||
| 1212 | wa_ctx_emit(batch, index, 0); | ||
| 1213 | wa_ctx_emit(batch, index, 0); | ||
| 1214 | |||
| 1215 | /* Pad to end of cacheline */ | ||
| 1216 | while (index % CACHELINE_DWORDS) | ||
| 1217 | wa_ctx_emit(batch, index, MI_NOOP); | ||
| 1218 | |||
| 1219 | /* | ||
| 1220 | * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because | ||
| 1221 | * execution depends on the length specified in terms of cache lines | ||
| 1222 | * in the register CTX_RCS_INDIRECT_CTX | ||
| 1223 | */ | ||
| 1224 | |||
| 1225 | return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS); | ||
| 1226 | } | ||
| 1227 | |||
| 1228 | /** | ||
| 1229 | * gen8_init_perctx_bb() - initialize per ctx batch with WA | ||
| 1230 | * | ||
| 1231 | * @ring: only applicable for RCS | ||
| 1232 | * @wa_ctx: structure representing wa_ctx | ||
| 1233 | * offset: specifies start of the batch, should be cache-aligned. | ||
| 1234 | * size: size of the batch in DWORDS but HW expects in terms of cachelines | ||
| 1235 | * @batch: page in which WA are loaded | ||
| 1236 | * @offset: This field specifies the start of this batch. | ||
| 1237 | * This batch is started immediately after indirect_ctx batch. Since we ensure | ||
| 1238 | * that indirect_ctx ends on a cacheline this batch is aligned automatically. | ||
| 1239 | * | ||
| 1240 | * The number of DWORDS written are returned using this field. | ||
| 1241 | * | ||
| 1242 | * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding | ||
| 1243 | * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant. | ||
| 1244 | */ | ||
| 1245 | static int gen8_init_perctx_bb(struct intel_engine_cs *ring, | ||
| 1246 | struct i915_wa_ctx_bb *wa_ctx, | ||
| 1247 | uint32_t *const batch, | ||
| 1248 | uint32_t *offset) | ||
| 1249 | { | ||
| 1250 | uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); | ||
| 1251 | |||
| 1252 | /* WaDisableCtxRestoreArbitration:bdw,chv */ | ||
| 1253 | wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE); | ||
| 1254 | |||
| 1255 | wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END); | ||
| 1256 | |||
| 1257 | return wa_ctx_end(wa_ctx, *offset = index, 1); | ||
| 1258 | } | ||
| 1259 | |||
| 1260 | static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring, | ||
| 1261 | struct i915_wa_ctx_bb *wa_ctx, | ||
| 1262 | uint32_t *const batch, | ||
| 1263 | uint32_t *offset) | ||
| 1264 | { | ||
| 1265 | int ret; | ||
| 1266 | struct drm_device *dev = ring->dev; | ||
| 1267 | uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); | ||
| 1268 | |||
| 1269 | /* WaDisableCtxRestoreArbitration:skl,bxt */ | ||
| 1270 | if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) || | ||
| 1271 | (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) | ||
| 1272 | wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); | ||
| 1273 | |||
| 1274 | /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */ | ||
| 1275 | ret = gen8_emit_flush_coherentl3_wa(ring, batch, index); | ||
| 1276 | if (ret < 0) | ||
| 1277 | return ret; | ||
| 1278 | index = ret; | ||
| 1279 | |||
| 1280 | /* Pad to end of cacheline */ | ||
| 1281 | while (index % CACHELINE_DWORDS) | ||
| 1282 | wa_ctx_emit(batch, index, MI_NOOP); | ||
| 1283 | |||
| 1284 | return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS); | ||
| 1285 | } | ||
| 1286 | |||
| 1287 | static int gen9_init_perctx_bb(struct intel_engine_cs *ring, | ||
| 1288 | struct i915_wa_ctx_bb *wa_ctx, | ||
| 1289 | uint32_t *const batch, | ||
| 1290 | uint32_t *offset) | ||
| 1291 | { | ||
| 1292 | struct drm_device *dev = ring->dev; | ||
| 1293 | uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); | ||
| 1294 | |||
| 1295 | /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ | ||
| 1296 | if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_B0)) || | ||
| 1297 | (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) { | ||
| 1298 | wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); | ||
| 1299 | wa_ctx_emit(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0); | ||
| 1300 | wa_ctx_emit(batch, index, | ||
| 1301 | _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING)); | ||
| 1302 | wa_ctx_emit(batch, index, MI_NOOP); | ||
| 1303 | } | ||
| 1304 | |||
| 1305 | /* WaDisableCtxRestoreArbitration:skl,bxt */ | ||
| 1306 | if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) || | ||
| 1307 | (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) | ||
| 1308 | wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE); | ||
| 1309 | |||
| 1310 | wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END); | ||
| 1311 | |||
| 1312 | return wa_ctx_end(wa_ctx, *offset = index, 1); | ||
| 1313 | } | ||
| 1314 | |||
| 1315 | static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size) | ||
| 1316 | { | ||
| 1317 | int ret; | ||
| 1318 | |||
| 1319 | ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size)); | ||
| 1320 | if (!ring->wa_ctx.obj) { | ||
| 1321 | DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n"); | ||
| 1322 | return -ENOMEM; | ||
| 1323 | } | ||
| 1324 | |||
| 1325 | ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0); | ||
| 1326 | if (ret) { | ||
| 1327 | DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n", | ||
| 1328 | ret); | ||
| 1329 | drm_gem_object_unreference(&ring->wa_ctx.obj->base); | ||
| 1330 | return ret; | ||
| 1331 | } | ||
| 1332 | |||
| 1333 | return 0; | ||
| 1334 | } | ||
| 1335 | |||
| 1336 | static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring) | ||
| 1337 | { | ||
| 1338 | if (ring->wa_ctx.obj) { | ||
| 1339 | i915_gem_object_ggtt_unpin(ring->wa_ctx.obj); | ||
| 1340 | drm_gem_object_unreference(&ring->wa_ctx.obj->base); | ||
| 1341 | ring->wa_ctx.obj = NULL; | ||
| 1342 | } | ||
| 1343 | } | ||
| 1344 | |||
| 1345 | static int intel_init_workaround_bb(struct intel_engine_cs *ring) | ||
| 1346 | { | ||
| 1347 | int ret; | ||
| 1348 | uint32_t *batch; | ||
| 1349 | uint32_t offset; | ||
| 1350 | struct page *page; | ||
| 1351 | struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx; | ||
| 1352 | |||
| 1353 | WARN_ON(ring->id != RCS); | ||
| 1354 | |||
| 1355 | /* update this when WA for higher Gen are added */ | ||
| 1356 | if (INTEL_INFO(ring->dev)->gen > 9) { | ||
| 1357 | DRM_ERROR("WA batch buffer is not initialized for Gen%d\n", | ||
| 1358 | INTEL_INFO(ring->dev)->gen); | ||
| 1359 | return 0; | ||
| 1360 | } | ||
| 1361 | |||
| 1362 | /* some WA perform writes to scratch page, ensure it is valid */ | ||
| 1363 | if (ring->scratch.obj == NULL) { | ||
| 1364 | DRM_ERROR("scratch page not allocated for %s\n", ring->name); | ||
| 1365 | return -EINVAL; | ||
| 1366 | } | ||
| 1367 | |||
| 1368 | ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE); | ||
| 1369 | if (ret) { | ||
| 1370 | DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret); | ||
| 1371 | return ret; | ||
| 1372 | } | ||
| 1373 | |||
| 1374 | page = i915_gem_object_get_page(wa_ctx->obj, 0); | ||
| 1375 | batch = kmap_atomic(page); | ||
| 1376 | offset = 0; | ||
| 1377 | |||
| 1378 | if (INTEL_INFO(ring->dev)->gen == 8) { | ||
| 1379 | ret = gen8_init_indirectctx_bb(ring, | ||
| 1380 | &wa_ctx->indirect_ctx, | ||
| 1381 | batch, | ||
| 1382 | &offset); | ||
| 1383 | if (ret) | ||
| 1384 | goto out; | ||
| 1385 | |||
| 1386 | ret = gen8_init_perctx_bb(ring, | ||
| 1387 | &wa_ctx->per_ctx, | ||
| 1388 | batch, | ||
| 1389 | &offset); | ||
| 1390 | if (ret) | ||
| 1391 | goto out; | ||
| 1392 | } else if (INTEL_INFO(ring->dev)->gen == 9) { | ||
| 1393 | ret = gen9_init_indirectctx_bb(ring, | ||
| 1394 | &wa_ctx->indirect_ctx, | ||
| 1395 | batch, | ||
| 1396 | &offset); | ||
| 1397 | if (ret) | ||
| 1398 | goto out; | ||
| 1399 | |||
| 1400 | ret = gen9_init_perctx_bb(ring, | ||
| 1401 | &wa_ctx->per_ctx, | ||
| 1402 | batch, | ||
| 1403 | &offset); | ||
| 1404 | if (ret) | ||
| 1405 | goto out; | ||
| 1406 | } | ||
| 1407 | |||
| 1408 | out: | ||
| 1409 | kunmap_atomic(batch); | ||
| 1410 | if (ret) | ||
| 1411 | lrc_destroy_wa_ctx_obj(ring); | ||
| 1412 | |||
| 1413 | return ret; | ||
| 1414 | } | ||
| 1415 | |||
| 1080 | static int gen8_init_common_ring(struct intel_engine_cs *ring) | 1416 | static int gen8_init_common_ring(struct intel_engine_cs *ring) |
| 1081 | { | 1417 | { |
| 1082 | struct drm_device *dev = ring->dev; | 1418 | struct drm_device *dev = ring->dev; |
| @@ -1137,19 +1473,64 @@ static int gen9_init_render_ring(struct intel_engine_cs *ring) | |||
| 1137 | return init_workarounds_ring(ring); | 1473 | return init_workarounds_ring(ring); |
| 1138 | } | 1474 | } |
| 1139 | 1475 | ||
| 1140 | static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf, | 1476 | static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) |
| 1141 | struct intel_context *ctx, | 1477 | { |
| 1478 | struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt; | ||
| 1479 | struct intel_engine_cs *ring = req->ring; | ||
| 1480 | struct intel_ringbuffer *ringbuf = req->ringbuf; | ||
| 1481 | const int num_lri_cmds = GEN8_LEGACY_PDPES * 2; | ||
| 1482 | int i, ret; | ||
| 1483 | |||
| 1484 | ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2); | ||
| 1485 | if (ret) | ||
| 1486 | return ret; | ||
| 1487 | |||
| 1488 | intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds)); | ||
| 1489 | for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) { | ||
| 1490 | const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); | ||
| 1491 | |||
| 1492 | intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i)); | ||
| 1493 | intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr)); | ||
| 1494 | intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i)); | ||
| 1495 | intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr)); | ||
| 1496 | } | ||
| 1497 | |||
| 1498 | intel_logical_ring_emit(ringbuf, MI_NOOP); | ||
| 1499 | intel_logical_ring_advance(ringbuf); | ||
| 1500 | |||
| 1501 | return 0; | ||
| 1502 | } | ||
| 1503 | |||
| 1504 | static int gen8_emit_bb_start(struct drm_i915_gem_request *req, | ||
| 1142 | u64 offset, unsigned dispatch_flags) | 1505 | u64 offset, unsigned dispatch_flags) |
| 1143 | { | 1506 | { |
| 1507 | struct intel_ringbuffer *ringbuf = req->ringbuf; | ||
| 1144 | bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE); | 1508 | bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE); |
| 1145 | int ret; | 1509 | int ret; |
| 1146 | 1510 | ||
| 1147 | ret = intel_logical_ring_begin(ringbuf, ctx, 4); | 1511 | /* Don't rely on hw updating PDPs, especially in lite-restore. |
| 1512 | * Ideally, we should set Force PD Restore in ctx descriptor, | ||
| 1513 | * but we can't. Force Restore would be a second option, but | ||
| 1514 | * it is unsafe in case of lite-restore (because the ctx is | ||
| 1515 | * not idle). */ | ||
| 1516 | if (req->ctx->ppgtt && | ||
| 1517 | (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) { | ||
| 1518 | ret = intel_logical_ring_emit_pdps(req); | ||
| 1519 | if (ret) | ||
| 1520 | return ret; | ||
| 1521 | |||
| 1522 | req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring); | ||
| 1523 | } | ||
| 1524 | |||
| 1525 | ret = intel_logical_ring_begin(req, 4); | ||
| 1148 | if (ret) | 1526 | if (ret) |
| 1149 | return ret; | 1527 | return ret; |
| 1150 | 1528 | ||
| 1151 | /* FIXME(BDW): Address space and security selectors. */ | 1529 | /* FIXME(BDW): Address space and security selectors. */ |
| 1152 | intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8)); | 1530 | intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | |
| 1531 | (ppgtt<<8) | | ||
| 1532 | (dispatch_flags & I915_DISPATCH_RS ? | ||
| 1533 | MI_BATCH_RESOURCE_STREAMER : 0)); | ||
| 1153 | intel_logical_ring_emit(ringbuf, lower_32_bits(offset)); | 1534 | intel_logical_ring_emit(ringbuf, lower_32_bits(offset)); |
| 1154 | intel_logical_ring_emit(ringbuf, upper_32_bits(offset)); | 1535 | intel_logical_ring_emit(ringbuf, upper_32_bits(offset)); |
| 1155 | intel_logical_ring_emit(ringbuf, MI_NOOP); | 1536 | intel_logical_ring_emit(ringbuf, MI_NOOP); |
| @@ -1191,18 +1572,18 @@ static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring) | |||
| 1191 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 1572 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
| 1192 | } | 1573 | } |
| 1193 | 1574 | ||
| 1194 | static int gen8_emit_flush(struct intel_ringbuffer *ringbuf, | 1575 | static int gen8_emit_flush(struct drm_i915_gem_request *request, |
| 1195 | struct intel_context *ctx, | ||
| 1196 | u32 invalidate_domains, | 1576 | u32 invalidate_domains, |
| 1197 | u32 unused) | 1577 | u32 unused) |
| 1198 | { | 1578 | { |
| 1579 | struct intel_ringbuffer *ringbuf = request->ringbuf; | ||
| 1199 | struct intel_engine_cs *ring = ringbuf->ring; | 1580 | struct intel_engine_cs *ring = ringbuf->ring; |
| 1200 | struct drm_device *dev = ring->dev; | 1581 | struct drm_device *dev = ring->dev; |
| 1201 | struct drm_i915_private *dev_priv = dev->dev_private; | 1582 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1202 | uint32_t cmd; | 1583 | uint32_t cmd; |
| 1203 | int ret; | 1584 | int ret; |
| 1204 | 1585 | ||
| 1205 | ret = intel_logical_ring_begin(ringbuf, ctx, 4); | 1586 | ret = intel_logical_ring_begin(request, 4); |
| 1206 | if (ret) | 1587 | if (ret) |
| 1207 | return ret; | 1588 | return ret; |
| 1208 | 1589 | ||
| @@ -1232,11 +1613,11 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf, | |||
| 1232 | return 0; | 1613 | return 0; |
| 1233 | } | 1614 | } |
| 1234 | 1615 | ||
| 1235 | static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf, | 1616 | static int gen8_emit_flush_render(struct drm_i915_gem_request *request, |
| 1236 | struct intel_context *ctx, | ||
| 1237 | u32 invalidate_domains, | 1617 | u32 invalidate_domains, |
| 1238 | u32 flush_domains) | 1618 | u32 flush_domains) |
| 1239 | { | 1619 | { |
| 1620 | struct intel_ringbuffer *ringbuf = request->ringbuf; | ||
| 1240 | struct intel_engine_cs *ring = ringbuf->ring; | 1621 | struct intel_engine_cs *ring = ringbuf->ring; |
| 1241 | u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; | 1622 | u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; |
| 1242 | bool vf_flush_wa; | 1623 | bool vf_flush_wa; |
| @@ -1268,7 +1649,7 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf, | |||
| 1268 | vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 && | 1649 | vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 && |
| 1269 | flags & PIPE_CONTROL_VF_CACHE_INVALIDATE; | 1650 | flags & PIPE_CONTROL_VF_CACHE_INVALIDATE; |
| 1270 | 1651 | ||
| 1271 | ret = intel_logical_ring_begin(ringbuf, ctx, vf_flush_wa ? 12 : 6); | 1652 | ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6); |
| 1272 | if (ret) | 1653 | if (ret) |
| 1273 | return ret; | 1654 | return ret; |
| 1274 | 1655 | ||
| @@ -1302,9 +1683,9 @@ static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno) | |||
| 1302 | intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); | 1683 | intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); |
| 1303 | } | 1684 | } |
| 1304 | 1685 | ||
| 1305 | static int gen8_emit_request(struct intel_ringbuffer *ringbuf, | 1686 | static int gen8_emit_request(struct drm_i915_gem_request *request) |
| 1306 | struct drm_i915_gem_request *request) | ||
| 1307 | { | 1687 | { |
| 1688 | struct intel_ringbuffer *ringbuf = request->ringbuf; | ||
| 1308 | struct intel_engine_cs *ring = ringbuf->ring; | 1689 | struct intel_engine_cs *ring = ringbuf->ring; |
| 1309 | u32 cmd; | 1690 | u32 cmd; |
| 1310 | int ret; | 1691 | int ret; |
| @@ -1314,7 +1695,7 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf, | |||
| 1314 | * used as a workaround for not being allowed to do lite | 1695 | * used as a workaround for not being allowed to do lite |
| 1315 | * restore with HEAD==TAIL (WaIdleLiteRestore). | 1696 | * restore with HEAD==TAIL (WaIdleLiteRestore). |
| 1316 | */ | 1697 | */ |
| 1317 | ret = intel_logical_ring_begin(ringbuf, request->ctx, 8); | 1698 | ret = intel_logical_ring_begin(request, 8); |
| 1318 | if (ret) | 1699 | if (ret) |
| 1319 | return ret; | 1700 | return ret; |
| 1320 | 1701 | ||
| @@ -1326,11 +1707,10 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf, | |||
| 1326 | (ring->status_page.gfx_addr + | 1707 | (ring->status_page.gfx_addr + |
| 1327 | (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT))); | 1708 | (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT))); |
| 1328 | intel_logical_ring_emit(ringbuf, 0); | 1709 | intel_logical_ring_emit(ringbuf, 0); |
| 1329 | intel_logical_ring_emit(ringbuf, | 1710 | intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); |
| 1330 | i915_gem_request_get_seqno(ring->outstanding_lazy_request)); | ||
| 1331 | intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); | 1711 | intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); |
| 1332 | intel_logical_ring_emit(ringbuf, MI_NOOP); | 1712 | intel_logical_ring_emit(ringbuf, MI_NOOP); |
| 1333 | intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request); | 1713 | intel_logical_ring_advance_and_submit(request); |
| 1334 | 1714 | ||
| 1335 | /* | 1715 | /* |
| 1336 | * Here we add two extra NOOPs as padding to avoid | 1716 | * Here we add two extra NOOPs as padding to avoid |
| @@ -1343,49 +1723,47 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf, | |||
| 1343 | return 0; | 1723 | return 0; |
| 1344 | } | 1724 | } |
| 1345 | 1725 | ||
| 1346 | static int intel_lr_context_render_state_init(struct intel_engine_cs *ring, | 1726 | static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req) |
| 1347 | struct intel_context *ctx) | ||
| 1348 | { | 1727 | { |
| 1349 | struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; | ||
| 1350 | struct render_state so; | 1728 | struct render_state so; |
| 1351 | struct drm_i915_file_private *file_priv = ctx->file_priv; | ||
| 1352 | struct drm_file *file = file_priv ? file_priv->file : NULL; | ||
| 1353 | int ret; | 1729 | int ret; |
| 1354 | 1730 | ||
| 1355 | ret = i915_gem_render_state_prepare(ring, &so); | 1731 | ret = i915_gem_render_state_prepare(req->ring, &so); |
| 1356 | if (ret) | 1732 | if (ret) |
| 1357 | return ret; | 1733 | return ret; |
| 1358 | 1734 | ||
| 1359 | if (so.rodata == NULL) | 1735 | if (so.rodata == NULL) |
| 1360 | return 0; | 1736 | return 0; |
| 1361 | 1737 | ||
| 1362 | ret = ring->emit_bb_start(ringbuf, | 1738 | ret = req->ring->emit_bb_start(req, so.ggtt_offset, |
| 1363 | ctx, | 1739 | I915_DISPATCH_SECURE); |
| 1364 | so.ggtt_offset, | ||
| 1365 | I915_DISPATCH_SECURE); | ||
| 1366 | if (ret) | 1740 | if (ret) |
| 1367 | goto out; | 1741 | goto out; |
| 1368 | 1742 | ||
| 1369 | i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring); | 1743 | i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req); |
| 1370 | 1744 | ||
| 1371 | ret = __i915_add_request(ring, file, so.obj); | ||
| 1372 | /* intel_logical_ring_add_request moves object to inactive if it | ||
| 1373 | * fails */ | ||
| 1374 | out: | 1745 | out: |
| 1375 | i915_gem_render_state_fini(&so); | 1746 | i915_gem_render_state_fini(&so); |
| 1376 | return ret; | 1747 | return ret; |
| 1377 | } | 1748 | } |
| 1378 | 1749 | ||
| 1379 | static int gen8_init_rcs_context(struct intel_engine_cs *ring, | 1750 | static int gen8_init_rcs_context(struct drm_i915_gem_request *req) |
| 1380 | struct intel_context *ctx) | ||
| 1381 | { | 1751 | { |
| 1382 | int ret; | 1752 | int ret; |
| 1383 | 1753 | ||
| 1384 | ret = intel_logical_ring_workarounds_emit(ring, ctx); | 1754 | ret = intel_logical_ring_workarounds_emit(req); |
| 1385 | if (ret) | 1755 | if (ret) |
| 1386 | return ret; | 1756 | return ret; |
| 1387 | 1757 | ||
| 1388 | return intel_lr_context_render_state_init(ring, ctx); | 1758 | ret = intel_rcs_context_init_mocs(req); |
| 1759 | /* | ||
| 1760 | * Failing to program the MOCS is non-fatal.The system will not | ||
| 1761 | * run at peak performance. So generate an error and carry on. | ||
| 1762 | */ | ||
| 1763 | if (ret) | ||
| 1764 | DRM_ERROR("MOCS failed to program: expect performance issues.\n"); | ||
| 1765 | |||
| 1766 | return intel_lr_context_render_state_init(req); | ||
| 1389 | } | 1767 | } |
| 1390 | 1768 | ||
| 1391 | /** | 1769 | /** |
| @@ -1405,7 +1783,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring) | |||
| 1405 | 1783 | ||
| 1406 | intel_logical_ring_stop(ring); | 1784 | intel_logical_ring_stop(ring); |
| 1407 | WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); | 1785 | WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); |
| 1408 | i915_gem_request_assign(&ring->outstanding_lazy_request, NULL); | ||
| 1409 | 1786 | ||
| 1410 | if (ring->cleanup) | 1787 | if (ring->cleanup) |
| 1411 | ring->cleanup(ring); | 1788 | ring->cleanup(ring); |
| @@ -1417,6 +1794,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring) | |||
| 1417 | kunmap(sg_page(ring->status_page.obj->pages->sgl)); | 1794 | kunmap(sg_page(ring->status_page.obj->pages->sgl)); |
| 1418 | ring->status_page.obj = NULL; | 1795 | ring->status_page.obj = NULL; |
| 1419 | } | 1796 | } |
| 1797 | |||
| 1798 | lrc_destroy_wa_ctx_obj(ring); | ||
| 1420 | } | 1799 | } |
| 1421 | 1800 | ||
| 1422 | static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring) | 1801 | static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring) |
| @@ -1476,11 +1855,28 @@ static int logical_render_ring_init(struct drm_device *dev) | |||
| 1476 | ring->emit_bb_start = gen8_emit_bb_start; | 1855 | ring->emit_bb_start = gen8_emit_bb_start; |
| 1477 | 1856 | ||
| 1478 | ring->dev = dev; | 1857 | ring->dev = dev; |
| 1479 | ret = logical_ring_init(dev, ring); | 1858 | |
| 1859 | ret = intel_init_pipe_control(ring); | ||
| 1480 | if (ret) | 1860 | if (ret) |
| 1481 | return ret; | 1861 | return ret; |
| 1482 | 1862 | ||
| 1483 | return intel_init_pipe_control(ring); | 1863 | ret = intel_init_workaround_bb(ring); |
| 1864 | if (ret) { | ||
| 1865 | /* | ||
| 1866 | * We continue even if we fail to initialize WA batch | ||
| 1867 | * because we only expect rare glitches but nothing | ||
| 1868 | * critical to prevent us from using GPU | ||
| 1869 | */ | ||
| 1870 | DRM_ERROR("WA batch buffer initialization failed: %d\n", | ||
| 1871 | ret); | ||
| 1872 | } | ||
| 1873 | |||
| 1874 | ret = logical_ring_init(dev, ring); | ||
| 1875 | if (ret) { | ||
| 1876 | lrc_destroy_wa_ctx_obj(ring); | ||
| 1877 | } | ||
| 1878 | |||
| 1879 | return ret; | ||
| 1484 | } | 1880 | } |
| 1485 | 1881 | ||
| 1486 | static int logical_bsd_ring_init(struct drm_device *dev) | 1882 | static int logical_bsd_ring_init(struct drm_device *dev) |
| @@ -1735,7 +2131,8 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o | |||
| 1735 | reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring); | 2131 | reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring); |
| 1736 | reg_state[CTX_CONTEXT_CONTROL+1] = | 2132 | reg_state[CTX_CONTEXT_CONTROL+1] = |
| 1737 | _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | | 2133 | _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | |
| 1738 | CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); | 2134 | CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | |
| 2135 | CTX_CTRL_RS_CTX_ENABLE); | ||
| 1739 | reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base); | 2136 | reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base); |
| 1740 | reg_state[CTX_RING_HEAD+1] = 0; | 2137 | reg_state[CTX_RING_HEAD+1] = 0; |
| 1741 | reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base); | 2138 | reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base); |
| @@ -1760,15 +2157,27 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o | |||
| 1760 | reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118; | 2157 | reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118; |
| 1761 | reg_state[CTX_SECOND_BB_STATE+1] = 0; | 2158 | reg_state[CTX_SECOND_BB_STATE+1] = 0; |
| 1762 | if (ring->id == RCS) { | 2159 | if (ring->id == RCS) { |
| 1763 | /* TODO: according to BSpec, the register state context | ||
| 1764 | * for CHV does not have these. OTOH, these registers do | ||
| 1765 | * exist in CHV. I'm waiting for a clarification */ | ||
| 1766 | reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0; | 2160 | reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0; |
| 1767 | reg_state[CTX_BB_PER_CTX_PTR+1] = 0; | 2161 | reg_state[CTX_BB_PER_CTX_PTR+1] = 0; |
| 1768 | reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4; | 2162 | reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4; |
| 1769 | reg_state[CTX_RCS_INDIRECT_CTX+1] = 0; | 2163 | reg_state[CTX_RCS_INDIRECT_CTX+1] = 0; |
| 1770 | reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8; | 2164 | reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8; |
| 1771 | reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0; | 2165 | reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0; |
| 2166 | if (ring->wa_ctx.obj) { | ||
| 2167 | struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx; | ||
| 2168 | uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj); | ||
| 2169 | |||
| 2170 | reg_state[CTX_RCS_INDIRECT_CTX+1] = | ||
| 2171 | (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) | | ||
| 2172 | (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS); | ||
| 2173 | |||
| 2174 | reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = | ||
| 2175 | CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT << 6; | ||
| 2176 | |||
| 2177 | reg_state[CTX_BB_PER_CTX_PTR+1] = | ||
| 2178 | (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) | | ||
| 2179 | 0x01; | ||
| 2180 | } | ||
| 1772 | } | 2181 | } |
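To make the indirect-ctx and per-ctx pointer encoding above concrete, here is a worked example; the numbers are invented, and it assumes CACHELINE_DWORDS is 16 (64-byte cachelines):

/*
 * Suppose the WA context object were pinned at GGTT offset 0x10000, with
 * indirect_ctx.offset = 0 dwords, indirect_ctx.size = 32 dwords (two
 * cachelines) and per_ctx.offset = 32 dwords. Then:
 *
 *   CTX_RCS_INDIRECT_CTX+1 = (0x10000 + 0 * 4)  | (32 / 16) = 0x10002
 *       (batch address in the upper bits, length in cachelines below)
 *   CTX_BB_PER_CTX_PTR+1   = (0x10000 + 32 * 4) | 0x01      = 0x10081
 *       (batch address with the low bit set, as in the code above)
 */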
| 1773 | reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9); | 2182 | reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9); |
| 1774 | reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED; | 2183 | reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED; |
| @@ -1973,13 +2382,22 @@ int intel_lr_context_deferred_create(struct intel_context *ctx, | |||
| 1973 | lrc_setup_hardware_status_page(ring, ctx_obj); | 2382 | lrc_setup_hardware_status_page(ring, ctx_obj); |
| 1974 | else if (ring->id == RCS && !ctx->rcs_initialized) { | 2383 | else if (ring->id == RCS && !ctx->rcs_initialized) { |
| 1975 | if (ring->init_context) { | 2384 | if (ring->init_context) { |
| 1976 | ret = ring->init_context(ring, ctx); | 2385 | struct drm_i915_gem_request *req; |
| 2386 | |||
| 2387 | ret = i915_gem_request_alloc(ring, ctx, &req); | ||
| 2388 | if (ret) | ||
| 2389 | return ret; | ||
| 2390 | |||
| 2391 | ret = ring->init_context(req); | ||
| 1977 | if (ret) { | 2392 | if (ret) { |
| 1978 | DRM_ERROR("ring init context: %d\n", ret); | 2393 | DRM_ERROR("ring init context: %d\n", ret); |
| 2394 | i915_gem_request_cancel(req); | ||
| 1979 | ctx->engine[ring->id].ringbuf = NULL; | 2395 | ctx->engine[ring->id].ringbuf = NULL; |
| 1980 | ctx->engine[ring->id].state = NULL; | 2396 | ctx->engine[ring->id].state = NULL; |
| 1981 | goto error; | 2397 | goto error; |
| 1982 | } | 2398 | } |
| 2399 | |||
| 2400 | i915_add_request_no_flush(req); | ||
| 1983 | } | 2401 | } |
| 1984 | 2402 | ||
| 1985 | ctx->rcs_initialized = true; | 2403 | ctx->rcs_initialized = true; |
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 04d3a6d8b207..64f89f9982a2 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h | |||
| @@ -32,18 +32,19 @@ | |||
| 32 | #define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244) | 32 | #define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244) |
| 33 | #define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3) | 33 | #define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3) |
| 34 | #define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0) | 34 | #define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0) |
| 35 | #define CTX_CTRL_RS_CTX_ENABLE (1 << 1) | ||
| 35 | #define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370) | 36 | #define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370) |
| 36 | #define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0) | 37 | #define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0) |
| 37 | 38 | ||
| 38 | /* Logical Rings */ | 39 | /* Logical Rings */ |
| 39 | int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request, | 40 | int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request); |
| 40 | struct intel_context *ctx); | 41 | int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request); |
| 41 | void intel_logical_ring_stop(struct intel_engine_cs *ring); | 42 | void intel_logical_ring_stop(struct intel_engine_cs *ring); |
| 42 | void intel_logical_ring_cleanup(struct intel_engine_cs *ring); | 43 | void intel_logical_ring_cleanup(struct intel_engine_cs *ring); |
| 43 | int intel_logical_rings_init(struct drm_device *dev); | 44 | int intel_logical_rings_init(struct drm_device *dev); |
| 45 | int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords); | ||
| 44 | 46 | ||
| 45 | int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf, | 47 | int logical_ring_flush_all_caches(struct drm_i915_gem_request *req); |
| 46 | struct intel_context *ctx); | ||
| 47 | /** | 48 | /** |
| 48 | * intel_logical_ring_advance() - advance the ringbuffer tail | 49 | * intel_logical_ring_advance() - advance the ringbuffer tail |
| 49 | * @ringbuf: Ringbuffer to advance. | 50 | * @ringbuf: Ringbuffer to advance. |
| @@ -70,20 +71,16 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf, | |||
| 70 | void intel_lr_context_free(struct intel_context *ctx); | 71 | void intel_lr_context_free(struct intel_context *ctx); |
| 71 | int intel_lr_context_deferred_create(struct intel_context *ctx, | 72 | int intel_lr_context_deferred_create(struct intel_context *ctx, |
| 72 | struct intel_engine_cs *ring); | 73 | struct intel_engine_cs *ring); |
| 73 | void intel_lr_context_unpin(struct intel_engine_cs *ring, | 74 | void intel_lr_context_unpin(struct drm_i915_gem_request *req); |
| 74 | struct intel_context *ctx); | ||
| 75 | void intel_lr_context_reset(struct drm_device *dev, | 75 | void intel_lr_context_reset(struct drm_device *dev, |
| 76 | struct intel_context *ctx); | 76 | struct intel_context *ctx); |
| 77 | 77 | ||
| 78 | /* Execlists */ | 78 | /* Execlists */ |
| 79 | int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); | 79 | int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); |
| 80 | int intel_execlists_submission(struct drm_device *dev, struct drm_file *file, | 80 | struct i915_execbuffer_params; |
| 81 | struct intel_engine_cs *ring, | 81 | int intel_execlists_submission(struct i915_execbuffer_params *params, |
| 82 | struct intel_context *ctx, | ||
| 83 | struct drm_i915_gem_execbuffer2 *args, | 82 | struct drm_i915_gem_execbuffer2 *args, |
| 84 | struct list_head *vmas, | 83 | struct list_head *vmas); |
| 85 | struct drm_i915_gem_object *batch_obj, | ||
| 86 | u64 exec_start, u32 dispatch_flags); | ||
| 87 | u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj); | 84 | u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj); |
| 88 | 85 | ||
| 89 | void intel_lrc_irq_handler(struct intel_engine_cs *ring); | 86 | void intel_lrc_irq_handler(struct intel_engine_cs *ring); |
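With the prototype change above, a submission path is expected to bundle what used to be separate arguments into an i915_execbuffer_params block. A rough sketch, using only the fields that appear in this diff; the local variables (dev, ring, ctx, request, dispatch_flags, batch_vm_offset, args, vmas) stand in for whatever the execbuffer path already has on hand:

	struct i915_execbuffer_params params = { };
	int ret;

	params.dev = dev;
	params.ring = ring;
	params.ctx = ctx;
	params.request = request;
	params.dispatch_flags = dispatch_flags;
	/* Offset of the batch object within its address space. */
	params.batch_obj_vm_offset = batch_vm_offset;

	ret = intel_execlists_submission(&params, args, vmas);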
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 161ab26f81fb..cb634f48e7d9 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
| @@ -239,8 +239,6 @@ static void intel_disable_lvds(struct intel_encoder *encoder) | |||
| 239 | { | 239 | { |
| 240 | struct drm_device *dev = encoder->base.dev; | 240 | struct drm_device *dev = encoder->base.dev; |
| 241 | struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); | 241 | struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); |
| 242 | struct intel_connector *intel_connector = | ||
| 243 | &lvds_encoder->attached_connector->base; | ||
| 244 | struct drm_i915_private *dev_priv = dev->dev_private; | 242 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 245 | u32 ctl_reg, stat_reg; | 243 | u32 ctl_reg, stat_reg; |
| 246 | 244 | ||
| @@ -252,8 +250,6 @@ static void intel_disable_lvds(struct intel_encoder *encoder) | |||
| 252 | stat_reg = PP_STATUS; | 250 | stat_reg = PP_STATUS; |
| 253 | } | 251 | } |
| 254 | 252 | ||
| 255 | intel_panel_disable_backlight(intel_connector); | ||
| 256 | |||
| 257 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); | 253 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); |
| 258 | if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) | 254 | if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) |
| 259 | DRM_ERROR("timed out waiting for panel to power off\n"); | 255 | DRM_ERROR("timed out waiting for panel to power off\n"); |
| @@ -262,6 +258,31 @@ static void intel_disable_lvds(struct intel_encoder *encoder) | |||
| 262 | POSTING_READ(lvds_encoder->reg); | 258 | POSTING_READ(lvds_encoder->reg); |
| 263 | } | 259 | } |
| 264 | 260 | ||
| 261 | static void gmch_disable_lvds(struct intel_encoder *encoder) | ||
| 262 | { | ||
| 263 | struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); | ||
| 264 | struct intel_connector *intel_connector = | ||
| 265 | &lvds_encoder->attached_connector->base; | ||
| 266 | |||
| 267 | intel_panel_disable_backlight(intel_connector); | ||
| 268 | |||
| 269 | intel_disable_lvds(encoder); | ||
| 270 | } | ||
| 271 | |||
| 272 | static void pch_disable_lvds(struct intel_encoder *encoder) | ||
| 273 | { | ||
| 274 | struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); | ||
| 275 | struct intel_connector *intel_connector = | ||
| 276 | &lvds_encoder->attached_connector->base; | ||
| 277 | |||
| 278 | intel_panel_disable_backlight(intel_connector); | ||
| 279 | } | ||
| 280 | |||
| 281 | static void pch_post_disable_lvds(struct intel_encoder *encoder) | ||
| 282 | { | ||
| 283 | intel_disable_lvds(encoder); | ||
| 284 | } | ||
| 285 | |||
| 265 | static enum drm_mode_status | 286 | static enum drm_mode_status |
| 266 | intel_lvds_mode_valid(struct drm_connector *connector, | 287 | intel_lvds_mode_valid(struct drm_connector *connector, |
| 267 | struct drm_display_mode *mode) | 288 | struct drm_display_mode *mode) |
| @@ -452,7 +473,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
| 452 | */ | 473 | */ |
| 453 | if (!HAS_PCH_SPLIT(dev)) { | 474 | if (!HAS_PCH_SPLIT(dev)) { |
| 454 | drm_modeset_lock_all(dev); | 475 | drm_modeset_lock_all(dev); |
| 455 | intel_modeset_setup_hw_state(dev, true); | 476 | intel_display_resume(dev); |
| 456 | drm_modeset_unlock_all(dev); | 477 | drm_modeset_unlock_all(dev); |
| 457 | } | 478 | } |
| 458 | 479 | ||
| @@ -942,12 +963,6 @@ void intel_lvds_init(struct drm_device *dev) | |||
| 942 | if (dmi_check_system(intel_no_lvds)) | 963 | if (dmi_check_system(intel_no_lvds)) |
| 943 | return; | 964 | return; |
| 944 | 965 | ||
| 945 | pin = GMBUS_PIN_PANEL; | ||
| 946 | if (!lvds_is_present_in_vbt(dev, &pin)) { | ||
| 947 | DRM_DEBUG_KMS("LVDS is not present in VBT\n"); | ||
| 948 | return; | ||
| 949 | } | ||
| 950 | |||
| 951 | if (HAS_PCH_SPLIT(dev)) { | 966 | if (HAS_PCH_SPLIT(dev)) { |
| 952 | if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) | 967 | if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) |
| 953 | return; | 968 | return; |
| @@ -957,6 +972,16 @@ void intel_lvds_init(struct drm_device *dev) | |||
| 957 | } | 972 | } |
| 958 | } | 973 | } |
| 959 | 974 | ||
| 975 | pin = GMBUS_PIN_PANEL; | ||
| 976 | if (!lvds_is_present_in_vbt(dev, &pin)) { | ||
| 977 | u32 reg = HAS_PCH_SPLIT(dev) ? PCH_LVDS : LVDS; | ||
| 978 | if ((I915_READ(reg) & LVDS_PORT_EN) == 0) { | ||
| 979 | DRM_DEBUG_KMS("LVDS is not present in VBT\n"); | ||
| 980 | return; | ||
| 981 | } | ||
| 982 | DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n"); | ||
| 983 | } | ||
| 984 | |||
| 960 | lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL); | 985 | lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL); |
| 961 | if (!lvds_encoder) | 986 | if (!lvds_encoder) |
| 962 | return; | 987 | return; |
| @@ -988,7 +1013,12 @@ void intel_lvds_init(struct drm_device *dev) | |||
| 988 | intel_encoder->enable = intel_enable_lvds; | 1013 | intel_encoder->enable = intel_enable_lvds; |
| 989 | intel_encoder->pre_enable = intel_pre_enable_lvds; | 1014 | intel_encoder->pre_enable = intel_pre_enable_lvds; |
| 990 | intel_encoder->compute_config = intel_lvds_compute_config; | 1015 | intel_encoder->compute_config = intel_lvds_compute_config; |
| 991 | intel_encoder->disable = intel_disable_lvds; | 1016 | if (HAS_PCH_SPLIT(dev_priv)) { |
| 1017 | intel_encoder->disable = pch_disable_lvds; | ||
| 1018 | intel_encoder->post_disable = pch_post_disable_lvds; | ||
| 1019 | } else { | ||
| 1020 | intel_encoder->disable = gmch_disable_lvds; | ||
| 1021 | } | ||
| 992 | intel_encoder->get_hw_state = intel_lvds_get_hw_state; | 1022 | intel_encoder->get_hw_state = intel_lvds_get_hw_state; |
| 993 | intel_encoder->get_config = intel_lvds_get_config; | 1023 | intel_encoder->get_config = intel_lvds_get_config; |
| 994 | intel_connector->get_hw_state = intel_connector_get_hw_state; | 1024 | intel_connector->get_hw_state = intel_connector_get_hw_state; |
| @@ -1068,24 +1098,8 @@ void intel_lvds_init(struct drm_device *dev) | |||
| 1068 | drm_mode_debug_printmodeline(scan); | 1098 | drm_mode_debug_printmodeline(scan); |
| 1069 | 1099 | ||
| 1070 | fixed_mode = drm_mode_duplicate(dev, scan); | 1100 | fixed_mode = drm_mode_duplicate(dev, scan); |
| 1071 | if (fixed_mode) { | 1101 | if (fixed_mode) |
| 1072 | downclock_mode = | ||
| 1073 | intel_find_panel_downclock(dev, | ||
| 1074 | fixed_mode, connector); | ||
| 1075 | if (downclock_mode != NULL && | ||
| 1076 | i915.lvds_downclock) { | ||
| 1077 | /* We found the downclock for LVDS. */ | ||
| 1078 | dev_priv->lvds_downclock_avail = true; | ||
| 1079 | dev_priv->lvds_downclock = | ||
| 1080 | downclock_mode->clock; | ||
| 1081 | DRM_DEBUG_KMS("LVDS downclock is found" | ||
| 1082 | " in EDID. Normal clock %dKhz, " | ||
| 1083 | "downclock %dKhz\n", | ||
| 1084 | fixed_mode->clock, | ||
| 1085 | dev_priv->lvds_downclock); | ||
| 1086 | } | ||
| 1087 | goto out; | 1102 | goto out; |
| 1088 | } | ||
| 1089 | } | 1103 | } |
| 1090 | } | 1104 | } |
| 1091 | 1105 | ||
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c new file mode 100644 index 000000000000..6d3c6c0a5c62 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_mocs.c | |||
| @@ -0,0 +1,335 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2015 Intel Corporation | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: * | ||
| 10 | * The above copyright notice and this permission notice (including the next | ||
| 11 | * paragraph) shall be included in all copies or substantial portions of the | ||
| 12 | * Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
| 19 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 20 | * SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include "intel_mocs.h" | ||
| 24 | #include "intel_lrc.h" | ||
| 25 | #include "intel_ringbuffer.h" | ||
| 26 | |||
| 27 | /* structures required */ | ||
| 28 | struct drm_i915_mocs_entry { | ||
| 29 | u32 control_value; | ||
| 30 | u16 l3cc_value; | ||
| 31 | }; | ||
| 32 | |||
| 33 | struct drm_i915_mocs_table { | ||
| 34 | u32 size; | ||
| 35 | const struct drm_i915_mocs_entry *table; | ||
| 36 | }; | ||
| 37 | |||
| 38 | /* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */ | ||
| 39 | #define LE_CACHEABILITY(value) ((value) << 0) | ||
| 40 | #define LE_TGT_CACHE(value) ((value) << 2) | ||
| 41 | #define LE_LRUM(value) ((value) << 4) | ||
| 42 | #define LE_AOM(value) ((value) << 6) | ||
| 43 | #define LE_RSC(value) ((value) << 7) | ||
| 44 | #define LE_SCC(value) ((value) << 8) | ||
| 45 | #define LE_PFM(value) ((value) << 11) | ||
| 46 | #define LE_SCF(value) ((value) << 14) | ||
| 47 | |||
| 48 | /* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */ | ||
| 49 | #define L3_ESC(value) ((value) << 0) | ||
| 50 | #define L3_SCC(value) ((value) << 1) | ||
| 51 | #define L3_CACHEABILITY(value) ((value) << 4) | ||
| 52 | |||
| 53 | /* Helper defines */ | ||
| 54 | #define GEN9_NUM_MOCS_ENTRIES 62 /* 62 out of 64 - 63 & 64 are reserved. */ | ||
| 55 | |||
| 56 | /* (e)LLC caching options */ | ||
| 57 | #define LE_PAGETABLE 0 | ||
| 58 | #define LE_UC 1 | ||
| 59 | #define LE_WT 2 | ||
| 60 | #define LE_WB 3 | ||
| 61 | |||
| 62 | /* L3 caching options */ | ||
| 63 | #define L3_DIRECT 0 | ||
| 64 | #define L3_UC 1 | ||
| 65 | #define L3_RESERVED 2 | ||
| 66 | #define L3_WB 3 | ||
| 67 | |||
| 68 | /* Target cache */ | ||
| 69 | #define ELLC 0 | ||
| 70 | #define LLC 1 | ||
| 71 | #define LLC_ELLC 2 | ||
| 72 | |||
| 73 | /* | ||
| 74 | * MOCS tables | ||
| 75 | * | ||
| 76 | * These are the MOCS tables that are programmed across all the rings. | ||
| 77 | * The control value is programmed to all the rings that support the | ||
| 78 | * MOCS registers, while the l3cc_values are only programmed to the | ||
| 79 | * LNCFCMOCS0 - LNCFCMOCS32 registers. | ||
| 80 | * | ||
| 81 | * These tables are intended to be kept reasonably consistent across | ||
| 82 | * platforms. However some of the fields are not applicable to all of | ||
| 83 | * them. | ||
| 84 | * | ||
| 85 | * Entries not part of the following tables are undefined as far as | ||
| 86 | * userspace is concerned and shouldn't be relied upon. For the time | ||
| 87 | * being they will be implicitly initialized to the strictest caching | ||
| 88 | * configuration (uncached) to guarantee forwards compatibility with | ||
| 89 | * userspace programs written against more recent kernels providing | ||
| 90 | * additional MOCS entries. | ||
| 91 | * | ||
| 92 | * NOTE: These tables MUST start with being uncached and the length | ||
| 93 | * MUST be less than 63 as the last two registers are reserved | ||
| 94 | * by the hardware. These tables are part of the kernel ABI and | ||
| 95 | * may only be updated incrementally by adding entries at the | ||
| 96 | * end. | ||
| 97 | */ | ||
| 98 | static const struct drm_i915_mocs_entry skylake_mocs_table[] = { | ||
| 99 | /* { 0x00000009, 0x0010 } */ | ||
| 100 | { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) | | ||
| 101 | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), | ||
| 102 | (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) }, | ||
| 103 | /* { 0x00000038, 0x0030 } */ | ||
| 104 | { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | | ||
| 105 | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), | ||
| 106 | (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }, | ||
| 107 | /* { 0x0000003b, 0x0030 } */ | ||
| 108 | { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | | ||
| 109 | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), | ||
| 110 | (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) } | ||
| 111 | }; | ||
| 112 | |||
| 113 | /* NOTE: the LE_TGT_CACHE is not used on Broxton */ | ||
| 114 | static const struct drm_i915_mocs_entry broxton_mocs_table[] = { | ||
| 115 | /* { 0x00000009, 0x0010 } */ | ||
| 116 | { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) | | ||
| 117 | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), | ||
| 118 | (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) }, | ||
| 119 | /* { 0x00000038, 0x0030 } */ | ||
| 120 | { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | | ||
| 121 | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), | ||
| 122 | (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }, | ||
| 123 | /* { 0x0000003b, 0x0030 } */ | ||
| 124 | { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | | ||
| 125 | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), | ||
| 126 | (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) } | ||
| 127 | }; | ||
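As a cross-check of the field encodings above, the raw values quoted in the table comments fall straight out of the macros (illustration only, not lines from the patch):

    LE_CACHEABILITY(LE_WB)  = 3 << 0 = 0x03
    LE_TGT_CACHE(LLC_ELLC)  = 2 << 2 = 0x08
    LE_LRUM(3)              = 3 << 4 = 0x30
    control_value           = 0x03 | 0x08 | 0x30 = 0x0000003b    (the "{ 0x0000003b, 0x0030 }" comment)

    L3_CACHEABILITY(L3_WB)  = 3 << 4 = 0x30
    l3cc_value              = L3_ESC(0) | L3_SCC(0) | 0x30 = 0x0030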
| 128 | |||
| 129 | /** | ||
| 130 | * get_mocs_settings() | ||
| 131 | * @dev: DRM device. | ||
| 132 | * @table: Output table that will be made to point at appropriate | ||
| 133 | * MOCS values for the device. | ||
| 134 | * | ||
| 135 | * This function will return the values of the MOCS table that need to | ||
| 136 | * be programmed for the platform, and whether there is anything to | ||
| 137 | * program at all. | ||
| 138 | * | ||
| 139 | * Return: true if there are applicable MOCS settings for the device. | ||
| 140 | */ | ||
| 141 | static bool get_mocs_settings(struct drm_device *dev, | ||
| 142 | struct drm_i915_mocs_table *table) | ||
| 143 | { | ||
| 144 | bool result = false; | ||
| 145 | |||
| 146 | if (IS_SKYLAKE(dev)) { | ||
| 147 | table->size = ARRAY_SIZE(skylake_mocs_table); | ||
| 148 | table->table = skylake_mocs_table; | ||
| 149 | result = true; | ||
| 150 | } else if (IS_BROXTON(dev)) { | ||
| 151 | table->size = ARRAY_SIZE(broxton_mocs_table); | ||
| 152 | table->table = broxton_mocs_table; | ||
| 153 | result = true; | ||
| 154 | } else { | ||
| 155 | WARN_ONCE(INTEL_INFO(dev)->gen >= 9, | ||
| 156 | "Platform that should have a MOCS table does not.\n"); | ||
| 157 | } | ||
| 158 | |||
| 159 | return result; | ||
| 160 | } | ||
| 161 | |||
| 162 | /** | ||
| 163 | * emit_mocs_control_table() - emit the mocs control table | ||
| 164 | * @req: Request to set up the MOCS table for. | ||
| 165 | * @table: The values to program into the control regs. | ||
| 166 | * @reg_base: The base for the engine that needs to be programmed. | ||
| 167 | * | ||
| 168 | * This function simply emits a MI_LOAD_REGISTER_IMM command for the | ||
| 169 | * given table starting at the given address. | ||
| 170 | * | ||
| 171 | * Return: 0 on success, otherwise the error status. | ||
| 172 | */ | ||
| 173 | static int emit_mocs_control_table(struct drm_i915_gem_request *req, | ||
| 174 | const struct drm_i915_mocs_table *table, | ||
| 175 | u32 reg_base) | ||
| 176 | { | ||
| 177 | struct intel_ringbuffer *ringbuf = req->ringbuf; | ||
| 178 | unsigned int index; | ||
| 179 | int ret; | ||
| 180 | |||
| 181 | if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) | ||
| 182 | return -ENODEV; | ||
| 183 | |||
| 184 | ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES); | ||
| 185 | if (ret) { | ||
| 186 | DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret); | ||
| 187 | return ret; | ||
| 188 | } | ||
| 189 | |||
| 190 | intel_logical_ring_emit(ringbuf, | ||
| 191 | MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES)); | ||
| 192 | |||
| 193 | for (index = 0; index < table->size; index++) { | ||
| 194 | intel_logical_ring_emit(ringbuf, reg_base + index * 4); | ||
| 195 | intel_logical_ring_emit(ringbuf, | ||
| 196 | table->table[index].control_value); | ||
| 197 | } | ||
| 198 | |||
| 199 | /* | ||
| 200 | * Ok, now set the unused entries to uncached. These entries | ||
| 201 | * are officially undefined and no contract for the contents | ||
| 202 | * and settings is given for these entries. | ||
| 203 | * | ||
| 204 | * Entry 0 in the table is uncached - so we are just writing | ||
| 205 | * that value to all the used entries. | ||
| 206 | */ | ||
| 207 | for (; index < GEN9_NUM_MOCS_ENTRIES; index++) { | ||
| 208 | intel_logical_ring_emit(ringbuf, reg_base + index * 4); | ||
| 209 | intel_logical_ring_emit(ringbuf, table->table[0].control_value); | ||
| 210 | } | ||
| 211 | |||
| 212 | intel_logical_ring_emit(ringbuf, MI_NOOP); | ||
| 213 | intel_logical_ring_advance(ringbuf); | ||
| 214 | |||
| 215 | return 0; | ||
| 216 | } | ||
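The dword reservation in intel_logical_ring_begin() above is sized exactly for what gets emitted; counting it out (illustration only, not lines from the patch):

    1 dword                     MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES) header
    2 * GEN9_NUM_MOCS_ENTRIES   register offset + value per entry, with entry 0 repeated for the unused slots
    1 dword                     trailing MI_NOOP
    total = 2 + 2 * 62 = 126 dwords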
| 217 | |||
| 218 | /** | ||
| 219 | * emit_mocs_l3cc_table() - emit the mocs l3cc table | ||
| 220 | * @req: Request to set up the MOCS table for. | ||
| 221 | * @table: The values to program into the control regs. | ||
| 222 | * | ||
| 223 | * This function simply emits a MI_LOAD_REGISTER_IMM command for the | ||
| 224 | * given table starting at the given address. This register set is | ||
| 225 | * programmed in pairs. | ||
| 226 | * | ||
| 227 | * Return: 0 on success, otherwise the error status. | ||
| 228 | */ | ||
| 229 | static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, | ||
| 230 | const struct drm_i915_mocs_table *table) | ||
| 231 | { | ||
| 232 | struct intel_ringbuffer *ringbuf = req->ringbuf; | ||
| 233 | unsigned int count; | ||
| 234 | unsigned int i; | ||
| 235 | u32 value; | ||
| 236 | u32 filler = (table->table[0].l3cc_value & 0xffff) | | ||
| 237 | ((table->table[0].l3cc_value & 0xffff) << 16); | ||
| 238 | int ret; | ||
| 239 | |||
| 240 | if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) | ||
| 241 | return -ENODEV; | ||
| 242 | |||
| 243 | ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES); | ||
| 244 | if (ret) { | ||
| 245 | DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret); | ||
| 246 | return ret; | ||
| 247 | } | ||
| 248 | |||
| 249 | intel_logical_ring_emit(ringbuf, | ||
| 250 | MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2)); | ||
| 251 | |||
| 252 | for (i = 0, count = 0; i < table->size / 2; i++, count += 2) { | ||
| 253 | value = (table->table[count].l3cc_value & 0xffff) | | ||
| 254 | ((table->table[count + 1].l3cc_value & 0xffff) << 16); | ||
| 255 | |||
| 256 | intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4); | ||
| 257 | intel_logical_ring_emit(ringbuf, value); | ||
| 258 | } | ||
| 259 | |||
| 260 | if (table->size & 0x01) { | ||
| 261 | /* Odd table size - 1 left over */ | ||
| 262 | value = (table->table[count].l3cc_value & 0xffff) | | ||
| 263 | ((table->table[0].l3cc_value & 0xffff) << 16); | ||
| 264 | } else | ||
| 265 | value = filler; | ||
| 266 | |||
| 267 | /* | ||
| 268 | * Now set the rest of the table to uncached - use entry 0 as | ||
| 269 | * this will be uncached. Leave the last pair uninitialised as | ||
| 270 | * they are reserved by the hardware. | ||
| 271 | */ | ||
| 272 | for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) { | ||
| 273 | intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4); | ||
| 274 | intel_logical_ring_emit(ringbuf, value); | ||
| 275 | |||
| 276 | value = filler; | ||
| 277 | } | ||
| 278 | |||
| 279 | intel_logical_ring_emit(ringbuf, MI_NOOP); | ||
| 280 | intel_logical_ring_advance(ringbuf); | ||
| 281 | |||
| 282 | return 0; | ||
| 283 | } | ||
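Because each LNCFCMOCS register holds two 16-bit l3cc fields, the loop above walks the table in pairs and the budget works out to 1 header + 31 register/value pairs + 1 MI_NOOP = 64 dwords, i.e. the 2 + GEN9_NUM_MOCS_ENTRIES reserved. Tracing the three-entry tables defined earlier through this function gives (worked values, not extra code):

    GEN9_LNCFCMOCS0      = 0x0010 | (0x0030 << 16) = 0x00300010   (entries 0 and 1)
    GEN9_LNCFCMOCS0 + 4  = 0x0030 | (0x0010 << 16) = 0x00100030   (odd leftover entry 2, padded with entry 0)
    remaining registers  = 0x0010 | (0x0010 << 16) = 0x00100010   (the uncached filler)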
| 284 | |||
| 285 | /** | ||
| 286 | * intel_rcs_context_init_mocs() - program the MOCS register. | ||
| 287 | * @req: Request to set up the MOCS tables for. | ||
| 288 | * | ||
| 289 | * This function will emit a batch buffer with the values required for | ||
| 290 | * programming the MOCS register values for all the currently supported | ||
| 291 | * rings. | ||
| 292 | * | ||
| 293 | * These registers are partially stored in the RCS context, so they are | ||
| 294 | * emitted at the same time so that when a context is created these registers | ||
| 295 | * are set up. These registers have to be emitted into the start of the | ||
| 296 | * context as setting the ELSP will re-init some of these registers back | ||
| 297 | * to the hw values. | ||
| 298 | * | ||
| 299 | * Return: 0 on success, otherwise the error status. | ||
| 300 | */ | ||
| 301 | int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req) | ||
| 302 | { | ||
| 303 | struct drm_i915_mocs_table t; | ||
| 304 | int ret; | ||
| 305 | |||
| 306 | if (get_mocs_settings(req->ring->dev, &t)) { | ||
| 307 | /* Program the control registers */ | ||
| 308 | ret = emit_mocs_control_table(req, &t, GEN9_GFX_MOCS_0); | ||
| 309 | if (ret) | ||
| 310 | return ret; | ||
| 311 | |||
| 312 | ret = emit_mocs_control_table(req, &t, GEN9_MFX0_MOCS_0); | ||
| 313 | if (ret) | ||
| 314 | return ret; | ||
| 315 | |||
| 316 | ret = emit_mocs_control_table(req, &t, GEN9_MFX1_MOCS_0); | ||
| 317 | if (ret) | ||
| 318 | return ret; | ||
| 319 | |||
| 320 | ret = emit_mocs_control_table(req, &t, GEN9_VEBOX_MOCS_0); | ||
| 321 | if (ret) | ||
| 322 | return ret; | ||
| 323 | |||
| 324 | ret = emit_mocs_control_table(req, &t, GEN9_BLT_MOCS_0); | ||
| 325 | if (ret) | ||
| 326 | return ret; | ||
| 327 | |||
| 328 | /* Now program the l3cc registers */ | ||
| 329 | ret = emit_mocs_l3cc_table(req, &t); | ||
| 330 | if (ret) | ||
| 331 | return ret; | ||
| 332 | } | ||
| 333 | |||
| 334 | return 0; | ||
| 335 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h new file mode 100644 index 000000000000..76e45b1748b3 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_mocs.h | |||
| @@ -0,0 +1,57 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2015 Intel Corporation | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice (including the next | ||
| 12 | * paragraph) shall be included in all copies or substantial portions of the | ||
| 13 | * Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
| 20 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 21 | * SOFTWARE. | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef INTEL_MOCS_H | ||
| 25 | #define INTEL_MOCS_H | ||
| 26 | |||
| 27 | /** | ||
| 28 | * DOC: Memory Objects Control State (MOCS) | ||
| 29 | * | ||
| 30 | * Motivation: | ||
| 31 | * In previous gens the MOCS settings were values that were set by userland as | ||
| 32 | * part of the batch. In Gen9 this has changed to be a single table (per ring) | ||
| 33 | * that all batches now reference by index instead of programming the MOCS | ||
| 34 | * directly. | ||
| 35 | * | ||
| 36 | * The one wrinkle in this is that only PART of the MOCS tables are included | ||
| 37 | * in context (The GFX_MOCS_0 - GFX_MOCS_64 and the LNCFCMOCS0 - LNCFCMOCS32 | ||
| 38 | * registers). The rest are not (the settings for the other rings). | ||
| 39 | * | ||
| 40 | * This table needs to be set at system start-up because of the way the | ||
| 41 | * table interacts with the contexts and the GmmLib interface. | ||
| 42 | * | ||
| 43 | * | ||
| 44 | * Implementation: | ||
| 45 | * | ||
| 46 | * The tables (one per supported platform) are defined in intel_mocs.c | ||
| 47 | * and are programmed in the first batch after the context is loaded | ||
| 48 | * (with the hardware workarounds). This will then let the usual | ||
| 49 | * context handling keep the MOCS in step. | ||
| 50 | */ | ||
| 51 | |||
| 52 | #include <drm/drmP.h> | ||
| 53 | #include "i915_drv.h" | ||
| 54 | |||
| 55 | int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req); | ||
| 56 | |||
| 57 | #endif | ||
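The header only exports intel_rcs_context_init_mocs(); the call site is not part of the hunks shown here. A rough sketch of how a render-ring context-init path would use it, going by the Implementation note above (the caller and the workarounds helper are assumptions, not taken from this diff):

static int rcs_context_init_sketch(struct drm_i915_gem_request *req)
{
	int ret;

	/* assumed helper: emit the hardware workaround batch first */
	ret = intel_logical_ring_workarounds_emit(req);
	if (ret)
		return ret;

	/* then the MOCS tables, so the saved context image carries them */
	return intel_rcs_context_init_mocs(req);
}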
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 481337436f72..cb1c65739425 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
| @@ -25,8 +25,6 @@ | |||
| 25 | * | 25 | * |
| 26 | */ | 26 | */ |
| 27 | 27 | ||
| 28 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 29 | |||
| 30 | #include <linux/acpi.h> | 28 | #include <linux/acpi.h> |
| 31 | #include <acpi/video.h> | 29 | #include <acpi/video.h> |
| 32 | 30 | ||
| @@ -53,6 +51,7 @@ | |||
| 53 | #define MBOX_ACPI (1<<0) | 51 | #define MBOX_ACPI (1<<0) |
| 54 | #define MBOX_SWSCI (1<<1) | 52 | #define MBOX_SWSCI (1<<1) |
| 55 | #define MBOX_ASLE (1<<2) | 53 | #define MBOX_ASLE (1<<2) |
| 54 | #define MBOX_ASLE_EXT (1<<4) | ||
| 56 | 55 | ||
| 57 | struct opregion_header { | 56 | struct opregion_header { |
| 58 | u8 signature[16]; | 57 | u8 signature[16]; |
| @@ -62,7 +61,10 @@ struct opregion_header { | |||
| 62 | u8 vbios_ver[16]; | 61 | u8 vbios_ver[16]; |
| 63 | u8 driver_ver[16]; | 62 | u8 driver_ver[16]; |
| 64 | u32 mboxes; | 63 | u32 mboxes; |
| 65 | u8 reserved[164]; | 64 | u32 driver_model; |
| 65 | u32 pcon; | ||
| 66 | u8 dver[32]; | ||
| 67 | u8 rsvd[124]; | ||
| 66 | } __packed; | 68 | } __packed; |
| 67 | 69 | ||
| 68 | /* OpRegion mailbox #1: public ACPI methods */ | 70 | /* OpRegion mailbox #1: public ACPI methods */ |
| @@ -84,7 +86,9 @@ struct opregion_acpi { | |||
| 84 | u32 evts; /* ASL supported events */ | 86 | u32 evts; /* ASL supported events */ |
| 85 | u32 cnot; /* current OS notification */ | 87 | u32 cnot; /* current OS notification */ |
| 86 | u32 nrdy; /* driver status */ | 88 | u32 nrdy; /* driver status */ |
| 87 | u8 rsvd2[60]; | 89 | u32 did2[7]; /* extended supported display devices ID list */ |
| 90 | u32 cpd2[7]; /* extended attached display devices list */ | ||
| 91 | u8 rsvd2[4]; | ||
| 88 | } __packed; | 92 | } __packed; |
| 89 | 93 | ||
| 90 | /* OpRegion mailbox #2: SWSCI */ | 94 | /* OpRegion mailbox #2: SWSCI */ |
| @@ -113,7 +117,10 @@ struct opregion_asle { | |||
| 113 | u32 pcft; /* power conservation features */ | 117 | u32 pcft; /* power conservation features */ |
| 114 | u32 srot; /* supported rotation angles */ | 118 | u32 srot; /* supported rotation angles */ |
| 115 | u32 iuer; /* IUER events */ | 119 | u32 iuer; /* IUER events */ |
| 116 | u8 rsvd[86]; | 120 | u64 fdss; |
| 121 | u32 fdsp; | ||
| 122 | u32 stat; | ||
| 123 | u8 rsvd[70]; | ||
| 117 | } __packed; | 124 | } __packed; |
| 118 | 125 | ||
| 119 | /* Driver readiness indicator */ | 126 | /* Driver readiness indicator */ |
| @@ -611,6 +618,38 @@ static struct notifier_block intel_opregion_notifier = { | |||
| 611 | * (version 3) | 618 | * (version 3) |
| 612 | */ | 619 | */ |
| 613 | 620 | ||
| 621 | static u32 get_did(struct intel_opregion *opregion, int i) | ||
| 622 | { | ||
| 623 | u32 did; | ||
| 624 | |||
| 625 | if (i < ARRAY_SIZE(opregion->acpi->didl)) { | ||
| 626 | did = ioread32(&opregion->acpi->didl[i]); | ||
| 627 | } else { | ||
| 628 | i -= ARRAY_SIZE(opregion->acpi->didl); | ||
| 629 | |||
| 630 | if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2))) | ||
| 631 | return 0; | ||
| 632 | |||
| 633 | did = ioread32(&opregion->acpi->did2[i]); | ||
| 634 | } | ||
| 635 | |||
| 636 | return did; | ||
| 637 | } | ||
| 638 | |||
| 639 | static void set_did(struct intel_opregion *opregion, int i, u32 val) | ||
| 640 | { | ||
| 641 | if (i < ARRAY_SIZE(opregion->acpi->didl)) { | ||
| 642 | iowrite32(val, &opregion->acpi->didl[i]); | ||
| 643 | } else { | ||
| 644 | i -= ARRAY_SIZE(opregion->acpi->didl); | ||
| 645 | |||
| 646 | if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2))) | ||
| 647 | return; | ||
| 648 | |||
| 649 | iowrite32(val, &opregion->acpi->did2[i]); | ||
| 650 | } | ||
| 651 | } | ||
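Assuming didl[] keeps its existing eight slots (the old code capped the list at 8) and did2[] adds seven more, the helpers map a flat output index onto the split arrays like this (worked example):

    get_did(opregion, 3)  -> ioread32(&opregion->acpi->didl[3])
    get_did(opregion, 8)  -> ioread32(&opregion->acpi->did2[0])
    get_did(opregion, 14) -> ioread32(&opregion->acpi->did2[6])
    get_did(opregion, 15) -> WARN_ON() and return 0

set_did() follows the same mapping, which is what lets max_outputs below grow to 8 + 7 = 15.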
| 652 | |||
| 614 | static void intel_didl_outputs(struct drm_device *dev) | 653 | static void intel_didl_outputs(struct drm_device *dev) |
| 615 | { | 654 | { |
| 616 | struct drm_i915_private *dev_priv = dev->dev_private; | 655 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -620,7 +659,7 @@ static void intel_didl_outputs(struct drm_device *dev) | |||
| 620 | struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; | 659 | struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; |
| 621 | unsigned long long device_id; | 660 | unsigned long long device_id; |
| 622 | acpi_status status; | 661 | acpi_status status; |
| 623 | u32 temp; | 662 | u32 temp, max_outputs; |
| 624 | int i = 0; | 663 | int i = 0; |
| 625 | 664 | ||
| 626 | handle = ACPI_HANDLE(&dev->pdev->dev); | 665 | handle = ACPI_HANDLE(&dev->pdev->dev); |
| @@ -639,41 +678,50 @@ static void intel_didl_outputs(struct drm_device *dev) | |||
| 639 | } | 678 | } |
| 640 | 679 | ||
| 641 | if (!acpi_video_bus) { | 680 | if (!acpi_video_bus) { |
| 642 | pr_warn("No ACPI video bus found\n"); | 681 | DRM_ERROR("No ACPI video bus found\n"); |
| 643 | return; | 682 | return; |
| 644 | } | 683 | } |
| 645 | 684 | ||
| 685 | /* | ||
| 686 | * In theory, did2, the extended didl, gets added at opregion version | ||
| 687 | * 3.0. In practice, however, we're supposed to set it for earlier | ||
| 688 | * versions as well, since a BIOS that doesn't understand did2 should | ||
| 689 | * not look at it anyway. Use a variable so we can tweak this if a need | ||
| 690 | * arises later. | ||
| 691 | */ | ||
| 692 | max_outputs = ARRAY_SIZE(opregion->acpi->didl) + | ||
| 693 | ARRAY_SIZE(opregion->acpi->did2); | ||
| 694 | |||
| 646 | list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { | 695 | list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { |
| 647 | if (i >= 8) { | 696 | if (i >= max_outputs) { |
| 648 | dev_dbg(&dev->pdev->dev, | 697 | DRM_DEBUG_KMS("More than %u outputs detected via ACPI\n", |
| 649 | "More than 8 outputs detected via ACPI\n"); | 698 | max_outputs); |
| 650 | return; | 699 | return; |
| 651 | } | 700 | } |
| 652 | status = | 701 | status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR", |
| 653 | acpi_evaluate_integer(acpi_cdev->handle, "_ADR", | 702 | NULL, &device_id); |
| 654 | NULL, &device_id); | ||
| 655 | if (ACPI_SUCCESS(status)) { | 703 | if (ACPI_SUCCESS(status)) { |
| 656 | if (!device_id) | 704 | if (!device_id) |
| 657 | goto blind_set; | 705 | goto blind_set; |
| 658 | iowrite32((u32)(device_id & 0x0f0f), | 706 | set_did(opregion, i++, (u32)(device_id & 0x0f0f)); |
| 659 | &opregion->acpi->didl[i]); | ||
| 660 | i++; | ||
| 661 | } | 707 | } |
| 662 | } | 708 | } |
| 663 | 709 | ||
| 664 | end: | 710 | end: |
| 665 | /* If fewer than 8 outputs, the list must be null terminated */ | 711 | DRM_DEBUG_KMS("%d outputs detected\n", i); |
| 666 | if (i < 8) | 712 | |
| 667 | iowrite32(0, &opregion->acpi->didl[i]); | 713 | /* If fewer than max outputs, the list must be null terminated */ |
| 714 | if (i < max_outputs) | ||
| 715 | set_did(opregion, i, 0); | ||
| 668 | return; | 716 | return; |
| 669 | 717 | ||
| 670 | blind_set: | 718 | blind_set: |
| 671 | i = 0; | 719 | i = 0; |
| 672 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 720 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
| 673 | int output_type = ACPI_OTHER_OUTPUT; | 721 | int output_type = ACPI_OTHER_OUTPUT; |
| 674 | if (i >= 8) { | 722 | if (i >= max_outputs) { |
| 675 | dev_dbg(&dev->pdev->dev, | 723 | DRM_DEBUG_KMS("More than %u outputs in connector list\n", |
| 676 | "More than 8 outputs in connector list\n"); | 724 | max_outputs); |
| 677 | return; | 725 | return; |
| 678 | } | 726 | } |
| 679 | switch (connector->connector_type) { | 727 | switch (connector->connector_type) { |
| @@ -698,9 +746,8 @@ blind_set: | |||
| 698 | output_type = ACPI_LVDS_OUTPUT; | 746 | output_type = ACPI_LVDS_OUTPUT; |
| 699 | break; | 747 | break; |
| 700 | } | 748 | } |
| 701 | temp = ioread32(&opregion->acpi->didl[i]); | 749 | temp = get_did(opregion, i); |
| 702 | iowrite32(temp | (1<<31) | output_type | i, | 750 | set_did(opregion, i, temp | (1 << 31) | output_type | i); |
| 703 | &opregion->acpi->didl[i]); | ||
| 704 | i++; | 751 | i++; |
| 705 | } | 752 | } |
| 706 | goto end; | 753 | goto end; |
| @@ -720,7 +767,7 @@ static void intel_setup_cadls(struct drm_device *dev) | |||
| 720 | * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if | 767 | * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if |
| 721 | * there are less than eight devices. */ | 768 | * there are less than eight devices. */ |
| 722 | do { | 769 | do { |
| 723 | disp_id = ioread32(&opregion->acpi->didl[i]); | 770 | disp_id = get_did(opregion, i); |
| 724 | iowrite32(disp_id, &opregion->acpi->cadl[i]); | 771 | iowrite32(disp_id, &opregion->acpi->cadl[i]); |
| 725 | } while (++i < 8 && disp_id != 0); | 772 | } while (++i < 8 && disp_id != 0); |
| 726 | } | 773 | } |
| @@ -852,6 +899,11 @@ int intel_opregion_setup(struct drm_device *dev) | |||
| 852 | char buf[sizeof(OPREGION_SIGNATURE)]; | 899 | char buf[sizeof(OPREGION_SIGNATURE)]; |
| 853 | int err = 0; | 900 | int err = 0; |
| 854 | 901 | ||
| 902 | BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100); | ||
| 903 | BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100); | ||
| 904 | BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100); | ||
| 905 | BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100); | ||
| 906 | |||
| 855 | pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); | 907 | pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); |
| 856 | DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); | 908 | DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); |
| 857 | if (asls == 0) { | 909 | if (asls == 0) { |
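The new BUILD_BUG_ON()s pin each OpRegion mailbox at exactly 0x100 bytes, which is why the reserved fields earlier in this file shrink as fields are added; the sizes still balance (worked check):

    opregion_header: driver_model(4) + pcon(4) + dver[32] + rsvd[124] = 164 bytes, matching the old reserved[164]
    opregion_acpi:   did2[7] (28) + cpd2[7] (28) + rsvd2[4]           = 60 bytes, matching the old rsvd2[60]
    opregion_asle:   fdss(8) + fdsp(4) + stat(4) + rsvd[70]           = 86 bytes, matching the old rsvd[86]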
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 25c8ec697da1..444542696a2c 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
| @@ -210,19 +210,14 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay, | |||
| 210 | } | 210 | } |
| 211 | 211 | ||
| 212 | static int intel_overlay_do_wait_request(struct intel_overlay *overlay, | 212 | static int intel_overlay_do_wait_request(struct intel_overlay *overlay, |
| 213 | struct drm_i915_gem_request *req, | ||
| 213 | void (*tail)(struct intel_overlay *)) | 214 | void (*tail)(struct intel_overlay *)) |
| 214 | { | 215 | { |
| 215 | struct drm_device *dev = overlay->dev; | ||
| 216 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 217 | struct intel_engine_cs *ring = &dev_priv->ring[RCS]; | ||
| 218 | int ret; | 216 | int ret; |
| 219 | 217 | ||
| 220 | WARN_ON(overlay->last_flip_req); | 218 | WARN_ON(overlay->last_flip_req); |
| 221 | i915_gem_request_assign(&overlay->last_flip_req, | 219 | i915_gem_request_assign(&overlay->last_flip_req, req); |
| 222 | ring->outstanding_lazy_request); | 220 | i915_add_request(req); |
| 223 | ret = i915_add_request(ring); | ||
| 224 | if (ret) | ||
| 225 | return ret; | ||
| 226 | 221 | ||
| 227 | overlay->flip_tail = tail; | 222 | overlay->flip_tail = tail; |
| 228 | ret = i915_wait_request(overlay->last_flip_req); | 223 | ret = i915_wait_request(overlay->last_flip_req); |
| @@ -239,15 +234,22 @@ static int intel_overlay_on(struct intel_overlay *overlay) | |||
| 239 | struct drm_device *dev = overlay->dev; | 234 | struct drm_device *dev = overlay->dev; |
| 240 | struct drm_i915_private *dev_priv = dev->dev_private; | 235 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 241 | struct intel_engine_cs *ring = &dev_priv->ring[RCS]; | 236 | struct intel_engine_cs *ring = &dev_priv->ring[RCS]; |
| 237 | struct drm_i915_gem_request *req; | ||
| 242 | int ret; | 238 | int ret; |
| 243 | 239 | ||
| 244 | WARN_ON(overlay->active); | 240 | WARN_ON(overlay->active); |
| 245 | WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); | 241 | WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); |
| 246 | 242 | ||
| 247 | ret = intel_ring_begin(ring, 4); | 243 | ret = i915_gem_request_alloc(ring, ring->default_context, &req); |
| 248 | if (ret) | 244 | if (ret) |
| 249 | return ret; | 245 | return ret; |
| 250 | 246 | ||
| 247 | ret = intel_ring_begin(req, 4); | ||
| 248 | if (ret) { | ||
| 249 | i915_gem_request_cancel(req); | ||
| 250 | return ret; | ||
| 251 | } | ||
| 252 | |||
| 251 | overlay->active = true; | 253 | overlay->active = true; |
| 252 | 254 | ||
| 253 | intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON); | 255 | intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON); |
| @@ -256,7 +258,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) | |||
| 256 | intel_ring_emit(ring, MI_NOOP); | 258 | intel_ring_emit(ring, MI_NOOP); |
| 257 | intel_ring_advance(ring); | 259 | intel_ring_advance(ring); |
| 258 | 260 | ||
| 259 | return intel_overlay_do_wait_request(overlay, NULL); | 261 | return intel_overlay_do_wait_request(overlay, req, NULL); |
| 260 | } | 262 | } |
| 261 | 263 | ||
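The same conversion repeats through the rest of this file: the implicit ring->outstanding_lazy_request is replaced by an explicitly allocated request that is either cancelled on error or handed on for submission. A condensed sketch of the flow, using only calls visible in these hunks (the dword count and the tail callback vary per caller):

	struct drm_i915_gem_request *req;
	int ret;

	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, 4);
	if (ret) {
		/* nothing was emitted on this request, so drop it */
		i915_gem_request_cancel(req);
		return ret;
	}

	/* intel_ring_emit() / intel_ring_advance() as before */

	return intel_overlay_do_wait_request(overlay, req, NULL);
	/* or, on the continue path:
	 * i915_gem_request_assign(&overlay->last_flip_req, req);
	 * i915_add_request(req);
	 */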
| 262 | /* overlay needs to be enabled in OCMD reg */ | 264 | /* overlay needs to be enabled in OCMD reg */ |
| @@ -266,6 +268,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, | |||
| 266 | struct drm_device *dev = overlay->dev; | 268 | struct drm_device *dev = overlay->dev; |
| 267 | struct drm_i915_private *dev_priv = dev->dev_private; | 269 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 268 | struct intel_engine_cs *ring = &dev_priv->ring[RCS]; | 270 | struct intel_engine_cs *ring = &dev_priv->ring[RCS]; |
| 271 | struct drm_i915_gem_request *req; | ||
| 269 | u32 flip_addr = overlay->flip_addr; | 272 | u32 flip_addr = overlay->flip_addr; |
| 270 | u32 tmp; | 273 | u32 tmp; |
| 271 | int ret; | 274 | int ret; |
| @@ -280,18 +283,25 @@ static int intel_overlay_continue(struct intel_overlay *overlay, | |||
| 280 | if (tmp & (1 << 17)) | 283 | if (tmp & (1 << 17)) |
| 281 | DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); | 284 | DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); |
| 282 | 285 | ||
| 283 | ret = intel_ring_begin(ring, 2); | 286 | ret = i915_gem_request_alloc(ring, ring->default_context, &req); |
| 284 | if (ret) | 287 | if (ret) |
| 285 | return ret; | 288 | return ret; |
| 286 | 289 | ||
| 290 | ret = intel_ring_begin(req, 2); | ||
| 291 | if (ret) { | ||
| 292 | i915_gem_request_cancel(req); | ||
| 293 | return ret; | ||
| 294 | } | ||
| 295 | |||
| 287 | intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); | 296 | intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); |
| 288 | intel_ring_emit(ring, flip_addr); | 297 | intel_ring_emit(ring, flip_addr); |
| 289 | intel_ring_advance(ring); | 298 | intel_ring_advance(ring); |
| 290 | 299 | ||
| 291 | WARN_ON(overlay->last_flip_req); | 300 | WARN_ON(overlay->last_flip_req); |
| 292 | i915_gem_request_assign(&overlay->last_flip_req, | 301 | i915_gem_request_assign(&overlay->last_flip_req, req); |
| 293 | ring->outstanding_lazy_request); | 302 | i915_add_request(req); |
| 294 | return i915_add_request(ring); | 303 | |
| 304 | return 0; | ||
| 295 | } | 305 | } |
| 296 | 306 | ||
| 297 | static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) | 307 | static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) |
| @@ -327,6 +337,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) | |||
| 327 | struct drm_device *dev = overlay->dev; | 337 | struct drm_device *dev = overlay->dev; |
| 328 | struct drm_i915_private *dev_priv = dev->dev_private; | 338 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 329 | struct intel_engine_cs *ring = &dev_priv->ring[RCS]; | 339 | struct intel_engine_cs *ring = &dev_priv->ring[RCS]; |
| 340 | struct drm_i915_gem_request *req; | ||
| 330 | u32 flip_addr = overlay->flip_addr; | 341 | u32 flip_addr = overlay->flip_addr; |
| 331 | int ret; | 342 | int ret; |
| 332 | 343 | ||
| @@ -338,10 +349,16 @@ static int intel_overlay_off(struct intel_overlay *overlay) | |||
| 338 | * of the hw. Do it in both cases */ | 349 | * of the hw. Do it in both cases */ |
| 339 | flip_addr |= OFC_UPDATE; | 350 | flip_addr |= OFC_UPDATE; |
| 340 | 351 | ||
| 341 | ret = intel_ring_begin(ring, 6); | 352 | ret = i915_gem_request_alloc(ring, ring->default_context, &req); |
| 342 | if (ret) | 353 | if (ret) |
| 343 | return ret; | 354 | return ret; |
| 344 | 355 | ||
| 356 | ret = intel_ring_begin(req, 6); | ||
| 357 | if (ret) { | ||
| 358 | i915_gem_request_cancel(req); | ||
| 359 | return ret; | ||
| 360 | } | ||
| 361 | |||
| 345 | /* wait for overlay to go idle */ | 362 | /* wait for overlay to go idle */ |
| 346 | intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); | 363 | intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); |
| 347 | intel_ring_emit(ring, flip_addr); | 364 | intel_ring_emit(ring, flip_addr); |
| @@ -360,7 +377,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) | |||
| 360 | } | 377 | } |
| 361 | intel_ring_advance(ring); | 378 | intel_ring_advance(ring); |
| 362 | 379 | ||
| 363 | return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail); | 380 | return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail); |
| 364 | } | 381 | } |
| 365 | 382 | ||
| 366 | /* recover from an interruption due to a signal | 383 | /* recover from an interruption due to a signal |
| @@ -404,15 +421,23 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) | |||
| 404 | 421 | ||
| 405 | if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) { | 422 | if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) { |
| 406 | /* synchronous slowpath */ | 423 | /* synchronous slowpath */ |
| 407 | ret = intel_ring_begin(ring, 2); | 424 | struct drm_i915_gem_request *req; |
| 425 | |||
| 426 | ret = i915_gem_request_alloc(ring, ring->default_context, &req); | ||
| 408 | if (ret) | 427 | if (ret) |
| 409 | return ret; | 428 | return ret; |
| 410 | 429 | ||
| 430 | ret = intel_ring_begin(req, 2); | ||
| 431 | if (ret) { | ||
| 432 | i915_gem_request_cancel(req); | ||
| 433 | return ret; | ||
| 434 | } | ||
| 435 | |||
| 411 | intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | 436 | intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); |
| 412 | intel_ring_emit(ring, MI_NOOP); | 437 | intel_ring_emit(ring, MI_NOOP); |
| 413 | intel_ring_advance(ring); | 438 | intel_ring_advance(ring); |
| 414 | 439 | ||
| 415 | ret = intel_overlay_do_wait_request(overlay, | 440 | ret = intel_overlay_do_wait_request(overlay, req, |
| 416 | intel_overlay_release_old_vid_tail); | 441 | intel_overlay_release_old_vid_tail); |
| 417 | if (ret) | 442 | if (ret) |
| 418 | return ret; | 443 | return ret; |
| @@ -724,7 +749,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
| 724 | if (ret != 0) | 749 | if (ret != 0) |
| 725 | return ret; | 750 | return ret; |
| 726 | 751 | ||
| 727 | ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL, | 752 | ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL, NULL, |
| 728 | &i915_ggtt_view_normal); | 753 | &i915_ggtt_view_normal); |
| 729 | if (ret != 0) | 754 | if (ret != 0) |
| 730 | return ret; | 755 | return ret; |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index eadc15cddbeb..0d3e01434860 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -59,6 +59,10 @@ static void gen9_init_clock_gating(struct drm_device *dev) | |||
| 59 | /* WaEnableLbsSlaRetryTimerDecrement:skl */ | 59 | /* WaEnableLbsSlaRetryTimerDecrement:skl */ |
| 60 | I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | | 60 | I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | |
| 61 | GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); | 61 | GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); |
| 62 | |||
| 63 | /* WaDisableKillLogic:bxt,skl */ | ||
| 64 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | | ||
| 65 | ECOCHK_DIS_TLB); | ||
| 62 | } | 66 | } |
| 63 | 67 | ||
| 64 | static void skl_init_clock_gating(struct drm_device *dev) | 68 | static void skl_init_clock_gating(struct drm_device *dev) |
| @@ -91,6 +95,9 @@ static void skl_init_clock_gating(struct drm_device *dev) | |||
| 91 | _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); | 95 | _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); |
| 92 | } | 96 | } |
| 93 | 97 | ||
| 98 | /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes | ||
| 99 | * involving this register should also be added to WA batch as required. | ||
| 100 | */ | ||
| 94 | if (INTEL_REVID(dev) <= SKL_REVID_E0) | 101 | if (INTEL_REVID(dev) <= SKL_REVID_E0) |
| 95 | /* WaDisableLSQCROPERFforOCL:skl */ | 102 | /* WaDisableLSQCROPERFforOCL:skl */ |
| 96 | I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | | 103 | I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | |
| @@ -334,22 +341,26 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) | |||
| 334 | 341 | ||
| 335 | if (IS_VALLEYVIEW(dev)) { | 342 | if (IS_VALLEYVIEW(dev)) { |
| 336 | I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); | 343 | I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); |
| 337 | if (IS_CHERRYVIEW(dev)) | 344 | POSTING_READ(FW_BLC_SELF_VLV); |
| 338 | chv_set_memory_pm5(dev_priv, enable); | 345 | dev_priv->wm.vlv.cxsr = enable; |
| 339 | } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) { | 346 | } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) { |
| 340 | I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); | 347 | I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); |
| 348 | POSTING_READ(FW_BLC_SELF); | ||
| 341 | } else if (IS_PINEVIEW(dev)) { | 349 | } else if (IS_PINEVIEW(dev)) { |
| 342 | val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN; | 350 | val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN; |
| 343 | val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0; | 351 | val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0; |
| 344 | I915_WRITE(DSPFW3, val); | 352 | I915_WRITE(DSPFW3, val); |
| 353 | POSTING_READ(DSPFW3); | ||
| 345 | } else if (IS_I945G(dev) || IS_I945GM(dev)) { | 354 | } else if (IS_I945G(dev) || IS_I945GM(dev)) { |
| 346 | val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) : | 355 | val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) : |
| 347 | _MASKED_BIT_DISABLE(FW_BLC_SELF_EN); | 356 | _MASKED_BIT_DISABLE(FW_BLC_SELF_EN); |
| 348 | I915_WRITE(FW_BLC_SELF, val); | 357 | I915_WRITE(FW_BLC_SELF, val); |
| 358 | POSTING_READ(FW_BLC_SELF); | ||
| 349 | } else if (IS_I915GM(dev)) { | 359 | } else if (IS_I915GM(dev)) { |
| 350 | val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) : | 360 | val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) : |
| 351 | _MASKED_BIT_DISABLE(INSTPM_SELF_EN); | 361 | _MASKED_BIT_DISABLE(INSTPM_SELF_EN); |
| 352 | I915_WRITE(INSTPM, val); | 362 | I915_WRITE(INSTPM, val); |
| 363 | POSTING_READ(INSTPM); | ||
| 353 | } else { | 364 | } else { |
| 354 | return; | 365 | return; |
| 355 | } | 366 | } |
| @@ -923,223 +934,484 @@ static void vlv_write_wm_values(struct intel_crtc *crtc, | |||
| 923 | FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI)); | 934 | FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI)); |
| 924 | } | 935 | } |
| 925 | 936 | ||
| 926 | POSTING_READ(DSPFW1); | 937 | /* zero (unused) WM1 watermarks */ |
| 938 | I915_WRITE(DSPFW4, 0); | ||
| 939 | I915_WRITE(DSPFW5, 0); | ||
| 940 | I915_WRITE(DSPFW6, 0); | ||
| 941 | I915_WRITE(DSPHOWM1, 0); | ||
| 927 | 942 | ||
| 928 | dev_priv->wm.vlv = *wm; | 943 | POSTING_READ(DSPFW1); |
| 929 | } | 944 | } |
| 930 | 945 | ||
| 931 | #undef FW_WM_VLV | 946 | #undef FW_WM_VLV |
| 932 | 947 | ||
| 933 | static uint8_t vlv_compute_drain_latency(struct drm_crtc *crtc, | 948 | enum vlv_wm_level { |
| 934 | struct drm_plane *plane) | 949 | VLV_WM_LEVEL_PM2, |
| 935 | { | 950 | VLV_WM_LEVEL_PM5, |
| 936 | struct drm_device *dev = crtc->dev; | 951 | VLV_WM_LEVEL_DDR_DVFS, |
| 937 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 952 | CHV_WM_NUM_LEVELS, |
| 938 | int entries, prec_mult, drain_latency, pixel_size; | 953 | VLV_WM_NUM_LEVELS = 1, |
| 939 | int clock = intel_crtc->config->base.adjusted_mode.crtc_clock; | 954 | }; |
| 940 | const int high_precision = IS_CHERRYVIEW(dev) ? 16 : 64; | ||
| 941 | |||
| 942 | /* | ||
| 943 | * FIXME the plane might have an fb | ||
| 944 | * but be invisible (eg. due to clipping) | ||
| 945 | */ | ||
| 946 | if (!intel_crtc->active || !plane->state->fb) | ||
| 947 | return 0; | ||
| 948 | 955 | ||
| 949 | if (WARN(clock == 0, "Pixel clock is zero!\n")) | 956 | /* latency must be in 0.1us units. */ |
| 950 | return 0; | 957 | static unsigned int vlv_wm_method2(unsigned int pixel_rate, |
| 958 | unsigned int pipe_htotal, | ||
| 959 | unsigned int horiz_pixels, | ||
| 960 | unsigned int bytes_per_pixel, | ||
| 961 | unsigned int latency) | ||
| 962 | { | ||
| 963 | unsigned int ret; | ||
| 951 | 964 | ||
| 952 | pixel_size = drm_format_plane_cpp(plane->state->fb->pixel_format, 0); | 965 | ret = (latency * pixel_rate) / (pipe_htotal * 10000); |
| 966 | ret = (ret + 1) * horiz_pixels * bytes_per_pixel; | ||
| 967 | ret = DIV_ROUND_UP(ret, 64); | ||
| 953 | 968 | ||
| 954 | if (WARN(pixel_size == 0, "Pixel size is zero!\n")) | 969 | return ret; |
| 955 | return 0; | 970 | } |
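To put units on vlv_wm_method2() (latency in 0.1 usec steps, pixel rate in kHz, result in 64-byte FIFO lines), here is a worked example with assumed timings: 1920x1080 at a 148500 kHz pixel clock, htotal 2200, 4 bytes per pixel, and the 3 usec PM2 latency passed in as 30:

    ret = (30 * 148500) / (2200 * 10000)  = 0      (integer division)
    ret = (0 + 1) * 1920 * 4              = 7680   bytes
    ret = DIV_ROUND_UP(7680, 64)          = 120    FIFO lines

vlv_compute_wm() below then compares such a value against the plane's share of the 511-entry FIFO.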
| 956 | 971 | ||
| 957 | entries = DIV_ROUND_UP(clock, 1000) * pixel_size; | 972 | static void vlv_setup_wm_latency(struct drm_device *dev) |
| 973 | { | ||
| 974 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 958 | 975 | ||
| 959 | prec_mult = high_precision; | 976 | /* all latencies in usec */ |
| 960 | drain_latency = 64 * prec_mult * 4 / entries; | 977 | dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; |
| 961 | 978 | ||
| 962 | if (drain_latency > DRAIN_LATENCY_MASK) { | 979 | if (IS_CHERRYVIEW(dev_priv)) { |
| 963 | prec_mult /= 2; | 980 | dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12; |
| 964 | drain_latency = 64 * prec_mult * 4 / entries; | 981 | dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33; |
| 965 | } | 982 | } |
| 966 | |||
| 967 | if (drain_latency > DRAIN_LATENCY_MASK) | ||
| 968 | drain_latency = DRAIN_LATENCY_MASK; | ||
| 969 | |||
| 970 | return drain_latency | (prec_mult == high_precision ? | ||
| 971 | DDL_PRECISION_HIGH : DDL_PRECISION_LOW); | ||
| 972 | } | 983 | } |
| 973 | 984 | ||
| 974 | static int vlv_compute_wm(struct intel_crtc *crtc, | 985 | static uint16_t vlv_compute_wm_level(struct intel_plane *plane, |
| 975 | struct intel_plane *plane, | 986 | struct intel_crtc *crtc, |
| 976 | int fifo_size) | 987 | const struct intel_plane_state *state, |
| 988 | int level) | ||
| 977 | { | 989 | { |
| 978 | int clock, entries, pixel_size; | 990 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
| 991 | int clock, htotal, pixel_size, width, wm; | ||
| 979 | 992 | ||
| 980 | /* | 993 | if (dev_priv->wm.pri_latency[level] == 0) |
| 981 | * FIXME the plane might have an fb | 994 | return USHRT_MAX; |
| 982 | * but be invisible (eg. due to clipping) | 995 | |
| 983 | */ | 996 | if (!state->visible) |
| 984 | if (!crtc->active || !plane->base.state->fb) | ||
| 985 | return 0; | 997 | return 0; |
| 986 | 998 | ||
| 987 | pixel_size = drm_format_plane_cpp(plane->base.state->fb->pixel_format, 0); | 999 | pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0); |
| 988 | clock = crtc->config->base.adjusted_mode.crtc_clock; | 1000 | clock = crtc->config->base.adjusted_mode.crtc_clock; |
| 1001 | htotal = crtc->config->base.adjusted_mode.crtc_htotal; | ||
| 1002 | width = crtc->config->pipe_src_w; | ||
| 1003 | if (WARN_ON(htotal == 0)) | ||
| 1004 | htotal = 1; | ||
| 989 | 1005 | ||
| 990 | entries = DIV_ROUND_UP(clock, 1000) * pixel_size; | 1006 | if (plane->base.type == DRM_PLANE_TYPE_CURSOR) { |
| 1007 | /* | ||
| 1008 | * FIXME the formula gives values that are | ||
| 1009 | * too big for the cursor FIFO, and hence we | ||
| 1010 | * would never be able to use cursors. For | ||
| 1011 | * now just hardcode the watermark. | ||
| 1012 | */ | ||
| 1013 | wm = 63; | ||
| 1014 | } else { | ||
| 1015 | wm = vlv_wm_method2(clock, htotal, width, pixel_size, | ||
| 1016 | dev_priv->wm.pri_latency[level] * 10); | ||
| 1017 | } | ||
| 991 | 1018 | ||
| 992 | /* | 1019 | return min_t(int, wm, USHRT_MAX); |
| 993 | * Set up the watermark such that we don't start issuing memory | ||
| 994 | * requests until we are within PND's max deadline value (256us). | ||
| 995 | * Idea being to be idle as long as possible while still taking | ||
| 996 | * advatange of PND's deadline scheduling. The limit of 8 | ||
| 997 | * cachelines (used when the FIFO will anyway drain in less time | ||
| 998 | * than 256us) should match what we would be done if trickle | ||
| 999 | * feed were enabled. | ||
| 1000 | */ | ||
| 1001 | return fifo_size - clamp(DIV_ROUND_UP(256 * entries, 64), 0, fifo_size - 8); | ||
| 1002 | } | 1020 | } |
| 1003 | 1021 | ||
| 1004 | static bool vlv_compute_sr_wm(struct drm_device *dev, | 1022 | static void vlv_compute_fifo(struct intel_crtc *crtc) |
| 1005 | struct vlv_wm_values *wm) | ||
| 1006 | { | 1023 | { |
| 1007 | struct drm_i915_private *dev_priv = to_i915(dev); | 1024 | struct drm_device *dev = crtc->base.dev; |
| 1008 | struct drm_crtc *crtc; | 1025 | struct vlv_wm_state *wm_state = &crtc->wm_state; |
| 1009 | enum pipe pipe = INVALID_PIPE; | ||
| 1010 | int num_planes = 0; | ||
| 1011 | int fifo_size = 0; | ||
| 1012 | struct intel_plane *plane; | 1026 | struct intel_plane *plane; |
| 1027 | unsigned int total_rate = 0; | ||
| 1028 | const int fifo_size = 512 - 1; | ||
| 1029 | int fifo_extra, fifo_left = fifo_size; | ||
| 1013 | 1030 | ||
| 1014 | wm->sr.cursor = wm->sr.plane = 0; | 1031 | for_each_intel_plane_on_crtc(dev, crtc, plane) { |
| 1032 | struct intel_plane_state *state = | ||
| 1033 | to_intel_plane_state(plane->base.state); | ||
| 1015 | 1034 | ||
| 1016 | crtc = single_enabled_crtc(dev); | 1035 | if (plane->base.type == DRM_PLANE_TYPE_CURSOR) |
| 1017 | /* maxfifo not supported on pipe C */ | 1036 | continue; |
| 1018 | if (crtc && to_intel_crtc(crtc)->pipe != PIPE_C) { | 1037 | |
| 1019 | pipe = to_intel_crtc(crtc)->pipe; | 1038 | if (state->visible) { |
| 1020 | num_planes = !!wm->pipe[pipe].primary + | 1039 | wm_state->num_active_planes++; |
| 1021 | !!wm->pipe[pipe].sprite[0] + | 1040 | total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0); |
| 1022 | !!wm->pipe[pipe].sprite[1]; | 1041 | } |
| 1023 | fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1; | ||
| 1024 | } | 1042 | } |
| 1025 | 1043 | ||
| 1026 | if (fifo_size == 0 || num_planes > 1) | 1044 | for_each_intel_plane_on_crtc(dev, crtc, plane) { |
| 1027 | return false; | 1045 | struct intel_plane_state *state = |
| 1046 | to_intel_plane_state(plane->base.state); | ||
| 1047 | unsigned int rate; | ||
| 1048 | |||
| 1049 | if (plane->base.type == DRM_PLANE_TYPE_CURSOR) { | ||
| 1050 | plane->wm.fifo_size = 63; | ||
| 1051 | continue; | ||
| 1052 | } | ||
| 1053 | |||
| 1054 | if (!state->visible) { | ||
| 1055 | plane->wm.fifo_size = 0; | ||
| 1056 | continue; | ||
| 1057 | } | ||
| 1058 | |||
| 1059 | rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0); | ||
| 1060 | plane->wm.fifo_size = fifo_size * rate / total_rate; | ||
| 1061 | fifo_left -= plane->wm.fifo_size; | ||
| 1062 | } | ||
| 1028 | 1063 | ||
| 1029 | wm->sr.cursor = vlv_compute_wm(to_intel_crtc(crtc), | 1064 | fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1); |
| 1030 | to_intel_plane(crtc->cursor), 0x3f); | 1065 | |
| 1066 | /* spread the remainder evenly */ | ||
| 1067 | for_each_intel_plane_on_crtc(dev, crtc, plane) { | ||
| 1068 | int plane_extra; | ||
| 1069 | |||
| 1070 | if (fifo_left == 0) | ||
| 1071 | break; | ||
| 1031 | 1072 | ||
| 1032 | list_for_each_entry(plane, &dev->mode_config.plane_list, base.head) { | ||
| 1033 | if (plane->base.type == DRM_PLANE_TYPE_CURSOR) | 1073 | if (plane->base.type == DRM_PLANE_TYPE_CURSOR) |
| 1034 | continue; | 1074 | continue; |
| 1035 | 1075 | ||
| 1036 | if (plane->pipe != pipe) | 1076 | /* give it all to the first plane if none are active */ |
| 1077 | if (plane->wm.fifo_size == 0 && | ||
| 1078 | wm_state->num_active_planes) | ||
| 1079 | continue; | ||
| 1080 | |||
| 1081 | plane_extra = min(fifo_extra, fifo_left); | ||
| 1082 | plane->wm.fifo_size += plane_extra; | ||
| 1083 | fifo_left -= plane_extra; | ||
| 1084 | } | ||
| 1085 | |||
| 1086 | WARN_ON(fifo_left != 0); | ||
| 1087 | } | ||
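To see how vlv_compute_fifo() splits the 511-entry FIFO, take an assumed case of two visible non-cursor planes, both 4 bytes per pixel (worked example):

    total_rate      = 4 + 4 = 8
    each plane      = 511 * 4 / 8 = 255,  fifo_left = 511 - 255 - 255 = 1
    fifo_extra      = DIV_ROUND_UP(1, 2) = 1
    remainder loop  : the first plane takes min(1, 1) = 1 and ends at 256, the second keeps 255

256 + 255 = 511, so the closing WARN_ON(fifo_left != 0) stays quiet; the cursor's fixed 63 entries are tracked separately.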
| 1088 | |||
| 1089 | static void vlv_invert_wms(struct intel_crtc *crtc) | ||
| 1090 | { | ||
| 1091 | struct vlv_wm_state *wm_state = &crtc->wm_state; | ||
| 1092 | int level; | ||
| 1093 | |||
| 1094 | for (level = 0; level < wm_state->num_levels; level++) { | ||
| 1095 | struct drm_device *dev = crtc->base.dev; | ||
| 1096 | const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1; | ||
| 1097 | struct intel_plane *plane; | ||
| 1098 | |||
| 1099 | wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane; | ||
| 1100 | wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor; | ||
| 1101 | |||
| 1102 | for_each_intel_plane_on_crtc(dev, crtc, plane) { | ||
| 1103 | switch (plane->base.type) { | ||
| 1104 | int sprite; | ||
| 1105 | case DRM_PLANE_TYPE_CURSOR: | ||
| 1106 | wm_state->wm[level].cursor = plane->wm.fifo_size - | ||
| 1107 | wm_state->wm[level].cursor; | ||
| 1108 | break; | ||
| 1109 | case DRM_PLANE_TYPE_PRIMARY: | ||
| 1110 | wm_state->wm[level].primary = plane->wm.fifo_size - | ||
| 1111 | wm_state->wm[level].primary; | ||
| 1112 | break; | ||
| 1113 | case DRM_PLANE_TYPE_OVERLAY: | ||
| 1114 | sprite = plane->plane; | ||
| 1115 | wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size - | ||
| 1116 | wm_state->wm[level].sprite[sprite]; | ||
| 1117 | break; | ||
| 1118 | } | ||
| 1119 | } | ||
| 1120 | } | ||
| 1121 | } | ||
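The inversion above rewrites every computed watermark as its FIFO complement, value = fifo_size - watermark. With illustrative numbers, a plane whose FIFO share is 256 and whose level-0 watermark came out at 120 ends up storing 256 - 120 = 136, the hardcoded cursor case gives 63 - 63 = 0, and the sr values are complemented against the INTEL_INFO(dev)->num_pipes * 512 - 1 total in the same way.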
| 1122 | |||
| 1123 | static void vlv_compute_wm(struct intel_crtc *crtc) | ||
| 1124 | { | ||
| 1125 | struct drm_device *dev = crtc->base.dev; | ||
| 1126 | struct vlv_wm_state *wm_state = &crtc->wm_state; | ||
| 1127 | struct intel_plane *plane; | ||
| 1128 | int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1; | ||
| 1129 | int level; | ||
| 1130 | |||
| 1131 | memset(wm_state, 0, sizeof(*wm_state)); | ||
| 1132 | |||
| 1133 | wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed; | ||
| 1134 | if (IS_CHERRYVIEW(dev)) | ||
| 1135 | wm_state->num_levels = CHV_WM_NUM_LEVELS; | ||
| 1136 | else | ||
| 1137 | wm_state->num_levels = VLV_WM_NUM_LEVELS; | ||
| 1138 | |||
| 1139 | wm_state->num_active_planes = 0; | ||
| 1140 | |||
| 1141 | vlv_compute_fifo(crtc); | ||
| 1142 | |||
| 1143 | if (wm_state->num_active_planes != 1) | ||
| 1144 | wm_state->cxsr = false; | ||
| 1145 | |||
| 1146 | if (wm_state->cxsr) { | ||
| 1147 | for (level = 0; level < wm_state->num_levels; level++) { | ||
| 1148 | wm_state->sr[level].plane = sr_fifo_size; | ||
| 1149 | wm_state->sr[level].cursor = 63; | ||
| 1150 | } | ||
| 1151 | } | ||
| 1152 | |||
| 1153 | for_each_intel_plane_on_crtc(dev, crtc, plane) { | ||
| 1154 | struct intel_plane_state *state = | ||
| 1155 | to_intel_plane_state(plane->base.state); | ||
| 1156 | |||
| 1157 | if (!state->visible) | ||
| 1158 | continue; | ||
| 1159 | |||
| 1160 | /* normal watermarks */ | ||
| 1161 | for (level = 0; level < wm_state->num_levels; level++) { | ||
| 1162 | int wm = vlv_compute_wm_level(plane, crtc, state, level); | ||
| 1163 | int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511; | ||
| 1164 | |||
| 1165 | /* hack */ | ||
| 1166 | if (WARN_ON(level == 0 && wm > max_wm)) | ||
| 1167 | wm = max_wm; | ||
| 1168 | |||
| 1169 | if (wm > plane->wm.fifo_size) | ||
| 1170 | break; | ||
| 1171 | |||
| 1172 | switch (plane->base.type) { | ||
| 1173 | int sprite; | ||
| 1174 | case DRM_PLANE_TYPE_CURSOR: | ||
| 1175 | wm_state->wm[level].cursor = wm; | ||
| 1176 | break; | ||
| 1177 | case DRM_PLANE_TYPE_PRIMARY: | ||
| 1178 | wm_state->wm[level].primary = wm; | ||
| 1179 | break; | ||
| 1180 | case DRM_PLANE_TYPE_OVERLAY: | ||
| 1181 | sprite = plane->plane; | ||
| 1182 | wm_state->wm[level].sprite[sprite] = wm; | ||
| 1183 | break; | ||
| 1184 | } | ||
| 1185 | } | ||
| 1186 | |||
| 1187 | wm_state->num_levels = level; | ||
| 1188 | |||
| 1189 | if (!wm_state->cxsr) | ||
| 1037 | continue; | 1190 | continue; |
| 1038 | 1191 | ||
| 1039 | wm->sr.plane = vlv_compute_wm(to_intel_crtc(crtc), | 1192 | /* maxfifo watermarks */ |
| 1040 | plane, fifo_size); | 1193 | switch (plane->base.type) { |
| 1041 | if (wm->sr.plane != 0) | 1194 | int sprite, level; |
| 1195 | case DRM_PLANE_TYPE_CURSOR: | ||
| 1196 | for (level = 0; level < wm_state->num_levels; level++) | ||
| 1197 | wm_state->sr[level].cursor = | ||
| 1198 | wm_state->sr[level].cursor; | ||
| 1199 | break; | ||
| 1200 | case DRM_PLANE_TYPE_PRIMARY: | ||
| 1201 | for (level = 0; level < wm_state->num_levels; level++) | ||
| 1202 | wm_state->sr[level].plane = | ||
| 1203 | min(wm_state->sr[level].plane, | ||
| 1204 | wm_state->wm[level].primary); | ||
| 1042 | break; | 1205 | break; |
| 1206 | case DRM_PLANE_TYPE_OVERLAY: | ||
| 1207 | sprite = plane->plane; | ||
| 1208 | for (level = 0; level < wm_state->num_levels; level++) | ||
| 1209 | wm_state->sr[level].plane = | ||
| 1210 | min(wm_state->sr[level].plane, | ||
| 1211 | wm_state->wm[level].sprite[sprite]); | ||
| 1212 | break; | ||
| 1213 | } | ||
| 1043 | } | 1214 | } |
| 1044 | 1215 | ||
| 1045 | return true; | 1216 | /* clear any (partially) filled invalid levels */ |
| 1217 | for (level = wm_state->num_levels; level < CHV_WM_NUM_LEVELS; level++) { | ||
| 1218 | memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level])); | ||
| 1219 | memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level])); | ||
| 1220 | } | ||
| 1221 | |||
| 1222 | vlv_invert_wms(crtc); | ||
| 1046 | } | 1223 | } |
| 1047 | 1224 | ||
| 1048 | static void valleyview_update_wm(struct drm_crtc *crtc) | 1225 | #define VLV_FIFO(plane, value) \ |
| 1226 | (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV) | ||
| 1227 | |||
| 1228 | static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc) | ||
| 1049 | { | 1229 | { |
| 1050 | struct drm_device *dev = crtc->dev; | 1230 | struct drm_device *dev = crtc->base.dev; |
| 1051 | struct drm_i915_private *dev_priv = dev->dev_private; | 1231 | struct drm_i915_private *dev_priv = to_i915(dev); |
| 1052 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1232 | struct intel_plane *plane; |
| 1053 | enum pipe pipe = intel_crtc->pipe; | 1233 | int sprite0_start = 0, sprite1_start = 0, fifo_size = 0; |
| 1054 | bool cxsr_enabled; | ||
| 1055 | struct vlv_wm_values wm = dev_priv->wm.vlv; | ||
| 1056 | 1234 | ||
| 1057 | wm.ddl[pipe].primary = vlv_compute_drain_latency(crtc, crtc->primary); | 1235 | for_each_intel_plane_on_crtc(dev, crtc, plane) { |
| 1058 | wm.pipe[pipe].primary = vlv_compute_wm(intel_crtc, | 1236 | if (plane->base.type == DRM_PLANE_TYPE_CURSOR) { |
| 1059 | to_intel_plane(crtc->primary), | 1237 | WARN_ON(plane->wm.fifo_size != 63); |
| 1060 | vlv_get_fifo_size(dev, pipe, 0)); | 1238 | continue; |
| 1239 | } | ||
| 1061 | 1240 | ||
| 1062 | wm.ddl[pipe].cursor = vlv_compute_drain_latency(crtc, crtc->cursor); | 1241 | if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) |
| 1063 | wm.pipe[pipe].cursor = vlv_compute_wm(intel_crtc, | 1242 | sprite0_start = plane->wm.fifo_size; |
| 1064 | to_intel_plane(crtc->cursor), | 1243 | else if (plane->plane == 0) |
| 1065 | 0x3f); | 1244 | sprite1_start = sprite0_start + plane->wm.fifo_size; |
| 1245 | else | ||
| 1246 | fifo_size = sprite1_start + plane->wm.fifo_size; | ||
| 1247 | } | ||
| 1066 | 1248 | ||
| 1067 | cxsr_enabled = vlv_compute_sr_wm(dev, &wm); | 1249 | WARN_ON(fifo_size != 512 - 1); |
| 1068 | 1250 | ||
| 1069 | if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0) | 1251 | DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n", |
| 1070 | return; | 1252 | pipe_name(crtc->pipe), sprite0_start, |
| 1253 | sprite1_start, fifo_size); | ||
| 1071 | 1254 | ||
| 1072 | DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, " | 1255 | switch (crtc->pipe) { |
| 1073 | "SR: plane=%d, cursor=%d\n", pipe_name(pipe), | 1256 | uint32_t dsparb, dsparb2, dsparb3; |
| 1074 | wm.pipe[pipe].primary, wm.pipe[pipe].cursor, | 1257 | case PIPE_A: |
| 1075 | wm.sr.plane, wm.sr.cursor); | 1258 | dsparb = I915_READ(DSPARB); |
| 1259 | dsparb2 = I915_READ(DSPARB2); | ||
| 1076 | 1260 | ||
| 1077 | /* | 1261 | dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) | |
| 1078 | * FIXME DDR DVFS introduces massive memory latencies which | 1262 | VLV_FIFO(SPRITEB, 0xff)); |
| 1079 | * are not known to system agent so any deadline specified | 1263 | dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) | |
| 1080 | * by the display may not be respected. To support DDR DVFS | 1264 | VLV_FIFO(SPRITEB, sprite1_start)); |
| 1081 | * the watermark code needs to be rewritten to essentially | ||
| 1082 | * bypass deadline mechanism and rely solely on the | ||
| 1083 | * watermarks. For now disable DDR DVFS. | ||
| 1084 | */ | ||
| 1085 | if (IS_CHERRYVIEW(dev_priv)) | ||
| 1086 | chv_set_memory_dvfs(dev_priv, false); | ||
| 1087 | 1265 | ||
| 1088 | if (!cxsr_enabled) | 1266 | dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) | |
| 1089 | intel_set_memory_cxsr(dev_priv, false); | 1267 | VLV_FIFO(SPRITEB_HI, 0x1)); |
| 1268 | dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) | | ||
| 1269 | VLV_FIFO(SPRITEB_HI, sprite1_start >> 8)); | ||
| 1090 | 1270 | ||
| 1091 | vlv_write_wm_values(intel_crtc, &wm); | 1271 | I915_WRITE(DSPARB, dsparb); |
| 1272 | I915_WRITE(DSPARB2, dsparb2); | ||
| 1273 | break; | ||
| 1274 | case PIPE_B: | ||
| 1275 | dsparb = I915_READ(DSPARB); | ||
| 1276 | dsparb2 = I915_READ(DSPARB2); | ||
| 1092 | 1277 | ||
| 1093 | if (cxsr_enabled) | 1278 | dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) | |
| 1094 | intel_set_memory_cxsr(dev_priv, true); | 1279 | VLV_FIFO(SPRITED, 0xff)); |
| 1280 | dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) | | ||
| 1281 | VLV_FIFO(SPRITED, sprite1_start)); | ||
| 1282 | |||
| 1283 | dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) | | ||
| 1284 | VLV_FIFO(SPRITED_HI, 0xff)); | ||
| 1285 | dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) | | ||
| 1286 | VLV_FIFO(SPRITED_HI, sprite1_start >> 8)); | ||
| 1287 | |||
| 1288 | I915_WRITE(DSPARB, dsparb); | ||
| 1289 | I915_WRITE(DSPARB2, dsparb2); | ||
| 1290 | break; | ||
| 1291 | case PIPE_C: | ||
| 1292 | dsparb3 = I915_READ(DSPARB3); | ||
| 1293 | dsparb2 = I915_READ(DSPARB2); | ||
| 1294 | |||
| 1295 | dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) | | ||
| 1296 | VLV_FIFO(SPRITEF, 0xff)); | ||
| 1297 | dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) | | ||
| 1298 | VLV_FIFO(SPRITEF, sprite1_start)); | ||
| 1299 | |||
| 1300 | dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) | | ||
| 1301 | VLV_FIFO(SPRITEF_HI, 0xff)); | ||
| 1302 | dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) | | ||
| 1303 | VLV_FIFO(SPRITEF_HI, sprite1_start >> 8)); | ||
| 1304 | |||
| 1305 | I915_WRITE(DSPARB3, dsparb3); | ||
| 1306 | I915_WRITE(DSPARB2, dsparb2); | ||
| 1307 | break; | ||
| 1308 | default: | ||
| 1309 | break; | ||
| 1310 | } | ||
| 1095 | } | 1311 | } |
| 1096 | 1312 | ||
| 1097 | static void valleyview_update_sprite_wm(struct drm_plane *plane, | 1313 | #undef VLV_FIFO |
| 1098 | struct drm_crtc *crtc, | 1314 | |
| 1099 | uint32_t sprite_width, | 1315 | static void vlv_merge_wm(struct drm_device *dev, |
| 1100 | uint32_t sprite_height, | 1316 | struct vlv_wm_values *wm) |
| 1101 | int pixel_size, | 1317 | { |
| 1102 | bool enabled, bool scaled) | 1318 | struct intel_crtc *crtc; |
| 1319 | int num_active_crtcs = 0; | ||
| 1320 | |||
| 1321 | if (IS_CHERRYVIEW(dev)) | ||
| 1322 | wm->level = VLV_WM_LEVEL_DDR_DVFS; | ||
| 1323 | else | ||
| 1324 | wm->level = VLV_WM_LEVEL_PM2; | ||
| 1325 | wm->cxsr = true; | ||
| 1326 | |||
| 1327 | for_each_intel_crtc(dev, crtc) { | ||
| 1328 | const struct vlv_wm_state *wm_state = &crtc->wm_state; | ||
| 1329 | |||
| 1330 | if (!crtc->active) | ||
| 1331 | continue; | ||
| 1332 | |||
| 1333 | if (!wm_state->cxsr) | ||
| 1334 | wm->cxsr = false; | ||
| 1335 | |||
| 1336 | num_active_crtcs++; | ||
| 1337 | wm->level = min_t(int, wm->level, wm_state->num_levels - 1); | ||
| 1338 | } | ||
| 1339 | |||
| 1340 | if (num_active_crtcs != 1) | ||
| 1341 | wm->cxsr = false; | ||
| 1342 | |||
| 1343 | if (num_active_crtcs > 1) | ||
| 1344 | wm->level = VLV_WM_LEVEL_PM2; | ||
| 1345 | |||
| 1346 | for_each_intel_crtc(dev, crtc) { | ||
| 1347 | struct vlv_wm_state *wm_state = &crtc->wm_state; | ||
| 1348 | enum pipe pipe = crtc->pipe; | ||
| 1349 | |||
| 1350 | if (!crtc->active) | ||
| 1351 | continue; | ||
| 1352 | |||
| 1353 | wm->pipe[pipe] = wm_state->wm[wm->level]; | ||
| 1354 | if (wm->cxsr) | ||
| 1355 | wm->sr = wm_state->sr[wm->level]; | ||
| 1356 | |||
| 1357 | wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2; | ||
| 1358 | wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2; | ||
| 1359 | wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2; | ||
| 1360 | wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2; | ||
| 1361 | } | ||
| 1362 | } | ||
| 1363 | |||
| 1364 | static void vlv_update_wm(struct drm_crtc *crtc) | ||
| 1103 | { | 1365 | { |
| 1104 | struct drm_device *dev = crtc->dev; | 1366 | struct drm_device *dev = crtc->dev; |
| 1105 | struct drm_i915_private *dev_priv = dev->dev_private; | 1367 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1106 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1368 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 1107 | enum pipe pipe = intel_crtc->pipe; | 1369 | enum pipe pipe = intel_crtc->pipe; |
| 1108 | int sprite = to_intel_plane(plane)->plane; | 1370 | struct vlv_wm_values wm = {}; |
| 1109 | bool cxsr_enabled; | ||
| 1110 | struct vlv_wm_values wm = dev_priv->wm.vlv; | ||
| 1111 | 1371 | ||
| 1112 | if (enabled) { | 1372 | vlv_compute_wm(intel_crtc); |
| 1113 | wm.ddl[pipe].sprite[sprite] = | 1373 | vlv_merge_wm(dev, &wm); |
| 1114 | vlv_compute_drain_latency(crtc, plane); | ||
| 1115 | 1374 | ||
| 1116 | wm.pipe[pipe].sprite[sprite] = | 1375 | if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) { |
| 1117 | vlv_compute_wm(intel_crtc, | 1376 | /* FIXME should be part of crtc atomic commit */ |
| 1118 | to_intel_plane(plane), | 1377 | vlv_pipe_set_fifo_size(intel_crtc); |
| 1119 | vlv_get_fifo_size(dev, pipe, sprite+1)); | 1378 | return; |
| 1120 | } else { | ||
| 1121 | wm.ddl[pipe].sprite[sprite] = 0; | ||
| 1122 | wm.pipe[pipe].sprite[sprite] = 0; | ||
| 1123 | } | 1379 | } |
| 1124 | 1380 | ||
| 1125 | cxsr_enabled = vlv_compute_sr_wm(dev, &wm); | 1381 | if (wm.level < VLV_WM_LEVEL_DDR_DVFS && |
| 1126 | 1382 | dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS) | |
| 1127 | if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0) | 1383 | chv_set_memory_dvfs(dev_priv, false); |
| 1128 | return; | ||
| 1129 | 1384 | ||
| 1130 | DRM_DEBUG_KMS("Setting FIFO watermarks - %c: sprite %c=%d, " | 1385 | if (wm.level < VLV_WM_LEVEL_PM5 && |
| 1131 | "SR: plane=%d, cursor=%d\n", pipe_name(pipe), | 1386 | dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5) |
| 1132 | sprite_name(pipe, sprite), | 1387 | chv_set_memory_pm5(dev_priv, false); |
| 1133 | wm.pipe[pipe].sprite[sprite], | ||
| 1134 | wm.sr.plane, wm.sr.cursor); | ||
| 1135 | 1388 | ||
| 1136 | if (!cxsr_enabled) | 1389 | if (!wm.cxsr && dev_priv->wm.vlv.cxsr) |
| 1137 | intel_set_memory_cxsr(dev_priv, false); | 1390 | intel_set_memory_cxsr(dev_priv, false); |
| 1138 | 1391 | ||
| 1392 | /* FIXME should be part of crtc atomic commit */ | ||
| 1393 | vlv_pipe_set_fifo_size(intel_crtc); | ||
| 1394 | |||
| 1139 | vlv_write_wm_values(intel_crtc, &wm); | 1395 | vlv_write_wm_values(intel_crtc, &wm); |
| 1140 | 1396 | ||
| 1141 | if (cxsr_enabled) | 1397 | DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, " |
| 1398 | "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n", | ||
| 1399 | pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor, | ||
| 1400 | wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1], | ||
| 1401 | wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr); | ||
| 1402 | |||
| 1403 | if (wm.cxsr && !dev_priv->wm.vlv.cxsr) | ||
| 1142 | intel_set_memory_cxsr(dev_priv, true); | 1404 | intel_set_memory_cxsr(dev_priv, true); |
| 1405 | |||
| 1406 | if (wm.level >= VLV_WM_LEVEL_PM5 && | ||
| 1407 | dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5) | ||
| 1408 | chv_set_memory_pm5(dev_priv, true); | ||
| 1409 | |||
| 1410 | if (wm.level >= VLV_WM_LEVEL_DDR_DVFS && | ||
| 1411 | dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS) | ||
| 1412 | chv_set_memory_dvfs(dev_priv, true); | ||
| 1413 | |||
| 1414 | dev_priv->wm.vlv = wm; | ||
| 1143 | } | 1415 | } |
| 1144 | 1416 | ||
| 1145 | #define single_plane_enabled(mask) is_power_of_2(mask) | 1417 | #define single_plane_enabled(mask) is_power_of_2(mask) |
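For readers navigating the large hunk above: the rewritten VLV/CHV path now separates a pure per-CRTC computation step (vlv_compute_wm) from a cross-pipe merge (vlv_merge_wm), and only then reprograms hardware, stepping DVFS/PM5/cxsr down before the register write and back up afterwards. A simplified sketch of that ordering, using only functions visible in the hunk and omitting the FIFO repartitioning and power-level transitions (not part of the patch):

static void example_vlv_wm_flow(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct vlv_wm_values wm = {};

	/* 1) fill intel_crtc->wm_state from the current plane states */
	vlv_compute_wm(intel_crtc);

	/* 2) pick the deepest level every active pipe supports and decide
	 *    whether single-pipe maxfifo/cxsr is allowed */
	vlv_merge_wm(crtc->dev, &wm);

	/* 3) skip the hardware write when nothing changed */
	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0)
		return;

	/* 4) program the registers and remember the new state */
	vlv_write_wm_values(intel_crtc, &wm);
	dev_priv->wm.vlv = wm;
}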
| @@ -1434,23 +1706,22 @@ static void i845_update_wm(struct drm_crtc *unused_crtc) | |||
| 1434 | I915_WRITE(FW_BLC, fwater_lo); | 1706 | I915_WRITE(FW_BLC, fwater_lo); |
| 1435 | } | 1707 | } |
| 1436 | 1708 | ||
| 1437 | static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev, | 1709 | uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) |
| 1438 | struct drm_crtc *crtc) | ||
| 1439 | { | 1710 | { |
| 1440 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
| 1441 | uint32_t pixel_rate; | 1711 | uint32_t pixel_rate; |
| 1442 | 1712 | ||
| 1443 | pixel_rate = intel_crtc->config->base.adjusted_mode.crtc_clock; | 1713 | pixel_rate = pipe_config->base.adjusted_mode.crtc_clock; |
| 1444 | 1714 | ||
| 1445 | /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to | 1715 | /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to |
| 1446 | * adjust the pixel_rate here. */ | 1716 | * adjust the pixel_rate here. */ |
| 1447 | 1717 | ||
| 1448 | if (intel_crtc->config->pch_pfit.enabled) { | 1718 | if (pipe_config->pch_pfit.enabled) { |
| 1449 | uint64_t pipe_w, pipe_h, pfit_w, pfit_h; | 1719 | uint64_t pipe_w, pipe_h, pfit_w, pfit_h; |
| 1450 | uint32_t pfit_size = intel_crtc->config->pch_pfit.size; | 1720 | uint32_t pfit_size = pipe_config->pch_pfit.size; |
| 1721 | |||
| 1722 | pipe_w = pipe_config->pipe_src_w; | ||
| 1723 | pipe_h = pipe_config->pipe_src_h; | ||
| 1451 | 1724 | ||
| 1452 | pipe_w = intel_crtc->config->pipe_src_w; | ||
| 1453 | pipe_h = intel_crtc->config->pipe_src_h; | ||
| 1454 | pfit_w = (pfit_size >> 16) & 0xFFFF; | 1725 | pfit_w = (pfit_size >> 16) & 0xFFFF; |
| 1455 | pfit_h = pfit_size & 0xFFFF; | 1726 | pfit_h = pfit_size & 0xFFFF; |
| 1456 | if (pipe_w < pfit_w) | 1727 | if (pipe_w < pfit_w) |
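For reference on the pch_pfit.size handling in the hunk above: the value packs the fitted width in bits 31:16 and the fitted height in bits 15:0, which is why the code shifts by 16 and masks with 0xFFFF. A small standalone demonstration (the sample value is invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical pch_pfit.size: 1920 in the high half, 1080 in the low half */
	uint32_t pfit_size = (1920u << 16) | 1080u;

	uint32_t pfit_w = (pfit_size >> 16) & 0xFFFF;
	uint32_t pfit_h = pfit_size & 0xFFFF;

	printf("pfit %ux%u\n", pfit_w, pfit_h);	/* prints "pfit 1920x1080" */
	return 0;
}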
| @@ -1815,7 +2086,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc) | |||
| 1815 | linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8, | 2086 | linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8, |
| 1816 | mode->crtc_clock); | 2087 | mode->crtc_clock); |
| 1817 | ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8, | 2088 | ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8, |
| 1818 | dev_priv->display.get_display_clock_speed(dev_priv->dev)); | 2089 | dev_priv->cdclk_freq); |
| 1819 | 2090 | ||
| 1820 | return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | | 2091 | return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | |
| 1821 | PIPE_WM_LINETIME_TIME(linetime); | 2092 | PIPE_WM_LINETIME_TIME(linetime); |
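The linetime watermark above is expressed in 1/8 microsecond units: crtc_htotal * 1000 * 8 divided by a clock in kHz, with the IPS variant now using the cached dev_priv->cdclk_freq instead of the get_display_clock_speed callback. A standalone worked example with assumed mode and cdclk numbers:

#include <stdio.h>

/* DIV_ROUND_CLOSEST equivalent for positive integers */
static unsigned int div_round_closest(unsigned int n, unsigned int d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	unsigned int crtc_htotal = 2200;	/* assumed 1080p timing */
	unsigned int crtc_clock = 148500;	/* pixel clock, kHz */
	unsigned int cdclk_freq = 450000;	/* assumed cdclk, kHz */

	/* same shape as the formulas in the hunk above, in 1/8 us units */
	unsigned int linetime = div_round_closest(crtc_htotal * 1000 * 8, crtc_clock);
	unsigned int ips_linetime = div_round_closest(crtc_htotal * 1000 * 8, cdclk_freq);

	printf("linetime=%u ips_linetime=%u (1/8 us units)\n", linetime, ips_linetime);
	return 0;
}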
| @@ -2066,7 +2337,7 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc, | |||
| 2066 | 2337 | ||
| 2067 | p->active = true; | 2338 | p->active = true; |
| 2068 | p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal; | 2339 | p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal; |
| 2069 | p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); | 2340 | p->pixel_rate = ilk_pipe_pixel_rate(intel_crtc->config); |
| 2070 | 2341 | ||
| 2071 | if (crtc->primary->state->fb) | 2342 | if (crtc->primary->state->fb) |
| 2072 | p->pri.bytes_per_pixel = | 2343 | p->pri.bytes_per_pixel = |
| @@ -2215,6 +2486,7 @@ static void ilk_wm_merge(struct drm_device *dev, | |||
| 2215 | const struct ilk_wm_maximums *max, | 2486 | const struct ilk_wm_maximums *max, |
| 2216 | struct intel_pipe_wm *merged) | 2487 | struct intel_pipe_wm *merged) |
| 2217 | { | 2488 | { |
| 2489 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 2218 | int level, max_level = ilk_wm_max_level(dev); | 2490 | int level, max_level = ilk_wm_max_level(dev); |
| 2219 | int last_enabled_level = max_level; | 2491 | int last_enabled_level = max_level; |
| 2220 | 2492 | ||
| @@ -2255,7 +2527,8 @@ static void ilk_wm_merge(struct drm_device *dev, | |||
| 2255 | * What we should check here is whether FBC can be | 2527 | * What we should check here is whether FBC can be |
| 2256 | * enabled sometime later. | 2528 | * enabled sometime later. |
| 2257 | */ | 2529 | */ |
| 2258 | if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) { | 2530 | if (IS_GEN5(dev) && !merged->fbc_wm_enabled && |
| 2531 | intel_fbc_enabled(dev_priv)) { | ||
| 2259 | for (level = 2; level <= max_level; level++) { | 2532 | for (level = 2; level <= max_level; level++) { |
| 2260 | struct intel_wm_level *wm = &merged->wm[level]; | 2533 | struct intel_wm_level *wm = &merged->wm[level]; |
| 2261 | 2534 | ||
| @@ -3043,8 +3316,10 @@ skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p) | |||
| 3043 | if (!to_intel_crtc(crtc)->active) | 3316 | if (!to_intel_crtc(crtc)->active) |
| 3044 | return 0; | 3317 | return 0; |
| 3045 | 3318 | ||
| 3046 | return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate); | 3319 | if (WARN_ON(p->pixel_rate == 0)) |
| 3320 | return 0; | ||
| 3047 | 3321 | ||
| 3322 | return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate); | ||
| 3048 | } | 3323 | } |
| 3049 | 3324 | ||
| 3050 | static void skl_compute_transition_wm(struct drm_crtc *crtc, | 3325 | static void skl_compute_transition_wm(struct drm_crtc *crtc, |
| @@ -3685,6 +3960,139 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) | |||
| 3685 | } | 3960 | } |
| 3686 | } | 3961 | } |
| 3687 | 3962 | ||
| 3963 | #define _FW_WM(value, plane) \ | ||
| 3964 | (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT) | ||
| 3965 | #define _FW_WM_VLV(value, plane) \ | ||
| 3966 | (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT) | ||
| 3967 | |||
| 3968 | static void vlv_read_wm_values(struct drm_i915_private *dev_priv, | ||
| 3969 | struct vlv_wm_values *wm) | ||
| 3970 | { | ||
| 3971 | enum pipe pipe; | ||
| 3972 | uint32_t tmp; | ||
| 3973 | |||
| 3974 | for_each_pipe(dev_priv, pipe) { | ||
| 3975 | tmp = I915_READ(VLV_DDL(pipe)); | ||
| 3976 | |||
| 3977 | wm->ddl[pipe].primary = | ||
| 3978 | (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); | ||
| 3979 | wm->ddl[pipe].cursor = | ||
| 3980 | (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); | ||
| 3981 | wm->ddl[pipe].sprite[0] = | ||
| 3982 | (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); | ||
| 3983 | wm->ddl[pipe].sprite[1] = | ||
| 3984 | (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); | ||
| 3985 | } | ||
| 3986 | |||
| 3987 | tmp = I915_READ(DSPFW1); | ||
| 3988 | wm->sr.plane = _FW_WM(tmp, SR); | ||
| 3989 | wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB); | ||
| 3990 | wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB); | ||
| 3991 | wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA); | ||
| 3992 | |||
| 3993 | tmp = I915_READ(DSPFW2); | ||
| 3994 | wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB); | ||
| 3995 | wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA); | ||
| 3996 | wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA); | ||
| 3997 | |||
| 3998 | tmp = I915_READ(DSPFW3); | ||
| 3999 | wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); | ||
| 4000 | |||
| 4001 | if (IS_CHERRYVIEW(dev_priv)) { | ||
| 4002 | tmp = I915_READ(DSPFW7_CHV); | ||
| 4003 | wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED); | ||
| 4004 | wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC); | ||
| 4005 | |||
| 4006 | tmp = I915_READ(DSPFW8_CHV); | ||
| 4007 | wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF); | ||
| 4008 | wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE); | ||
| 4009 | |||
| 4010 | tmp = I915_READ(DSPFW9_CHV); | ||
| 4011 | wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC); | ||
| 4012 | wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC); | ||
| 4013 | |||
| 4014 | tmp = I915_READ(DSPHOWM); | ||
| 4015 | wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; | ||
| 4016 | wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8; | ||
| 4017 | wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8; | ||
| 4018 | wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8; | ||
| 4019 | wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8; | ||
| 4020 | wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8; | ||
| 4021 | wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8; | ||
| 4022 | wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8; | ||
| 4023 | wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8; | ||
| 4024 | wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8; | ||
| 4025 | } else { | ||
| 4026 | tmp = I915_READ(DSPFW7); | ||
| 4027 | wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED); | ||
| 4028 | wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC); | ||
| 4029 | |||
| 4030 | tmp = I915_READ(DSPHOWM); | ||
| 4031 | wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; | ||
| 4032 | wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8; | ||
| 4033 | wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8; | ||
| 4034 | wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8; | ||
| 4035 | wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8; | ||
| 4036 | wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8; | ||
| 4037 | wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8; | ||
| 4038 | } | ||
| 4039 | } | ||
| 4040 | |||
| 4041 | #undef _FW_WM | ||
| 4042 | #undef _FW_WM_VLV | ||
| 4043 | |||
| 4044 | void vlv_wm_get_hw_state(struct drm_device *dev) | ||
| 4045 | { | ||
| 4046 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
| 4047 | struct vlv_wm_values *wm = &dev_priv->wm.vlv; | ||
| 4048 | struct intel_plane *plane; | ||
| 4049 | enum pipe pipe; | ||
| 4050 | u32 val; | ||
| 4051 | |||
| 4052 | vlv_read_wm_values(dev_priv, wm); | ||
| 4053 | |||
| 4054 | for_each_intel_plane(dev, plane) { | ||
| 4055 | switch (plane->base.type) { | ||
| 4056 | int sprite; | ||
| 4057 | case DRM_PLANE_TYPE_CURSOR: | ||
| 4058 | plane->wm.fifo_size = 63; | ||
| 4059 | break; | ||
| 4060 | case DRM_PLANE_TYPE_PRIMARY: | ||
| 4061 | plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0); | ||
| 4062 | break; | ||
| 4063 | case DRM_PLANE_TYPE_OVERLAY: | ||
| 4064 | sprite = plane->plane; | ||
| 4065 | plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1); | ||
| 4066 | break; | ||
| 4067 | } | ||
| 4068 | } | ||
| 4069 | |||
| 4070 | wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; | ||
| 4071 | wm->level = VLV_WM_LEVEL_PM2; | ||
| 4072 | |||
| 4073 | if (IS_CHERRYVIEW(dev_priv)) { | ||
| 4074 | mutex_lock(&dev_priv->rps.hw_lock); | ||
| 4075 | |||
| 4076 | val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); | ||
| 4077 | if (val & DSP_MAXFIFO_PM5_ENABLE) | ||
| 4078 | wm->level = VLV_WM_LEVEL_PM5; | ||
| 4079 | |||
| 4080 | val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); | ||
| 4081 | if ((val & FORCE_DDR_HIGH_FREQ) == 0) | ||
| 4082 | wm->level = VLV_WM_LEVEL_DDR_DVFS; | ||
| 4083 | |||
| 4084 | mutex_unlock(&dev_priv->rps.hw_lock); | ||
| 4085 | } | ||
| 4086 | |||
| 4087 | for_each_pipe(dev_priv, pipe) | ||
| 4088 | DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", | ||
| 4089 | pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor, | ||
| 4090 | wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]); | ||
| 4091 | |||
| 4092 | DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", | ||
| 4093 | wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); | ||
| 4094 | } | ||
| 4095 | |||
| 3688 | void ilk_wm_get_hw_state(struct drm_device *dev) | 4096 | void ilk_wm_get_hw_state(struct drm_device *dev) |
| 3689 | { | 4097 | { |
| 3690 | struct drm_i915_private *dev_priv = dev->dev_private; | 4098 | struct drm_i915_private *dev_priv = dev->dev_private; |
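In vlv_read_wm_values above, each watermark is reassembled from two registers: the low bits come from a DSPFW register and the extra high bit from DSPHOWM, shifted into place with << 8 (or << 9 for the self-refresh plane value). A standalone illustration of that recombination with an invented value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* invented example: a plane watermark of 300 = 0x12C */
	uint32_t dspfw_planea = 0x12C & 0xFF;		/* low 8 bits, as held in DSPFW1 */
	uint32_t dsphowm_planea_hi = (0x12C >> 8) & 1;	/* high bit, as held in DSPHOWM */

	uint32_t wm = dspfw_planea;
	wm |= dsphowm_planea_hi << 8;			/* same recombination as the hunk above */

	printf("plane A wm = %u\n", wm);		/* prints 300 */
	return 0;
}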
| @@ -3858,7 +4266,7 @@ static void ironlake_enable_drps(struct drm_device *dev) | |||
| 3858 | 4266 | ||
| 3859 | if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) | 4267 | if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) |
| 3860 | DRM_ERROR("stuck trying to change perf mode\n"); | 4268 | DRM_ERROR("stuck trying to change perf mode\n"); |
| 3861 | mdelay(1); | 4269 | msleep(1); |
| 3862 | 4270 | ||
| 3863 | ironlake_set_drps(dev, fstart); | 4271 | ironlake_set_drps(dev, fstart); |
| 3864 | 4272 | ||
| @@ -3889,10 +4297,10 @@ static void ironlake_disable_drps(struct drm_device *dev) | |||
| 3889 | 4297 | ||
| 3890 | /* Go back to the starting frequency */ | 4298 | /* Go back to the starting frequency */ |
| 3891 | ironlake_set_drps(dev, dev_priv->ips.fstart); | 4299 | ironlake_set_drps(dev, dev_priv->ips.fstart); |
| 3892 | mdelay(1); | 4300 | msleep(1); |
| 3893 | rgvswctl |= MEMCTL_CMD_STS; | 4301 | rgvswctl |= MEMCTL_CMD_STS; |
| 3894 | I915_WRITE(MEMSWCTL, rgvswctl); | 4302 | I915_WRITE(MEMSWCTL, rgvswctl); |
| 3895 | mdelay(1); | 4303 | msleep(1); |
| 3896 | 4304 | ||
| 3897 | spin_unlock_irq(&mchdev_lock); | 4305 | spin_unlock_irq(&mchdev_lock); |
| 3898 | } | 4306 | } |
| @@ -4083,14 +4491,14 @@ static void valleyview_set_rps(struct drm_device *dev, u8 val) | |||
| 4083 | "Odd GPU freq value\n")) | 4491 | "Odd GPU freq value\n")) |
| 4084 | val &= ~1; | 4492 | val &= ~1; |
| 4085 | 4493 | ||
| 4494 | I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); | ||
| 4495 | |||
| 4086 | if (val != dev_priv->rps.cur_freq) { | 4496 | if (val != dev_priv->rps.cur_freq) { |
| 4087 | vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); | 4497 | vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); |
| 4088 | if (!IS_CHERRYVIEW(dev_priv)) | 4498 | if (!IS_CHERRYVIEW(dev_priv)) |
| 4089 | gen6_set_rps_thresholds(dev_priv, val); | 4499 | gen6_set_rps_thresholds(dev_priv, val); |
| 4090 | } | 4500 | } |
| 4091 | 4501 | ||
| 4092 | I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); | ||
| 4093 | |||
| 4094 | dev_priv->rps.cur_freq = val; | 4502 | dev_priv->rps.cur_freq = val; |
| 4095 | trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); | 4503 | trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); |
| 4096 | } | 4504 | } |
| @@ -4250,12 +4658,8 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode) | |||
| 4250 | 4658 | ||
| 4251 | static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) | 4659 | static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) |
| 4252 | { | 4660 | { |
| 4253 | /* No RC6 before Ironlake */ | 4661 | /* No RC6 before Ironlake and code is gone for ilk. */ |
| 4254 | if (INTEL_INFO(dev)->gen < 5) | 4662 | if (INTEL_INFO(dev)->gen < 6) |
| 4255 | return 0; | ||
| 4256 | |||
| 4257 | /* RC6 is only on Ironlake mobile not on desktop */ | ||
| 4258 | if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev)) | ||
| 4259 | return 0; | 4663 | return 0; |
| 4260 | 4664 | ||
| 4261 | /* Respect the kernel parameter if it is set */ | 4665 | /* Respect the kernel parameter if it is set */ |
| @@ -4275,10 +4679,6 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) | |||
| 4275 | return enable_rc6 & mask; | 4679 | return enable_rc6 & mask; |
| 4276 | } | 4680 | } |
| 4277 | 4681 | ||
| 4278 | /* Disable RC6 on Ironlake */ | ||
| 4279 | if (INTEL_INFO(dev)->gen == 5) | ||
| 4280 | return 0; | ||
| 4281 | |||
| 4282 | if (IS_IVYBRIDGE(dev)) | 4682 | if (IS_IVYBRIDGE(dev)) |
| 4283 | return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); | 4683 | return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); |
| 4284 | 4684 | ||
| @@ -4297,25 +4697,26 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) | |||
| 4297 | u32 ddcc_status = 0; | 4697 | u32 ddcc_status = 0; |
| 4298 | int ret; | 4698 | int ret; |
| 4299 | 4699 | ||
| 4300 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | ||
| 4301 | /* All of these values are in units of 50MHz */ | 4700 | /* All of these values are in units of 50MHz */ |
| 4302 | dev_priv->rps.cur_freq = 0; | 4701 | dev_priv->rps.cur_freq = 0; |
| 4303 | /* static values from HW: RP0 > RP1 > RPn (min_freq) */ | 4702 | /* static values from HW: RP0 > RP1 > RPn (min_freq) */ |
| 4304 | dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff; | 4703 | if (IS_BROXTON(dev)) { |
| 4305 | dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; | 4704 | rp_state_cap = I915_READ(BXT_RP_STATE_CAP); |
| 4306 | dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff; | 4705 | dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff; |
| 4307 | if (IS_SKYLAKE(dev)) { | 4706 | dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; |
| 4308 | /* Store the frequency values in 16.66 MHZ units, which is | 4707 | dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff; |
| 4309 | the natural hardware unit for SKL */ | 4708 | } else { |
| 4310 | dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; | 4709 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
| 4311 | dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER; | 4710 | dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff; |
| 4312 | dev_priv->rps.min_freq *= GEN9_FREQ_SCALER; | 4711 | dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; |
| 4712 | dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff; | ||
| 4313 | } | 4713 | } |
| 4714 | |||
| 4314 | /* hw_max = RP0 until we check for overclocking */ | 4715 | /* hw_max = RP0 until we check for overclocking */ |
| 4315 | dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; | 4716 | dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; |
| 4316 | 4717 | ||
| 4317 | dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; | 4718 | dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; |
| 4318 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { | 4719 | if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) { |
| 4319 | ret = sandybridge_pcode_read(dev_priv, | 4720 | ret = sandybridge_pcode_read(dev_priv, |
| 4320 | HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, | 4721 | HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, |
| 4321 | &ddcc_status); | 4722 | &ddcc_status); |
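The Broxton branch added above reads RP0/RP1/RPn from BXT_RP_STATE_CAP with the byte order reversed relative to the GEN6 register: RP0 sits in bits 23:16 and the minimum frequency in bits 7:0. A standalone demonstration of the two decodes on an invented register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rp_state_cap = 0x00050b16;	/* invented, just to show the decodes */

	/* GEN6-style layout used above: RP0 [7:0], RP1 [15:8], RPn [23:16] */
	printf("gen6: rp0=%u rp1=%u min=%u\n",
	       (rp_state_cap >> 0) & 0xff,
	       (rp_state_cap >> 8) & 0xff,
	       (rp_state_cap >> 16) & 0xff);

	/* BXT layout used above: RP0 [23:16], RP1 [15:8], RPn [7:0] */
	printf("bxt:  rp0=%u rp1=%u min=%u\n",
	       (rp_state_cap >> 16) & 0xff,
	       (rp_state_cap >> 8) & 0xff,
	       (rp_state_cap >> 0) & 0xff);
	return 0;
}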
| @@ -4327,6 +4728,16 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) | |||
| 4327 | dev_priv->rps.max_freq); | 4728 | dev_priv->rps.max_freq); |
| 4328 | } | 4729 | } |
| 4329 | 4730 | ||
| 4731 | if (IS_SKYLAKE(dev)) { | ||
| 4732 | /* Store the frequency values in 16.66 MHZ units, which is | ||
| 4733 | the natural hardware unit for SKL */ | ||
| 4734 | dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; | ||
| 4735 | dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER; | ||
| 4736 | dev_priv->rps.min_freq *= GEN9_FREQ_SCALER; | ||
| 4737 | dev_priv->rps.max_freq *= GEN9_FREQ_SCALER; | ||
| 4738 | dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER; | ||
| 4739 | } | ||
| 4740 | |||
| 4330 | dev_priv->rps.idle_freq = dev_priv->rps.min_freq; | 4741 | dev_priv->rps.idle_freq = dev_priv->rps.min_freq; |
| 4331 | 4742 | ||
| 4332 | /* Preserve min/max settings in case of re-init */ | 4743 | /* Preserve min/max settings in case of re-init */ |
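The SKL block added here rescales all RPS frequencies from the 50 MHz units reported by hardware into 16.66 MHz units via GEN9_FREQ_SCALER; assuming the scaler is 3 (50 / 16.66 is roughly 3), the conversion in isolation looks like this:

#include <stdio.h>

#define GEN9_FREQ_SCALER 3	/* assumed value: 50 MHz / 16.66 MHz ~= 3 */

int main(void)
{
	unsigned int rp0_freq = 23;	/* example: 23 * 50 MHz = 1150 MHz */

	/* same conversion as the SKL branch above */
	unsigned int rp0_16_66 = rp0_freq * GEN9_FREQ_SCALER;

	printf("rp0: %u (50 MHz units) -> %u (16.66 MHz units) = %u MHz\n",
	       rp0_freq, rp0_16_66, rp0_16_66 * 50 / GEN9_FREQ_SCALER);
	return 0;
}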
| @@ -4619,6 +5030,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev) | |||
| 4619 | int min_freq = 15; | 5030 | int min_freq = 15; |
| 4620 | unsigned int gpu_freq; | 5031 | unsigned int gpu_freq; |
| 4621 | unsigned int max_ia_freq, min_ring_freq; | 5032 | unsigned int max_ia_freq, min_ring_freq; |
| 5033 | unsigned int max_gpu_freq, min_gpu_freq; | ||
| 4622 | int scaling_factor = 180; | 5034 | int scaling_factor = 180; |
| 4623 | struct cpufreq_policy *policy; | 5035 | struct cpufreq_policy *policy; |
| 4624 | 5036 | ||
| @@ -4643,17 +5055,31 @@ static void __gen6_update_ring_freq(struct drm_device *dev) | |||
| 4643 | /* convert DDR frequency from units of 266.6MHz to bandwidth */ | 5055 | /* convert DDR frequency from units of 266.6MHz to bandwidth */ |
| 4644 | min_ring_freq = mult_frac(min_ring_freq, 8, 3); | 5056 | min_ring_freq = mult_frac(min_ring_freq, 8, 3); |
| 4645 | 5057 | ||
| 5058 | if (IS_SKYLAKE(dev)) { | ||
| 5059 | /* Convert GT frequency to 50 HZ units */ | ||
| 5060 | min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER; | ||
| 5061 | max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER; | ||
| 5062 | } else { | ||
| 5063 | min_gpu_freq = dev_priv->rps.min_freq; | ||
| 5064 | max_gpu_freq = dev_priv->rps.max_freq; | ||
| 5065 | } | ||
| 5066 | |||
| 4646 | /* | 5067 | /* |
| 4647 | * For each potential GPU frequency, load a ring frequency we'd like | 5068 | * For each potential GPU frequency, load a ring frequency we'd like |
| 4648 | * to use for memory access. We do this by specifying the IA frequency | 5069 | * to use for memory access. We do this by specifying the IA frequency |
| 4649 | * the PCU should use as a reference to determine the ring frequency. | 5070 | * the PCU should use as a reference to determine the ring frequency. |
| 4650 | */ | 5071 | */ |
| 4651 | for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq; | 5072 | for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) { |
| 4652 | gpu_freq--) { | 5073 | int diff = max_gpu_freq - gpu_freq; |
| 4653 | int diff = dev_priv->rps.max_freq - gpu_freq; | ||
| 4654 | unsigned int ia_freq = 0, ring_freq = 0; | 5074 | unsigned int ia_freq = 0, ring_freq = 0; |
| 4655 | 5075 | ||
| 4656 | if (INTEL_INFO(dev)->gen >= 8) { | 5076 | if (IS_SKYLAKE(dev)) { |
| 5077 | /* | ||
| 5078 | * ring_freq = 2 * GT. ring_freq is in 100MHz units | ||
| 5079 | * No floor required for ring frequency on SKL. | ||
| 5080 | */ | ||
| 5081 | ring_freq = gpu_freq; | ||
| 5082 | } else if (INTEL_INFO(dev)->gen >= 8) { | ||
| 4657 | /* max(2 * GT, DDR). NB: GT is 50MHz units */ | 5083 | /* max(2 * GT, DDR). NB: GT is 50MHz units */ |
| 4658 | ring_freq = max(min_ring_freq, gpu_freq); | 5084 | ring_freq = max(min_ring_freq, gpu_freq); |
| 4659 | } else if (IS_HASWELL(dev)) { | 5085 | } else if (IS_HASWELL(dev)) { |
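In the reworked ring-frequency loop, Skylake converts its stored 16.66 MHz values back to 50 MHz units for iteration and requests ring_freq = gpu_freq with no DDR floor, unlike the gen8 max(2 * GT, DDR) rule kept below it. A rough standalone sketch of just the SKL bounds conversion and loop, with the pcode write omitted and GEN9_FREQ_SCALER assumed to be 3:

#include <stdio.h>

#define GEN9_FREQ_SCALER 3	/* assumption, see note above */

int main(void)
{
	/* values as stored in 16.66 MHz units after the SKL init above */
	unsigned int min_freq = 18, max_freq = 69;

	/* the ring-frequency table wants 50 MHz units again on SKL */
	unsigned int min_gpu_freq = min_freq / GEN9_FREQ_SCALER;	/* 6  -> 300 MHz */
	unsigned int max_gpu_freq = max_freq / GEN9_FREQ_SCALER;	/* 23 -> 1150 MHz */

	for (unsigned int gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--)
		printf("gpu_freq=%u (%u MHz): ring_freq=%u (no floor on SKL)\n",
		       gpu_freq, gpu_freq * 50, gpu_freq);
	return 0;
}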
| @@ -4687,7 +5113,7 @@ void gen6_update_ring_freq(struct drm_device *dev) | |||
| 4687 | { | 5113 | { |
| 4688 | struct drm_i915_private *dev_priv = dev->dev_private; | 5114 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 4689 | 5115 | ||
| 4690 | if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev)) | 5116 | if (!HAS_CORE_RING_FREQ(dev)) |
| 4691 | return; | 5117 | return; |
| 4692 | 5118 | ||
| 4693 | mutex_lock(&dev_priv->rps.hw_lock); | 5119 | mutex_lock(&dev_priv->rps.hw_lock); |
| @@ -5802,7 +6228,8 @@ static void intel_gen6_powersave_work(struct work_struct *work) | |||
| 5802 | } else if (INTEL_INFO(dev)->gen >= 9) { | 6228 | } else if (INTEL_INFO(dev)->gen >= 9) { |
| 5803 | gen9_enable_rc6(dev); | 6229 | gen9_enable_rc6(dev); |
| 5804 | gen9_enable_rps(dev); | 6230 | gen9_enable_rps(dev); |
| 5805 | __gen6_update_ring_freq(dev); | 6231 | if (IS_SKYLAKE(dev)) |
| 6232 | __gen6_update_ring_freq(dev); | ||
| 5806 | } else if (IS_BROADWELL(dev)) { | 6233 | } else if (IS_BROADWELL(dev)) { |
| 5807 | gen8_enable_rps(dev); | 6234 | gen8_enable_rps(dev); |
| 5808 | __gen6_update_ring_freq(dev); | 6235 | __gen6_update_ring_freq(dev); |
| @@ -6686,13 +7113,15 @@ void intel_init_pm(struct drm_device *dev) | |||
| 6686 | else if (INTEL_INFO(dev)->gen == 8) | 7113 | else if (INTEL_INFO(dev)->gen == 8) |
| 6687 | dev_priv->display.init_clock_gating = broadwell_init_clock_gating; | 7114 | dev_priv->display.init_clock_gating = broadwell_init_clock_gating; |
| 6688 | } else if (IS_CHERRYVIEW(dev)) { | 7115 | } else if (IS_CHERRYVIEW(dev)) { |
| 6689 | dev_priv->display.update_wm = valleyview_update_wm; | 7116 | vlv_setup_wm_latency(dev); |
| 6690 | dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm; | 7117 | |
| 7118 | dev_priv->display.update_wm = vlv_update_wm; | ||
| 6691 | dev_priv->display.init_clock_gating = | 7119 | dev_priv->display.init_clock_gating = |
| 6692 | cherryview_init_clock_gating; | 7120 | cherryview_init_clock_gating; |
| 6693 | } else if (IS_VALLEYVIEW(dev)) { | 7121 | } else if (IS_VALLEYVIEW(dev)) { |
| 6694 | dev_priv->display.update_wm = valleyview_update_wm; | 7122 | vlv_setup_wm_latency(dev); |
| 6695 | dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm; | 7123 | |
| 7124 | dev_priv->display.update_wm = vlv_update_wm; | ||
| 6696 | dev_priv->display.init_clock_gating = | 7125 | dev_priv->display.init_clock_gating = |
| 6697 | valleyview_init_clock_gating; | 7126 | valleyview_init_clock_gating; |
| 6698 | } else if (IS_PINEVIEW(dev)) { | 7127 | } else if (IS_PINEVIEW(dev)) { |
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 5ee0fa57ed19..acd8ec859f71 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c | |||
| @@ -254,10 +254,13 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp) | |||
| 254 | uint32_t max_sleep_time = 0x1f; | 254 | uint32_t max_sleep_time = 0x1f; |
| 255 | /* Lately it was identified that depending on panel idle frame count | 255 | /* Lately it was identified that depending on panel idle frame count |
| 256 | * calculated at HW can be off by 1. So let's use what came | 256 | * calculated at HW can be off by 1. So let's use what came |
| 257 | * from VBT + 1 and at minimum 2 to be on the safe side. | 257 | * from VBT + 1. |
| 258 | * There are also other cases where panel demands at least 4 | ||
| 259 | * but VBT is not being set. To cover these 2 cases lets use | ||
| 260 | * at least 5 when VBT isn't set to be on the safest side. | ||
| 258 | */ | 261 | */ |
| 259 | uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ? | 262 | uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ? |
| 260 | dev_priv->vbt.psr.idle_frames + 1 : 2; | 263 | dev_priv->vbt.psr.idle_frames + 1 : 5; |
| 261 | uint32_t val = 0x0; | 264 | uint32_t val = 0x0; |
| 262 | const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; | 265 | const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; |
| 263 | 266 | ||
| @@ -400,7 +403,7 @@ void intel_psr_enable(struct intel_dp *intel_dp) | |||
| 400 | 403 | ||
| 401 | /* Avoid continuous PSR exit by masking memup and hpd */ | 404 | /* Avoid continuous PSR exit by masking memup and hpd */ |
| 402 | I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP | | 405 | I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP | |
| 403 | EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP); | 406 | EDP_PSR_DEBUG_MASK_HPD); |
| 404 | 407 | ||
| 405 | /* Enable PSR on the panel */ | 408 | /* Enable PSR on the panel */ |
| 406 | hsw_psr_enable_sink(intel_dp); | 409 | hsw_psr_enable_sink(intel_dp); |
| @@ -596,13 +599,15 @@ static void intel_psr_exit(struct drm_device *dev) | |||
| 596 | /** | 599 | /** |
| 597 | * intel_psr_single_frame_update - Single Frame Update | 600 | * intel_psr_single_frame_update - Single Frame Update |
| 598 | * @dev: DRM device | 601 | * @dev: DRM device |
| 602 | * @frontbuffer_bits: frontbuffer plane tracking bits | ||
| 599 | * | 603 | * |
| 600 | * Some platforms support a single frame update feature that is used to | 604 | * Some platforms support a single frame update feature that is used to |
| 601 | * send and update only one frame on Remote Frame Buffer. | 605 | * send and update only one frame on Remote Frame Buffer. |
| 602 | * So far it is only implemented for Valleyview and Cherryview because | 606 | * So far it is only implemented for Valleyview and Cherryview because |
| 603 | * hardware requires this to be done before a page flip. | 607 | * hardware requires this to be done before a page flip. |
| 604 | */ | 608 | */ |
| 605 | void intel_psr_single_frame_update(struct drm_device *dev) | 609 | void intel_psr_single_frame_update(struct drm_device *dev, |
| 610 | unsigned frontbuffer_bits) | ||
| 606 | { | 611 | { |
| 607 | struct drm_i915_private *dev_priv = dev->dev_private; | 612 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 608 | struct drm_crtc *crtc; | 613 | struct drm_crtc *crtc; |
| @@ -624,14 +629,16 @@ void intel_psr_single_frame_update(struct drm_device *dev) | |||
| 624 | 629 | ||
| 625 | crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; | 630 | crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; |
| 626 | pipe = to_intel_crtc(crtc)->pipe; | 631 | pipe = to_intel_crtc(crtc)->pipe; |
| 627 | val = I915_READ(VLV_PSRCTL(pipe)); | ||
| 628 | 632 | ||
| 629 | /* | 633 | if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) { |
| 630 | * We need to set this bit before writing registers for a flip. | 634 | val = I915_READ(VLV_PSRCTL(pipe)); |
| 631 | * This bit will be self-clear when it gets to the PSR active state. | ||
| 632 | */ | ||
| 633 | I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE); | ||
| 634 | 635 | ||
| 636 | /* | ||
| 637 | * We need to set this bit before writing registers for a flip. | ||
| 638 | * This bit will be self-clear when it gets to the PSR active state. | ||
| 639 | */ | ||
| 640 | I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE); | ||
| 641 | } | ||
| 635 | mutex_unlock(&dev_priv->psr.lock); | 642 | mutex_unlock(&dev_priv->psr.lock); |
| 636 | } | 643 | } |
| 637 | 644 | ||
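The added check above means intel_psr_single_frame_update only pokes VLV_PSRCTL when a dirtied frontbuffer bit actually belongs to the pipe PSR is running on. Frontbuffer bits are grouped per pipe, so this is a plain mask test; a standalone illustration using stand-in macros (the real INTEL_FRONTBUFFER_* definitions live in the driver headers and may differ):

#include <stdio.h>

/* stand-in definitions for illustration only */
#define BITS_PER_PIPE 4
#define PIPE_ALL_MASK(pipe) (0xfu << (BITS_PER_PIPE * (pipe)))

int main(void)
{
	unsigned frontbuffer_bits = 1u << (BITS_PER_PIPE * 1);	/* a pipe B plane was dirtied */
	int psr_pipe = 1;					/* PSR active on pipe B */

	/* same shape as the check added above: only act if our pipe is affected */
	if (frontbuffer_bits & PIPE_ALL_MASK(psr_pipe))
		printf("pipe %c dirtied -> trigger single frame update\n", 'A' + psr_pipe);
	else
		printf("other pipe only -> skip\n");
	return 0;
}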
| @@ -648,7 +655,7 @@ void intel_psr_single_frame_update(struct drm_device *dev) | |||
| 648 | * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits." | 655 | * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits." |
| 649 | */ | 656 | */ |
| 650 | void intel_psr_invalidate(struct drm_device *dev, | 657 | void intel_psr_invalidate(struct drm_device *dev, |
| 651 | unsigned frontbuffer_bits) | 658 | unsigned frontbuffer_bits) |
| 652 | { | 659 | { |
| 653 | struct drm_i915_private *dev_priv = dev->dev_private; | 660 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 654 | struct drm_crtc *crtc; | 661 | struct drm_crtc *crtc; |
| @@ -663,11 +670,12 @@ void intel_psr_invalidate(struct drm_device *dev, | |||
| 663 | crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; | 670 | crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; |
| 664 | pipe = to_intel_crtc(crtc)->pipe; | 671 | pipe = to_intel_crtc(crtc)->pipe; |
| 665 | 672 | ||
| 666 | intel_psr_exit(dev); | ||
| 667 | |||
| 668 | frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); | 673 | frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); |
| 669 | |||
| 670 | dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits; | 674 | dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits; |
| 675 | |||
| 676 | if (frontbuffer_bits) | ||
| 677 | intel_psr_exit(dev); | ||
| 678 | |||
| 671 | mutex_unlock(&dev_priv->psr.lock); | 679 | mutex_unlock(&dev_priv->psr.lock); |
| 672 | } | 680 | } |
| 673 | 681 | ||
| @@ -675,6 +683,7 @@ void intel_psr_invalidate(struct drm_device *dev, | |||
| 675 | * intel_psr_flush - Flush PSR | 683 | * intel_psr_flush - Flush PSR |
| 676 | * @dev: DRM device | 684 | * @dev: DRM device |
| 677 | * @frontbuffer_bits: frontbuffer plane tracking bits | 685 | * @frontbuffer_bits: frontbuffer plane tracking bits |
| 686 | * @origin: which operation caused the flush | ||
| 678 | * | 687 | * |
| 679 | * Since the hardware frontbuffer tracking has gaps we need to integrate | 688 | * Since the hardware frontbuffer tracking has gaps we need to integrate |
| 680 | * with the software frontbuffer tracking. This function gets called every | 689 | * with the software frontbuffer tracking. This function gets called every |
| @@ -684,7 +693,7 @@ void intel_psr_invalidate(struct drm_device *dev, | |||
| 684 | * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits. | 693 | * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits. |
| 685 | */ | 694 | */ |
| 686 | void intel_psr_flush(struct drm_device *dev, | 695 | void intel_psr_flush(struct drm_device *dev, |
| 687 | unsigned frontbuffer_bits) | 696 | unsigned frontbuffer_bits, enum fb_op_origin origin) |
| 688 | { | 697 | { |
| 689 | struct drm_i915_private *dev_priv = dev->dev_private; | 698 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 690 | struct drm_crtc *crtc; | 699 | struct drm_crtc *crtc; |
| @@ -698,26 +707,29 @@ void intel_psr_flush(struct drm_device *dev, | |||
| 698 | 707 | ||
| 699 | crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; | 708 | crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; |
| 700 | pipe = to_intel_crtc(crtc)->pipe; | 709 | pipe = to_intel_crtc(crtc)->pipe; |
| 701 | dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits; | ||
| 702 | 710 | ||
| 703 | /* | 711 | frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); |
| 704 | * On Haswell sprite plane updates don't result in a psr invalidating | 712 | dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits; |
| 705 | * signal in the hardware. Which means we need to manually fake this in | ||
| 706 | * software for all flushes, not just when we've seen a preceding | ||
| 707 | * invalidation through frontbuffer rendering. | ||
| 708 | */ | ||
| 709 | if (IS_HASWELL(dev) && | ||
| 710 | (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe))) | ||
| 711 | intel_psr_exit(dev); | ||
| 712 | 713 | ||
| 713 | /* | 714 | if (HAS_DDI(dev)) { |
| 714 | * On Valleyview and Cherryview we don't use hardware tracking so | 715 | /* |
| 715 | * any plane updates or cursor moves don't result in a PSR | 716 | * By definition every flush should mean invalidate + flush, |
| 716 | * invalidating. Which means we need to manually fake this in | 717 | * however on core platforms let's minimize the |
| 717 | * software for all flushes, not just when we've seen a preceding | 718 | * disable/re-enable so we can avoid the invalidate when flip |
| 718 | * invalidation through frontbuffer rendering. */ | 719 | * originated the flush. |
| 719 | if (!HAS_DDI(dev)) | 720 | */ |
| 720 | intel_psr_exit(dev); | 721 | if (frontbuffer_bits && origin != ORIGIN_FLIP) |
| 722 | intel_psr_exit(dev); | ||
| 723 | } else { | ||
| 724 | /* | ||
| 725 | * On Valleyview and Cherryview we don't use hardware tracking | ||
| 726 | * so any plane updates or cursor moves don't result in a PSR | ||
| 727 | * invalidating. Which means we need to manually fake this in | ||
| 728 | * software for all flushes. | ||
| 729 | */ | ||
| 730 | if (frontbuffer_bits) | ||
| 731 | intel_psr_exit(dev); | ||
| 732 | } | ||
| 721 | 733 | ||
| 722 | if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) | 734 | if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) |
| 723 | schedule_delayed_work(&dev_priv->psr.work, | 735 | schedule_delayed_work(&dev_priv->psr.work, |
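The flush rework above reduces to a single policy: on DDI platforms the manual PSR exit is skipped when the flush came from a flip (hardware tracking covers that case), while non-DDI platforms, and any other origin, still force an exit whenever bits for the PSR pipe were flushed. A standalone restatement of that predicate with illustrative enum names (not the driver's real fb_op_origin values):

#include <stdbool.h>
#include <stdio.h>

enum fb_op_origin_example { ORIGIN_CPU_EX, ORIGIN_FLIP_EX, ORIGIN_GTT_EX };

/* true when the flush should force a manual PSR exit, mirroring the
 * two branches of intel_psr_flush() above */
static bool psr_flush_needs_exit(bool has_ddi, unsigned frontbuffer_bits,
				 enum fb_op_origin_example origin)
{
	if (!frontbuffer_bits)
		return false;			/* nothing on the PSR pipe was touched */
	if (has_ddi && origin == ORIGIN_FLIP_EX)
		return false;			/* HW frontbuffer tracking handles flips on DDI */
	return true;
}

int main(void)
{
	printf("%d %d %d\n",
	       psr_flush_needs_exit(true, 0x1, ORIGIN_FLIP_EX),	/* 0 */
	       psr_flush_needs_exit(true, 0x1, ORIGIN_CPU_EX),	/* 1 */
	       psr_flush_needs_exit(false, 0x1, ORIGIN_FLIP_EX));	/* 1 */
	return 0;
}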
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 3817a6f00d9e..177f7ed16cf0 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
| @@ -81,7 +81,7 @@ bool intel_ring_stopped(struct intel_engine_cs *ring) | |||
| 81 | return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring); | 81 | return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring); |
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | void __intel_ring_advance(struct intel_engine_cs *ring) | 84 | static void __intel_ring_advance(struct intel_engine_cs *ring) |
| 85 | { | 85 | { |
| 86 | struct intel_ringbuffer *ringbuf = ring->buffer; | 86 | struct intel_ringbuffer *ringbuf = ring->buffer; |
| 87 | ringbuf->tail &= ringbuf->size - 1; | 87 | ringbuf->tail &= ringbuf->size - 1; |
| @@ -91,10 +91,11 @@ void __intel_ring_advance(struct intel_engine_cs *ring) | |||
| 91 | } | 91 | } |
| 92 | 92 | ||
| 93 | static int | 93 | static int |
| 94 | gen2_render_ring_flush(struct intel_engine_cs *ring, | 94 | gen2_render_ring_flush(struct drm_i915_gem_request *req, |
| 95 | u32 invalidate_domains, | 95 | u32 invalidate_domains, |
| 96 | u32 flush_domains) | 96 | u32 flush_domains) |
| 97 | { | 97 | { |
| 98 | struct intel_engine_cs *ring = req->ring; | ||
| 98 | u32 cmd; | 99 | u32 cmd; |
| 99 | int ret; | 100 | int ret; |
| 100 | 101 | ||
| @@ -105,7 +106,7 @@ gen2_render_ring_flush(struct intel_engine_cs *ring, | |||
| 105 | if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) | 106 | if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) |
| 106 | cmd |= MI_READ_FLUSH; | 107 | cmd |= MI_READ_FLUSH; |
| 107 | 108 | ||
| 108 | ret = intel_ring_begin(ring, 2); | 109 | ret = intel_ring_begin(req, 2); |
| 109 | if (ret) | 110 | if (ret) |
| 110 | return ret; | 111 | return ret; |
| 111 | 112 | ||
| @@ -117,10 +118,11 @@ gen2_render_ring_flush(struct intel_engine_cs *ring, | |||
| 117 | } | 118 | } |
| 118 | 119 | ||
| 119 | static int | 120 | static int |
| 120 | gen4_render_ring_flush(struct intel_engine_cs *ring, | 121 | gen4_render_ring_flush(struct drm_i915_gem_request *req, |
| 121 | u32 invalidate_domains, | 122 | u32 invalidate_domains, |
| 122 | u32 flush_domains) | 123 | u32 flush_domains) |
| 123 | { | 124 | { |
| 125 | struct intel_engine_cs *ring = req->ring; | ||
| 124 | struct drm_device *dev = ring->dev; | 126 | struct drm_device *dev = ring->dev; |
| 125 | u32 cmd; | 127 | u32 cmd; |
| 126 | int ret; | 128 | int ret; |
| @@ -163,7 +165,7 @@ gen4_render_ring_flush(struct intel_engine_cs *ring, | |||
| 163 | (IS_G4X(dev) || IS_GEN5(dev))) | 165 | (IS_G4X(dev) || IS_GEN5(dev))) |
| 164 | cmd |= MI_INVALIDATE_ISP; | 166 | cmd |= MI_INVALIDATE_ISP; |
| 165 | 167 | ||
| 166 | ret = intel_ring_begin(ring, 2); | 168 | ret = intel_ring_begin(req, 2); |
| 167 | if (ret) | 169 | if (ret) |
| 168 | return ret; | 170 | return ret; |
| 169 | 171 | ||
| @@ -212,13 +214,13 @@ gen4_render_ring_flush(struct intel_engine_cs *ring, | |||
| 212 | * really our business. That leaves only stall at scoreboard. | 214 | * really our business. That leaves only stall at scoreboard. |
| 213 | */ | 215 | */ |
| 214 | static int | 216 | static int |
| 215 | intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring) | 217 | intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req) |
| 216 | { | 218 | { |
| 219 | struct intel_engine_cs *ring = req->ring; | ||
| 217 | u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; | 220 | u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; |
| 218 | int ret; | 221 | int ret; |
| 219 | 222 | ||
| 220 | 223 | ret = intel_ring_begin(req, 6); | |
| 221 | ret = intel_ring_begin(ring, 6); | ||
| 222 | if (ret) | 224 | if (ret) |
| 223 | return ret; | 225 | return ret; |
| 224 | 226 | ||
| @@ -231,7 +233,7 @@ intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring) | |||
| 231 | intel_ring_emit(ring, MI_NOOP); | 233 | intel_ring_emit(ring, MI_NOOP); |
| 232 | intel_ring_advance(ring); | 234 | intel_ring_advance(ring); |
| 233 | 235 | ||
| 234 | ret = intel_ring_begin(ring, 6); | 236 | ret = intel_ring_begin(req, 6); |
| 235 | if (ret) | 237 | if (ret) |
| 236 | return ret; | 238 | return ret; |
| 237 | 239 | ||
| @@ -247,15 +249,16 @@ intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring) | |||
| 247 | } | 249 | } |
| 248 | 250 | ||
| 249 | static int | 251 | static int |
| 250 | gen6_render_ring_flush(struct intel_engine_cs *ring, | 252 | gen6_render_ring_flush(struct drm_i915_gem_request *req, |
| 251 | u32 invalidate_domains, u32 flush_domains) | 253 | u32 invalidate_domains, u32 flush_domains) |
| 252 | { | 254 | { |
| 255 | struct intel_engine_cs *ring = req->ring; | ||
| 253 | u32 flags = 0; | 256 | u32 flags = 0; |
| 254 | u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; | 257 | u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; |
| 255 | int ret; | 258 | int ret; |
| 256 | 259 | ||
| 257 | /* Force SNB workarounds for PIPE_CONTROL flushes */ | 260 | /* Force SNB workarounds for PIPE_CONTROL flushes */ |
| 258 | ret = intel_emit_post_sync_nonzero_flush(ring); | 261 | ret = intel_emit_post_sync_nonzero_flush(req); |
| 259 | if (ret) | 262 | if (ret) |
| 260 | return ret; | 263 | return ret; |
| 261 | 264 | ||
| @@ -285,7 +288,7 @@ gen6_render_ring_flush(struct intel_engine_cs *ring, | |||
| 285 | flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; | 288 | flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; |
| 286 | } | 289 | } |
| 287 | 290 | ||
| 288 | ret = intel_ring_begin(ring, 4); | 291 | ret = intel_ring_begin(req, 4); |
| 289 | if (ret) | 292 | if (ret) |
| 290 | return ret; | 293 | return ret; |
| 291 | 294 | ||
| @@ -299,11 +302,12 @@ gen6_render_ring_flush(struct intel_engine_cs *ring, | |||
| 299 | } | 302 | } |
| 300 | 303 | ||
| 301 | static int | 304 | static int |
| 302 | gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring) | 305 | gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req) |
| 303 | { | 306 | { |
| 307 | struct intel_engine_cs *ring = req->ring; | ||
| 304 | int ret; | 308 | int ret; |
| 305 | 309 | ||
| 306 | ret = intel_ring_begin(ring, 4); | 310 | ret = intel_ring_begin(req, 4); |
| 307 | if (ret) | 311 | if (ret) |
| 308 | return ret; | 312 | return ret; |
| 309 | 313 | ||
| @@ -318,9 +322,10 @@ gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring) | |||
| 318 | } | 322 | } |
| 319 | 323 | ||
| 320 | static int | 324 | static int |
| 321 | gen7_render_ring_flush(struct intel_engine_cs *ring, | 325 | gen7_render_ring_flush(struct drm_i915_gem_request *req, |
| 322 | u32 invalidate_domains, u32 flush_domains) | 326 | u32 invalidate_domains, u32 flush_domains) |
| 323 | { | 327 | { |
| 328 | struct intel_engine_cs *ring = req->ring; | ||
| 324 | u32 flags = 0; | 329 | u32 flags = 0; |
| 325 | u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; | 330 | u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; |
| 326 | int ret; | 331 | int ret; |
| @@ -362,10 +367,10 @@ gen7_render_ring_flush(struct intel_engine_cs *ring, | |||
| 362 | /* Workaround: we must issue a pipe_control with CS-stall bit | 367 | /* Workaround: we must issue a pipe_control with CS-stall bit |
| 363 | * set before a pipe_control command that has the state cache | 368 | * set before a pipe_control command that has the state cache |
| 364 | * invalidate bit set. */ | 369 | * invalidate bit set. */ |
| 365 | gen7_render_ring_cs_stall_wa(ring); | 370 | gen7_render_ring_cs_stall_wa(req); |
| 366 | } | 371 | } |
| 367 | 372 | ||
| 368 | ret = intel_ring_begin(ring, 4); | 373 | ret = intel_ring_begin(req, 4); |
| 369 | if (ret) | 374 | if (ret) |
| 370 | return ret; | 375 | return ret; |
| 371 | 376 | ||
| @@ -379,12 +384,13 @@ gen7_render_ring_flush(struct intel_engine_cs *ring, | |||
| 379 | } | 384 | } |
| 380 | 385 | ||
| 381 | static int | 386 | static int |
| 382 | gen8_emit_pipe_control(struct intel_engine_cs *ring, | 387 | gen8_emit_pipe_control(struct drm_i915_gem_request *req, |
| 383 | u32 flags, u32 scratch_addr) | 388 | u32 flags, u32 scratch_addr) |
| 384 | { | 389 | { |
| 390 | struct intel_engine_cs *ring = req->ring; | ||
| 385 | int ret; | 391 | int ret; |
| 386 | 392 | ||
| 387 | ret = intel_ring_begin(ring, 6); | 393 | ret = intel_ring_begin(req, 6); |
| 388 | if (ret) | 394 | if (ret) |
| 389 | return ret; | 395 | return ret; |
| 390 | 396 | ||
| @@ -400,11 +406,11 @@ gen8_emit_pipe_control(struct intel_engine_cs *ring, | |||
| 400 | } | 406 | } |
| 401 | 407 | ||
| 402 | static int | 408 | static int |
| 403 | gen8_render_ring_flush(struct intel_engine_cs *ring, | 409 | gen8_render_ring_flush(struct drm_i915_gem_request *req, |
| 404 | u32 invalidate_domains, u32 flush_domains) | 410 | u32 invalidate_domains, u32 flush_domains) |
| 405 | { | 411 | { |
| 406 | u32 flags = 0; | 412 | u32 flags = 0; |
| 407 | u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; | 413 | u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; |
| 408 | int ret; | 414 | int ret; |
| 409 | 415 | ||
| 410 | flags |= PIPE_CONTROL_CS_STALL; | 416 | flags |= PIPE_CONTROL_CS_STALL; |
| @@ -424,7 +430,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring, | |||
| 424 | flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; | 430 | flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; |
| 425 | 431 | ||
| 426 | /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */ | 432 | /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */ |
| 427 | ret = gen8_emit_pipe_control(ring, | 433 | ret = gen8_emit_pipe_control(req, |
| 428 | PIPE_CONTROL_CS_STALL | | 434 | PIPE_CONTROL_CS_STALL | |
| 429 | PIPE_CONTROL_STALL_AT_SCOREBOARD, | 435 | PIPE_CONTROL_STALL_AT_SCOREBOARD, |
| 430 | 0); | 436 | 0); |
| @@ -432,7 +438,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring, | |||
| 432 | return ret; | 438 | return ret; |
| 433 | } | 439 | } |
| 434 | 440 | ||
| 435 | return gen8_emit_pipe_control(ring, flags, scratch_addr); | 441 | return gen8_emit_pipe_control(req, flags, scratch_addr); |
| 436 | } | 442 | } |
| 437 | 443 | ||
| 438 | static void ring_write_tail(struct intel_engine_cs *ring, | 444 | static void ring_write_tail(struct intel_engine_cs *ring, |
| @@ -703,10 +709,10 @@ err: | |||
| 703 | return ret; | 709 | return ret; |
| 704 | } | 710 | } |
| 705 | 711 | ||
| 706 | static int intel_ring_workarounds_emit(struct intel_engine_cs *ring, | 712 | static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) |
| 707 | struct intel_context *ctx) | ||
| 708 | { | 713 | { |
| 709 | int ret, i; | 714 | int ret, i; |
| 715 | struct intel_engine_cs *ring = req->ring; | ||
| 710 | struct drm_device *dev = ring->dev; | 716 | struct drm_device *dev = ring->dev; |
| 711 | struct drm_i915_private *dev_priv = dev->dev_private; | 717 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 712 | struct i915_workarounds *w = &dev_priv->workarounds; | 718 | struct i915_workarounds *w = &dev_priv->workarounds; |
| @@ -715,11 +721,11 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring, | |||
| 715 | return 0; | 721 | return 0; |
| 716 | 722 | ||
| 717 | ring->gpu_caches_dirty = true; | 723 | ring->gpu_caches_dirty = true; |
| 718 | ret = intel_ring_flush_all_caches(ring); | 724 | ret = intel_ring_flush_all_caches(req); |
| 719 | if (ret) | 725 | if (ret) |
| 720 | return ret; | 726 | return ret; |
| 721 | 727 | ||
| 722 | ret = intel_ring_begin(ring, (w->count * 2 + 2)); | 728 | ret = intel_ring_begin(req, (w->count * 2 + 2)); |
| 723 | if (ret) | 729 | if (ret) |
| 724 | return ret; | 730 | return ret; |
| 725 | 731 | ||
| @@ -733,7 +739,7 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring, | |||
| 733 | intel_ring_advance(ring); | 739 | intel_ring_advance(ring); |
| 734 | 740 | ||
| 735 | ring->gpu_caches_dirty = true; | 741 | ring->gpu_caches_dirty = true; |
| 736 | ret = intel_ring_flush_all_caches(ring); | 742 | ret = intel_ring_flush_all_caches(req); |
| 737 | if (ret) | 743 | if (ret) |
| 738 | return ret; | 744 | return ret; |
| 739 | 745 | ||
| @@ -742,16 +748,15 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring, | |||
| 742 | return 0; | 748 | return 0; |
| 743 | } | 749 | } |
| 744 | 750 | ||
| 745 | static int intel_rcs_ctx_init(struct intel_engine_cs *ring, | 751 | static int intel_rcs_ctx_init(struct drm_i915_gem_request *req) |
| 746 | struct intel_context *ctx) | ||
| 747 | { | 752 | { |
| 748 | int ret; | 753 | int ret; |
| 749 | 754 | ||
| 750 | ret = intel_ring_workarounds_emit(ring, ctx); | 755 | ret = intel_ring_workarounds_emit(req); |
| 751 | if (ret != 0) | 756 | if (ret != 0) |
| 752 | return ret; | 757 | return ret; |
| 753 | 758 | ||
| 754 | ret = i915_gem_render_state_init(ring); | 759 | ret = i915_gem_render_state_init(req); |
| 755 | if (ret) | 760 | if (ret) |
| 756 | DRM_ERROR("init render state: %d\n", ret); | 761 | DRM_ERROR("init render state: %d\n", ret); |
| 757 | 762 | ||
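intel_ring_workarounds_emit() above budgets w->count * 2 + 2 dwords because the whole workaround list is written as one MI_LOAD_REGISTER_IMM: a one-dword header, one register/value pair per entry, and a trailing MI_NOOP for the remaining dword. A sketch of the emission between the two cache flushes shown in the hunk (the w->reg[i].addr / w->reg[i].value field names are assumptions about the saved list layout):

/* Body between the two intel_ring_flush_all_caches() calls:
 * one LRI header, then w->count (register, value) pairs, then a NOOP.
 */
ret = intel_ring_begin(req, (w->count * 2 + 2));
if (ret)
	return ret;

intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
for (i = 0; i < w->count; i++) {
	intel_ring_emit(ring, w->reg[i].addr);   /* register offset */
	intel_ring_emit(ring, w->reg[i].value);  /* masked value to write */
}
intel_ring_emit(ring, MI_NOOP);

intel_ring_advance(ring);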
| @@ -800,6 +805,11 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring) | |||
| 800 | struct drm_device *dev = ring->dev; | 805 | struct drm_device *dev = ring->dev; |
| 801 | struct drm_i915_private *dev_priv = dev->dev_private; | 806 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 802 | 807 | ||
| 808 | WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); | ||
| 809 | |||
| 810 | /* WaDisableAsyncFlipPerfMode:bdw */ | ||
| 811 | WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE); | ||
| 812 | |||
| 803 | /* WaDisablePartialInstShootdown:bdw */ | 813 | /* WaDisablePartialInstShootdown:bdw */ |
| 804 | /* WaDisableThreadStallDopClockGating:bdw (pre-production) */ | 814 | /* WaDisableThreadStallDopClockGating:bdw (pre-production) */ |
| 805 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, | 815 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, |
| @@ -861,6 +871,11 @@ static int chv_init_workarounds(struct intel_engine_cs *ring) | |||
| 861 | struct drm_device *dev = ring->dev; | 871 | struct drm_device *dev = ring->dev; |
| 862 | struct drm_i915_private *dev_priv = dev->dev_private; | 872 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 863 | 873 | ||
| 874 | WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); | ||
| 875 | |||
| 876 | /* WaDisableAsyncFlipPerfMode:chv */ | ||
| 877 | WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE); | ||
| 878 | |||
| 864 | /* WaDisablePartialInstShootdown:chv */ | 879 | /* WaDisablePartialInstShootdown:chv */ |
| 865 | /* WaDisableThreadStallDopClockGating:chv */ | 880 | /* WaDisableThreadStallDopClockGating:chv */ |
| 866 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, | 881 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, |
| @@ -931,8 +946,11 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring) | |||
| 931 | /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ | 946 | /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ |
| 932 | WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1, | 947 | WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1, |
| 933 | GEN9_RHWO_OPTIMIZATION_DISABLE); | 948 | GEN9_RHWO_OPTIMIZATION_DISABLE); |
| 934 | WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN0, | 949 | /* |
| 935 | DISABLE_PIXEL_MASK_CAMMING); | 950 | * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set |
| 951 | * but we do that in the per-ctx batch buffer as there is an issue | ||
| 952 | * with this register not getting restored on ctx restore | ||
| 953 | */ | ||
| 936 | } | 954 | } |
| 937 | 955 | ||
| 938 | if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) || | 956 | if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) || |
| @@ -1041,6 +1059,13 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) | |||
| 1041 | HDC_FORCE_NON_COHERENT); | 1059 | HDC_FORCE_NON_COHERENT); |
| 1042 | } | 1060 | } |
| 1043 | 1061 | ||
| 1062 | if (INTEL_REVID(dev) == SKL_REVID_C0 || | ||
| 1063 | INTEL_REVID(dev) == SKL_REVID_D0) | ||
| 1064 | /* WaBarrierPerformanceFixDisable:skl */ | ||
| 1065 | WA_SET_BIT_MASKED(HDC_CHICKEN0, | ||
| 1066 | HDC_FENCE_DEST_SLM_DISABLE | | ||
| 1067 | HDC_BARRIER_PERFORMANCE_DISABLE); | ||
| 1068 | |||
| 1044 | return skl_tune_iz_hashing(ring); | 1069 | return skl_tune_iz_hashing(ring); |
| 1045 | } | 1070 | } |
| 1046 | 1071 | ||
| @@ -1105,9 +1130,9 @@ static int init_render_ring(struct intel_engine_cs *ring) | |||
| 1105 | * to use MI_WAIT_FOR_EVENT within the CS. It should already be | 1130 | * to use MI_WAIT_FOR_EVENT within the CS. It should already be |
| 1106 | * programmed to '1' on all products. | 1131 | * programmed to '1' on all products. |
| 1107 | * | 1132 | * |
| 1108 | * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv | 1133 | * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv |
| 1109 | */ | 1134 | */ |
| 1110 | if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9) | 1135 | if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) |
| 1111 | I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); | 1136 | I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); |
| 1112 | 1137 | ||
| 1113 | /* Required for the hardware to program scanline values for waiting */ | 1138 | /* Required for the hardware to program scanline values for waiting */ |
| @@ -1132,7 +1157,7 @@ static int init_render_ring(struct intel_engine_cs *ring) | |||
| 1132 | _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); | 1157 | _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); |
| 1133 | } | 1158 | } |
| 1134 | 1159 | ||
| 1135 | if (INTEL_INFO(dev)->gen >= 6) | 1160 | if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) |
| 1136 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); | 1161 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); |
| 1137 | 1162 | ||
| 1138 | if (HAS_L3_DPF(dev)) | 1163 | if (HAS_L3_DPF(dev)) |
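With the bdw/chv hunks above, INSTPM_FORCE_ORDERING and the ASYNC_FLIP_PERF_DISABLE workaround are queued through WA_SET_BIT_MASKED() instead of being written directly in init_render_ring(), which now only touches those registers on gen6/gen7. Both are masked-bit registers, so the queued value carries its own write mask. A small sketch of that encoding, assuming it matches the driver's _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() helpers:

/* Masked-bit registers: bits [31:16] of the written value select which of
 * bits [15:0] take effect, so unrelated bits in the register are untouched.
 */
#define EXAMPLE_MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define EXAMPLE_MASKED_BIT_DISABLE(a)	((a) << 16)

/* WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE) therefore queues the
 * same value that the removed I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(...))
 * used to program directly.
 */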
| @@ -1155,10 +1180,11 @@ static void render_ring_cleanup(struct intel_engine_cs *ring) | |||
| 1155 | intel_fini_pipe_control(ring); | 1180 | intel_fini_pipe_control(ring); |
| 1156 | } | 1181 | } |
| 1157 | 1182 | ||
| 1158 | static int gen8_rcs_signal(struct intel_engine_cs *signaller, | 1183 | static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, |
| 1159 | unsigned int num_dwords) | 1184 | unsigned int num_dwords) |
| 1160 | { | 1185 | { |
| 1161 | #define MBOX_UPDATE_DWORDS 8 | 1186 | #define MBOX_UPDATE_DWORDS 8 |
| 1187 | struct intel_engine_cs *signaller = signaller_req->ring; | ||
| 1162 | struct drm_device *dev = signaller->dev; | 1188 | struct drm_device *dev = signaller->dev; |
| 1163 | struct drm_i915_private *dev_priv = dev->dev_private; | 1189 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1164 | struct intel_engine_cs *waiter; | 1190 | struct intel_engine_cs *waiter; |
| @@ -1168,7 +1194,7 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller, | |||
| 1168 | num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; | 1194 | num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; |
| 1169 | #undef MBOX_UPDATE_DWORDS | 1195 | #undef MBOX_UPDATE_DWORDS |
| 1170 | 1196 | ||
| 1171 | ret = intel_ring_begin(signaller, num_dwords); | 1197 | ret = intel_ring_begin(signaller_req, num_dwords); |
| 1172 | if (ret) | 1198 | if (ret) |
| 1173 | return ret; | 1199 | return ret; |
| 1174 | 1200 | ||
| @@ -1178,8 +1204,7 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller, | |||
| 1178 | if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) | 1204 | if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) |
| 1179 | continue; | 1205 | continue; |
| 1180 | 1206 | ||
| 1181 | seqno = i915_gem_request_get_seqno( | 1207 | seqno = i915_gem_request_get_seqno(signaller_req); |
| 1182 | signaller->outstanding_lazy_request); | ||
| 1183 | intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6)); | 1208 | intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6)); |
| 1184 | intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB | | 1209 | intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB | |
| 1185 | PIPE_CONTROL_QW_WRITE | | 1210 | PIPE_CONTROL_QW_WRITE | |
| @@ -1196,10 +1221,11 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller, | |||
| 1196 | return 0; | 1221 | return 0; |
| 1197 | } | 1222 | } |
| 1198 | 1223 | ||
| 1199 | static int gen8_xcs_signal(struct intel_engine_cs *signaller, | 1224 | static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, |
| 1200 | unsigned int num_dwords) | 1225 | unsigned int num_dwords) |
| 1201 | { | 1226 | { |
| 1202 | #define MBOX_UPDATE_DWORDS 6 | 1227 | #define MBOX_UPDATE_DWORDS 6 |
| 1228 | struct intel_engine_cs *signaller = signaller_req->ring; | ||
| 1203 | struct drm_device *dev = signaller->dev; | 1229 | struct drm_device *dev = signaller->dev; |
| 1204 | struct drm_i915_private *dev_priv = dev->dev_private; | 1230 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1205 | struct intel_engine_cs *waiter; | 1231 | struct intel_engine_cs *waiter; |
| @@ -1209,7 +1235,7 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller, | |||
| 1209 | num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; | 1235 | num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; |
| 1210 | #undef MBOX_UPDATE_DWORDS | 1236 | #undef MBOX_UPDATE_DWORDS |
| 1211 | 1237 | ||
| 1212 | ret = intel_ring_begin(signaller, num_dwords); | 1238 | ret = intel_ring_begin(signaller_req, num_dwords); |
| 1213 | if (ret) | 1239 | if (ret) |
| 1214 | return ret; | 1240 | return ret; |
| 1215 | 1241 | ||
| @@ -1219,8 +1245,7 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller, | |||
| 1219 | if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) | 1245 | if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) |
| 1220 | continue; | 1246 | continue; |
| 1221 | 1247 | ||
| 1222 | seqno = i915_gem_request_get_seqno( | 1248 | seqno = i915_gem_request_get_seqno(signaller_req); |
| 1223 | signaller->outstanding_lazy_request); | ||
| 1224 | intel_ring_emit(signaller, (MI_FLUSH_DW + 1) | | 1249 | intel_ring_emit(signaller, (MI_FLUSH_DW + 1) | |
| 1225 | MI_FLUSH_DW_OP_STOREDW); | 1250 | MI_FLUSH_DW_OP_STOREDW); |
| 1226 | intel_ring_emit(signaller, lower_32_bits(gtt_offset) | | 1251 | intel_ring_emit(signaller, lower_32_bits(gtt_offset) | |
| @@ -1235,9 +1260,10 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller, | |||
| 1235 | return 0; | 1260 | return 0; |
| 1236 | } | 1261 | } |
| 1237 | 1262 | ||
| 1238 | static int gen6_signal(struct intel_engine_cs *signaller, | 1263 | static int gen6_signal(struct drm_i915_gem_request *signaller_req, |
| 1239 | unsigned int num_dwords) | 1264 | unsigned int num_dwords) |
| 1240 | { | 1265 | { |
| 1266 | struct intel_engine_cs *signaller = signaller_req->ring; | ||
| 1241 | struct drm_device *dev = signaller->dev; | 1267 | struct drm_device *dev = signaller->dev; |
| 1242 | struct drm_i915_private *dev_priv = dev->dev_private; | 1268 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1243 | struct intel_engine_cs *useless; | 1269 | struct intel_engine_cs *useless; |
| @@ -1248,15 +1274,14 @@ static int gen6_signal(struct intel_engine_cs *signaller, | |||
| 1248 | num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2); | 1274 | num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2); |
| 1249 | #undef MBOX_UPDATE_DWORDS | 1275 | #undef MBOX_UPDATE_DWORDS |
| 1250 | 1276 | ||
| 1251 | ret = intel_ring_begin(signaller, num_dwords); | 1277 | ret = intel_ring_begin(signaller_req, num_dwords); |
| 1252 | if (ret) | 1278 | if (ret) |
| 1253 | return ret; | 1279 | return ret; |
| 1254 | 1280 | ||
| 1255 | for_each_ring(useless, dev_priv, i) { | 1281 | for_each_ring(useless, dev_priv, i) { |
| 1256 | u32 mbox_reg = signaller->semaphore.mbox.signal[i]; | 1282 | u32 mbox_reg = signaller->semaphore.mbox.signal[i]; |
| 1257 | if (mbox_reg != GEN6_NOSYNC) { | 1283 | if (mbox_reg != GEN6_NOSYNC) { |
| 1258 | u32 seqno = i915_gem_request_get_seqno( | 1284 | u32 seqno = i915_gem_request_get_seqno(signaller_req); |
| 1259 | signaller->outstanding_lazy_request); | ||
| 1260 | intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); | 1285 | intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); |
| 1261 | intel_ring_emit(signaller, mbox_reg); | 1286 | intel_ring_emit(signaller, mbox_reg); |
| 1262 | intel_ring_emit(signaller, seqno); | 1287 | intel_ring_emit(signaller, seqno); |
| @@ -1272,30 +1297,29 @@ static int gen6_signal(struct intel_engine_cs *signaller, | |||
| 1272 | 1297 | ||
| 1273 | /** | 1298 | /** |
| 1274 | * gen6_add_request - Update the semaphore mailbox registers | 1299 | * gen6_add_request - Update the semaphore mailbox registers |
| 1275 | * | 1300 | * |
| 1276 | * @ring - ring that is adding a request | 1301 | * @request - request to write to the ring |
| 1277 | * @seqno - return seqno stuck into the ring | ||
| 1278 | * | 1302 | * |
| 1279 | * Update the mailbox registers in the *other* rings with the current seqno. | 1303 | * Update the mailbox registers in the *other* rings with the current seqno. |
| 1280 | * This acts like a signal in the canonical semaphore. | 1304 | * This acts like a signal in the canonical semaphore. |
| 1281 | */ | 1305 | */ |
| 1282 | static int | 1306 | static int |
| 1283 | gen6_add_request(struct intel_engine_cs *ring) | 1307 | gen6_add_request(struct drm_i915_gem_request *req) |
| 1284 | { | 1308 | { |
| 1309 | struct intel_engine_cs *ring = req->ring; | ||
| 1285 | int ret; | 1310 | int ret; |
| 1286 | 1311 | ||
| 1287 | if (ring->semaphore.signal) | 1312 | if (ring->semaphore.signal) |
| 1288 | ret = ring->semaphore.signal(ring, 4); | 1313 | ret = ring->semaphore.signal(req, 4); |
| 1289 | else | 1314 | else |
| 1290 | ret = intel_ring_begin(ring, 4); | 1315 | ret = intel_ring_begin(req, 4); |
| 1291 | 1316 | ||
| 1292 | if (ret) | 1317 | if (ret) |
| 1293 | return ret; | 1318 | return ret; |
| 1294 | 1319 | ||
| 1295 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); | 1320 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); |
| 1296 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 1321 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
| 1297 | intel_ring_emit(ring, | 1322 | intel_ring_emit(ring, i915_gem_request_get_seqno(req)); |
| 1298 | i915_gem_request_get_seqno(ring->outstanding_lazy_request)); | ||
| 1299 | intel_ring_emit(ring, MI_USER_INTERRUPT); | 1323 | intel_ring_emit(ring, MI_USER_INTERRUPT); |
| 1300 | __intel_ring_advance(ring); | 1324 | __intel_ring_advance(ring); |
| 1301 | 1325 | ||
| @@ -1318,14 +1342,15 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev, | |||
| 1318 | */ | 1342 | */ |
| 1319 | 1343 | ||
| 1320 | static int | 1344 | static int |
| 1321 | gen8_ring_sync(struct intel_engine_cs *waiter, | 1345 | gen8_ring_sync(struct drm_i915_gem_request *waiter_req, |
| 1322 | struct intel_engine_cs *signaller, | 1346 | struct intel_engine_cs *signaller, |
| 1323 | u32 seqno) | 1347 | u32 seqno) |
| 1324 | { | 1348 | { |
| 1349 | struct intel_engine_cs *waiter = waiter_req->ring; | ||
| 1325 | struct drm_i915_private *dev_priv = waiter->dev->dev_private; | 1350 | struct drm_i915_private *dev_priv = waiter->dev->dev_private; |
| 1326 | int ret; | 1351 | int ret; |
| 1327 | 1352 | ||
| 1328 | ret = intel_ring_begin(waiter, 4); | 1353 | ret = intel_ring_begin(waiter_req, 4); |
| 1329 | if (ret) | 1354 | if (ret) |
| 1330 | return ret; | 1355 | return ret; |
| 1331 | 1356 | ||
| @@ -1343,10 +1368,11 @@ gen8_ring_sync(struct intel_engine_cs *waiter, | |||
| 1343 | } | 1368 | } |
| 1344 | 1369 | ||
| 1345 | static int | 1370 | static int |
| 1346 | gen6_ring_sync(struct intel_engine_cs *waiter, | 1371 | gen6_ring_sync(struct drm_i915_gem_request *waiter_req, |
| 1347 | struct intel_engine_cs *signaller, | 1372 | struct intel_engine_cs *signaller, |
| 1348 | u32 seqno) | 1373 | u32 seqno) |
| 1349 | { | 1374 | { |
| 1375 | struct intel_engine_cs *waiter = waiter_req->ring; | ||
| 1350 | u32 dw1 = MI_SEMAPHORE_MBOX | | 1376 | u32 dw1 = MI_SEMAPHORE_MBOX | |
| 1351 | MI_SEMAPHORE_COMPARE | | 1377 | MI_SEMAPHORE_COMPARE | |
| 1352 | MI_SEMAPHORE_REGISTER; | 1378 | MI_SEMAPHORE_REGISTER; |
| @@ -1361,7 +1387,7 @@ gen6_ring_sync(struct intel_engine_cs *waiter, | |||
| 1361 | 1387 | ||
| 1362 | WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID); | 1388 | WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID); |
| 1363 | 1389 | ||
| 1364 | ret = intel_ring_begin(waiter, 4); | 1390 | ret = intel_ring_begin(waiter_req, 4); |
| 1365 | if (ret) | 1391 | if (ret) |
| 1366 | return ret; | 1392 | return ret; |
| 1367 | 1393 | ||
| @@ -1392,8 +1418,9 @@ do { \ | |||
| 1392 | } while (0) | 1418 | } while (0) |
| 1393 | 1419 | ||
| 1394 | static int | 1420 | static int |
| 1395 | pc_render_add_request(struct intel_engine_cs *ring) | 1421 | pc_render_add_request(struct drm_i915_gem_request *req) |
| 1396 | { | 1422 | { |
| 1423 | struct intel_engine_cs *ring = req->ring; | ||
| 1397 | u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; | 1424 | u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; |
| 1398 | int ret; | 1425 | int ret; |
| 1399 | 1426 | ||
| @@ -1405,7 +1432,7 @@ pc_render_add_request(struct intel_engine_cs *ring) | |||
| 1405 | * incoherence by flushing the 6 PIPE_NOTIFY buffers out to | 1432 | * incoherence by flushing the 6 PIPE_NOTIFY buffers out to |
| 1406 | * memory before requesting an interrupt. | 1433 | * memory before requesting an interrupt. |
| 1407 | */ | 1434 | */ |
| 1408 | ret = intel_ring_begin(ring, 32); | 1435 | ret = intel_ring_begin(req, 32); |
| 1409 | if (ret) | 1436 | if (ret) |
| 1410 | return ret; | 1437 | return ret; |
| 1411 | 1438 | ||
| @@ -1413,8 +1440,7 @@ pc_render_add_request(struct intel_engine_cs *ring) | |||
| 1413 | PIPE_CONTROL_WRITE_FLUSH | | 1440 | PIPE_CONTROL_WRITE_FLUSH | |
| 1414 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); | 1441 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); |
| 1415 | intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); | 1442 | intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); |
| 1416 | intel_ring_emit(ring, | 1443 | intel_ring_emit(ring, i915_gem_request_get_seqno(req)); |
| 1417 | i915_gem_request_get_seqno(ring->outstanding_lazy_request)); | ||
| 1418 | intel_ring_emit(ring, 0); | 1444 | intel_ring_emit(ring, 0); |
| 1419 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 1445 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
| 1420 | scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */ | 1446 | scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */ |
| @@ -1433,8 +1459,7 @@ pc_render_add_request(struct intel_engine_cs *ring) | |||
| 1433 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | | 1459 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | |
| 1434 | PIPE_CONTROL_NOTIFY); | 1460 | PIPE_CONTROL_NOTIFY); |
| 1435 | intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); | 1461 | intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); |
| 1436 | intel_ring_emit(ring, | 1462 | intel_ring_emit(ring, i915_gem_request_get_seqno(req)); |
| 1437 | i915_gem_request_get_seqno(ring->outstanding_lazy_request)); | ||
| 1438 | intel_ring_emit(ring, 0); | 1463 | intel_ring_emit(ring, 0); |
| 1439 | __intel_ring_advance(ring); | 1464 | __intel_ring_advance(ring); |
| 1440 | 1465 | ||
| @@ -1585,13 +1610,14 @@ i8xx_ring_put_irq(struct intel_engine_cs *ring) | |||
| 1585 | } | 1610 | } |
| 1586 | 1611 | ||
| 1587 | static int | 1612 | static int |
| 1588 | bsd_ring_flush(struct intel_engine_cs *ring, | 1613 | bsd_ring_flush(struct drm_i915_gem_request *req, |
| 1589 | u32 invalidate_domains, | 1614 | u32 invalidate_domains, |
| 1590 | u32 flush_domains) | 1615 | u32 flush_domains) |
| 1591 | { | 1616 | { |
| 1617 | struct intel_engine_cs *ring = req->ring; | ||
| 1592 | int ret; | 1618 | int ret; |
| 1593 | 1619 | ||
| 1594 | ret = intel_ring_begin(ring, 2); | 1620 | ret = intel_ring_begin(req, 2); |
| 1595 | if (ret) | 1621 | if (ret) |
| 1596 | return ret; | 1622 | return ret; |
| 1597 | 1623 | ||
| @@ -1602,18 +1628,18 @@ bsd_ring_flush(struct intel_engine_cs *ring, | |||
| 1602 | } | 1628 | } |
| 1603 | 1629 | ||
| 1604 | static int | 1630 | static int |
| 1605 | i9xx_add_request(struct intel_engine_cs *ring) | 1631 | i9xx_add_request(struct drm_i915_gem_request *req) |
| 1606 | { | 1632 | { |
| 1633 | struct intel_engine_cs *ring = req->ring; | ||
| 1607 | int ret; | 1634 | int ret; |
| 1608 | 1635 | ||
| 1609 | ret = intel_ring_begin(ring, 4); | 1636 | ret = intel_ring_begin(req, 4); |
| 1610 | if (ret) | 1637 | if (ret) |
| 1611 | return ret; | 1638 | return ret; |
| 1612 | 1639 | ||
| 1613 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); | 1640 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); |
| 1614 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 1641 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
| 1615 | intel_ring_emit(ring, | 1642 | intel_ring_emit(ring, i915_gem_request_get_seqno(req)); |
| 1616 | i915_gem_request_get_seqno(ring->outstanding_lazy_request)); | ||
| 1617 | intel_ring_emit(ring, MI_USER_INTERRUPT); | 1643 | intel_ring_emit(ring, MI_USER_INTERRUPT); |
| 1618 | __intel_ring_advance(ring); | 1644 | __intel_ring_advance(ring); |
| 1619 | 1645 | ||
| @@ -1745,13 +1771,14 @@ gen8_ring_put_irq(struct intel_engine_cs *ring) | |||
| 1745 | } | 1771 | } |
| 1746 | 1772 | ||
| 1747 | static int | 1773 | static int |
| 1748 | i965_dispatch_execbuffer(struct intel_engine_cs *ring, | 1774 | i965_dispatch_execbuffer(struct drm_i915_gem_request *req, |
| 1749 | u64 offset, u32 length, | 1775 | u64 offset, u32 length, |
| 1750 | unsigned dispatch_flags) | 1776 | unsigned dispatch_flags) |
| 1751 | { | 1777 | { |
| 1778 | struct intel_engine_cs *ring = req->ring; | ||
| 1752 | int ret; | 1779 | int ret; |
| 1753 | 1780 | ||
| 1754 | ret = intel_ring_begin(ring, 2); | 1781 | ret = intel_ring_begin(req, 2); |
| 1755 | if (ret) | 1782 | if (ret) |
| 1756 | return ret; | 1783 | return ret; |
| 1757 | 1784 | ||
| @@ -1771,14 +1798,15 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring, | |||
| 1771 | #define I830_TLB_ENTRIES (2) | 1798 | #define I830_TLB_ENTRIES (2) |
| 1772 | #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) | 1799 | #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) |
| 1773 | static int | 1800 | static int |
| 1774 | i830_dispatch_execbuffer(struct intel_engine_cs *ring, | 1801 | i830_dispatch_execbuffer(struct drm_i915_gem_request *req, |
| 1775 | u64 offset, u32 len, | 1802 | u64 offset, u32 len, |
| 1776 | unsigned dispatch_flags) | 1803 | unsigned dispatch_flags) |
| 1777 | { | 1804 | { |
| 1805 | struct intel_engine_cs *ring = req->ring; | ||
| 1778 | u32 cs_offset = ring->scratch.gtt_offset; | 1806 | u32 cs_offset = ring->scratch.gtt_offset; |
| 1779 | int ret; | 1807 | int ret; |
| 1780 | 1808 | ||
| 1781 | ret = intel_ring_begin(ring, 6); | 1809 | ret = intel_ring_begin(req, 6); |
| 1782 | if (ret) | 1810 | if (ret) |
| 1783 | return ret; | 1811 | return ret; |
| 1784 | 1812 | ||
| @@ -1795,7 +1823,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring, | |||
| 1795 | if (len > I830_BATCH_LIMIT) | 1823 | if (len > I830_BATCH_LIMIT) |
| 1796 | return -ENOSPC; | 1824 | return -ENOSPC; |
| 1797 | 1825 | ||
| 1798 | ret = intel_ring_begin(ring, 6 + 2); | 1826 | ret = intel_ring_begin(req, 6 + 2); |
| 1799 | if (ret) | 1827 | if (ret) |
| 1800 | return ret; | 1828 | return ret; |
| 1801 | 1829 | ||
| @@ -1818,7 +1846,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring, | |||
| 1818 | offset = cs_offset; | 1846 | offset = cs_offset; |
| 1819 | } | 1847 | } |
| 1820 | 1848 | ||
| 1821 | ret = intel_ring_begin(ring, 4); | 1849 | ret = intel_ring_begin(req, 4); |
| 1822 | if (ret) | 1850 | if (ret) |
| 1823 | return ret; | 1851 | return ret; |
| 1824 | 1852 | ||
| @@ -1833,13 +1861,14 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring, | |||
| 1833 | } | 1861 | } |
| 1834 | 1862 | ||
| 1835 | static int | 1863 | static int |
| 1836 | i915_dispatch_execbuffer(struct intel_engine_cs *ring, | 1864 | i915_dispatch_execbuffer(struct drm_i915_gem_request *req, |
| 1837 | u64 offset, u32 len, | 1865 | u64 offset, u32 len, |
| 1838 | unsigned dispatch_flags) | 1866 | unsigned dispatch_flags) |
| 1839 | { | 1867 | { |
| 1868 | struct intel_engine_cs *ring = req->ring; | ||
| 1840 | int ret; | 1869 | int ret; |
| 1841 | 1870 | ||
| 1842 | ret = intel_ring_begin(ring, 2); | 1871 | ret = intel_ring_begin(req, 2); |
| 1843 | if (ret) | 1872 | if (ret) |
| 1844 | return ret; | 1873 | return ret; |
| 1845 | 1874 | ||
| @@ -2082,7 +2111,6 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) | |||
| 2082 | 2111 | ||
| 2083 | intel_unpin_ringbuffer_obj(ringbuf); | 2112 | intel_unpin_ringbuffer_obj(ringbuf); |
| 2084 | intel_destroy_ringbuffer_obj(ringbuf); | 2113 | intel_destroy_ringbuffer_obj(ringbuf); |
| 2085 | i915_gem_request_assign(&ring->outstanding_lazy_request, NULL); | ||
| 2086 | 2114 | ||
| 2087 | if (ring->cleanup) | 2115 | if (ring->cleanup) |
| 2088 | ring->cleanup(ring); | 2116 | ring->cleanup(ring); |
| @@ -2106,6 +2134,9 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n) | |||
| 2106 | if (intel_ring_space(ringbuf) >= n) | 2134 | if (intel_ring_space(ringbuf) >= n) |
| 2107 | return 0; | 2135 | return 0; |
| 2108 | 2136 | ||
| 2137 | /* The whole point of reserving space is to not wait! */ | ||
| 2138 | WARN_ON(ringbuf->reserved_in_use); | ||
| 2139 | |||
| 2109 | list_for_each_entry(request, &ring->request_list, list) { | 2140 | list_for_each_entry(request, &ring->request_list, list) { |
| 2110 | space = __intel_ring_space(request->postfix, ringbuf->tail, | 2141 | space = __intel_ring_space(request->postfix, ringbuf->tail, |
| 2111 | ringbuf->size); | 2142 | ringbuf->size); |
| @@ -2124,18 +2155,11 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n) | |||
| 2124 | return 0; | 2155 | return 0; |
| 2125 | } | 2156 | } |
| 2126 | 2157 | ||
| 2127 | static int intel_wrap_ring_buffer(struct intel_engine_cs *ring) | 2158 | static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf) |
| 2128 | { | 2159 | { |
| 2129 | uint32_t __iomem *virt; | 2160 | uint32_t __iomem *virt; |
| 2130 | struct intel_ringbuffer *ringbuf = ring->buffer; | ||
| 2131 | int rem = ringbuf->size - ringbuf->tail; | 2161 | int rem = ringbuf->size - ringbuf->tail; |
| 2132 | 2162 | ||
| 2133 | if (ringbuf->space < rem) { | ||
| 2134 | int ret = ring_wait_for_space(ring, rem); | ||
| 2135 | if (ret) | ||
| 2136 | return ret; | ||
| 2137 | } | ||
| 2138 | |||
| 2139 | virt = ringbuf->virtual_start + ringbuf->tail; | 2163 | virt = ringbuf->virtual_start + ringbuf->tail; |
| 2140 | rem /= 4; | 2164 | rem /= 4; |
| 2141 | while (rem--) | 2165 | while (rem--) |
| @@ -2143,21 +2167,11 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring) | |||
| 2143 | 2167 | ||
| 2144 | ringbuf->tail = 0; | 2168 | ringbuf->tail = 0; |
| 2145 | intel_ring_update_space(ringbuf); | 2169 | intel_ring_update_space(ringbuf); |
| 2146 | |||
| 2147 | return 0; | ||
| 2148 | } | 2170 | } |
| 2149 | 2171 | ||
| 2150 | int intel_ring_idle(struct intel_engine_cs *ring) | 2172 | int intel_ring_idle(struct intel_engine_cs *ring) |
| 2151 | { | 2173 | { |
| 2152 | struct drm_i915_gem_request *req; | 2174 | struct drm_i915_gem_request *req; |
| 2153 | int ret; | ||
| 2154 | |||
| 2155 | /* We need to add any requests required to flush the objects and ring */ | ||
| 2156 | if (ring->outstanding_lazy_request) { | ||
| 2157 | ret = i915_add_request(ring); | ||
| 2158 | if (ret) | ||
| 2159 | return ret; | ||
| 2160 | } | ||
| 2161 | 2175 | ||
| 2162 | /* Wait upon the last request to be completed */ | 2176 | /* Wait upon the last request to be completed */ |
| 2163 | if (list_empty(&ring->request_list)) | 2177 | if (list_empty(&ring->request_list)) |
| @@ -2180,33 +2194,126 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) | |||
| 2180 | return 0; | 2194 | return 0; |
| 2181 | } | 2195 | } |
| 2182 | 2196 | ||
| 2183 | static int __intel_ring_prepare(struct intel_engine_cs *ring, | 2197 | int intel_ring_reserve_space(struct drm_i915_gem_request *request) |
| 2184 | int bytes) | 2198 | { |
| 2199 | /* | ||
| 2200 | * The first call merely notes the reserve request and is common for | ||
| 2201 | * all back ends. The subsequent localised _begin() call actually | ||
| 2202 | * ensures that the reservation is available. Without the begin, if | ||
| 2203 | * the request creator immediately submitted the request without | ||
| 2204 | * adding any commands to it then there might not actually be | ||
| 2205 | * sufficient room for the submission commands. | ||
| 2206 | */ | ||
| 2207 | intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); | ||
| 2208 | |||
| 2209 | return intel_ring_begin(request, 0); | ||
| 2210 | } | ||
| 2211 | |||
| 2212 | void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size) | ||
| 2213 | { | ||
| 2214 | WARN_ON(ringbuf->reserved_size); | ||
| 2215 | WARN_ON(ringbuf->reserved_in_use); | ||
| 2216 | |||
| 2217 | ringbuf->reserved_size = size; | ||
| 2218 | } | ||
| 2219 | |||
| 2220 | void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf) | ||
| 2221 | { | ||
| 2222 | WARN_ON(ringbuf->reserved_in_use); | ||
| 2223 | |||
| 2224 | ringbuf->reserved_size = 0; | ||
| 2225 | ringbuf->reserved_in_use = false; | ||
| 2226 | } | ||
| 2227 | |||
| 2228 | void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf) | ||
| 2229 | { | ||
| 2230 | WARN_ON(ringbuf->reserved_in_use); | ||
| 2231 | |||
| 2232 | ringbuf->reserved_in_use = true; | ||
| 2233 | ringbuf->reserved_tail = ringbuf->tail; | ||
| 2234 | } | ||
| 2235 | |||
| 2236 | void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf) | ||
| 2237 | { | ||
| 2238 | WARN_ON(!ringbuf->reserved_in_use); | ||
| 2239 | if (ringbuf->tail > ringbuf->reserved_tail) { | ||
| 2240 | WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size, | ||
| 2241 | "request reserved size too small: %d vs %d!\n", | ||
| 2242 | ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size); | ||
| 2243 | } else { | ||
| 2244 | /* | ||
| 2245 | * The ring was wrapped while the reserved space was in use. | ||
| 2246 | * That means that some unknown amount of the ring tail was | ||
| 2247 | * no-op filled and skipped. Thus simply adding the ring size | ||
| 2248 | * to the tail and doing the above space check will not work. | ||
| 2249 | * Rather than attempt to track how much tail was skipped, | ||
| 2250 | * it is much simpler to say that also skipping the sanity | ||
| 2251 | * check every once in a while is not a big issue. | ||
| 2252 | */ | ||
| 2253 | } | ||
| 2254 | |||
| 2255 | ringbuf->reserved_size = 0; | ||
| 2256 | ringbuf->reserved_in_use = false; | ||
| 2257 | } | ||
| 2258 | |||
| 2259 | static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes) | ||
| 2185 | { | 2260 | { |
| 2186 | struct intel_ringbuffer *ringbuf = ring->buffer; | 2261 | struct intel_ringbuffer *ringbuf = ring->buffer; |
| 2187 | int ret; | 2262 | int remain_usable = ringbuf->effective_size - ringbuf->tail; |
| 2263 | int remain_actual = ringbuf->size - ringbuf->tail; | ||
| 2264 | int ret, total_bytes, wait_bytes = 0; | ||
| 2265 | bool need_wrap = false; | ||
| 2188 | 2266 | ||
| 2189 | if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) { | 2267 | if (ringbuf->reserved_in_use) |
| 2190 | ret = intel_wrap_ring_buffer(ring); | 2268 | total_bytes = bytes; |
| 2191 | if (unlikely(ret)) | 2269 | else |
| 2192 | return ret; | 2270 | total_bytes = bytes + ringbuf->reserved_size; |
| 2271 | |||
| 2272 | if (unlikely(bytes > remain_usable)) { | ||
| 2273 | /* | ||
| 2274 | * Not enough space for the basic request. So need to flush | ||
| 2275 | * out the remainder and then wait for base + reserved. | ||
| 2276 | */ | ||
| 2277 | wait_bytes = remain_actual + total_bytes; | ||
| 2278 | need_wrap = true; | ||
| 2279 | } else { | ||
| 2280 | if (unlikely(total_bytes > remain_usable)) { | ||
| 2281 | /* | ||
| 2282 | * The base request will fit but the reserved space | ||
| 2283 | * falls off the end. So only need to to wait for the | ||
| 2284 | * reserved size after flushing out the remainder. | ||
| 2285 | */ | ||
| 2286 | wait_bytes = remain_actual + ringbuf->reserved_size; | ||
| 2287 | need_wrap = true; | ||
| 2288 | } else if (total_bytes > ringbuf->space) { | ||
| 2289 | /* No wrapping required, just waiting. */ | ||
| 2290 | wait_bytes = total_bytes; | ||
| 2291 | } | ||
| 2193 | } | 2292 | } |
| 2194 | 2293 | ||
| 2195 | if (unlikely(ringbuf->space < bytes)) { | 2294 | if (wait_bytes) { |
| 2196 | ret = ring_wait_for_space(ring, bytes); | 2295 | ret = ring_wait_for_space(ring, wait_bytes); |
| 2197 | if (unlikely(ret)) | 2296 | if (unlikely(ret)) |
| 2198 | return ret; | 2297 | return ret; |
| 2298 | |||
| 2299 | if (need_wrap) | ||
| 2300 | __wrap_ring_buffer(ringbuf); | ||
| 2199 | } | 2301 | } |
| 2200 | 2302 | ||
| 2201 | return 0; | 2303 | return 0; |
| 2202 | } | 2304 | } |
| 2203 | 2305 | ||
| 2204 | int intel_ring_begin(struct intel_engine_cs *ring, | 2306 | int intel_ring_begin(struct drm_i915_gem_request *req, |
| 2205 | int num_dwords) | 2307 | int num_dwords) |
| 2206 | { | 2308 | { |
| 2207 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 2309 | struct intel_engine_cs *ring; |
| 2310 | struct drm_i915_private *dev_priv; | ||
| 2208 | int ret; | 2311 | int ret; |
| 2209 | 2312 | ||
| 2313 | WARN_ON(req == NULL); | ||
| 2314 | ring = req->ring; | ||
| 2315 | dev_priv = ring->dev->dev_private; | ||
| 2316 | |||
| 2210 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, | 2317 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, |
| 2211 | dev_priv->mm.interruptible); | 2318 | dev_priv->mm.interruptible); |
| 2212 | if (ret) | 2319 | if (ret) |
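The rewritten __intel_ring_prepare() above folds the old wrap-then-wait sequence into a single decision: compute how many bytes must become free, including any outstanding reservation, wait once, and only then no-op fill to the start of the buffer when the write would cross effective_size. A condensed model of that decision using the same quantities as the hunk (remain_usable = effective_size - tail, remain_actual = size - tail); nothing beyond the hunk's own logic is introduced:

/* Returns nonzero when a wait is required; *need_wrap says whether the
 * tail must be no-op filled to the start of the ring first.
 */
static int prepare_space_model(int bytes, int reserved, bool reserved_in_use,
			       int remain_usable, int remain_actual, int space,
			       int *wait_bytes, bool *need_wrap)
{
	int total = reserved_in_use ? bytes : bytes + reserved;

	*wait_bytes = 0;
	*need_wrap = false;

	if (bytes > remain_usable) {
		/* The request itself does not fit before the wrap point:
		 * flush the whole remaining tail, then wait for the request
		 * plus its reservation. */
		*wait_bytes = remain_actual + total;
		*need_wrap = true;
	} else if (total > remain_usable) {
		/* The request fits but the reserved space would fall off
		 * the end: wait for the tail plus just the reservation. */
		*wait_bytes = remain_actual + reserved;
		*need_wrap = true;
	} else if (total > space) {
		/* Everything fits before the wrap; simply wait for space. */
		*wait_bytes = total;
	}

	return *wait_bytes != 0;
}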
| @@ -2216,18 +2323,14 @@ int intel_ring_begin(struct intel_engine_cs *ring, | |||
| 2216 | if (ret) | 2323 | if (ret) |
| 2217 | return ret; | 2324 | return ret; |
| 2218 | 2325 | ||
| 2219 | /* Preallocate the olr before touching the ring */ | ||
| 2220 | ret = i915_gem_request_alloc(ring, ring->default_context); | ||
| 2221 | if (ret) | ||
| 2222 | return ret; | ||
| 2223 | |||
| 2224 | ring->buffer->space -= num_dwords * sizeof(uint32_t); | 2326 | ring->buffer->space -= num_dwords * sizeof(uint32_t); |
| 2225 | return 0; | 2327 | return 0; |
| 2226 | } | 2328 | } |
| 2227 | 2329 | ||
| 2228 | /* Align the ring tail to a cacheline boundary */ | 2330 | /* Align the ring tail to a cacheline boundary */ |
| 2229 | int intel_ring_cacheline_align(struct intel_engine_cs *ring) | 2331 | int intel_ring_cacheline_align(struct drm_i915_gem_request *req) |
| 2230 | { | 2332 | { |
| 2333 | struct intel_engine_cs *ring = req->ring; | ||
| 2231 | int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); | 2334 | int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); |
| 2232 | int ret; | 2335 | int ret; |
| 2233 | 2336 | ||
| @@ -2235,7 +2338,7 @@ int intel_ring_cacheline_align(struct intel_engine_cs *ring) | |||
| 2235 | return 0; | 2338 | return 0; |
| 2236 | 2339 | ||
| 2237 | num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords; | 2340 | num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords; |
| 2238 | ret = intel_ring_begin(ring, num_dwords); | 2341 | ret = intel_ring_begin(req, num_dwords); |
| 2239 | if (ret) | 2342 | if (ret) |
| 2240 | return ret; | 2343 | return ret; |
| 2241 | 2344 | ||
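intel_ring_cacheline_align() above pads the tail with MI_NOOPs up to the next 64-byte boundary: with CACHELINE_BYTES = 64 and 4-byte dwords, a tail that is 10 dwords into a cacheline needs 16 - 10 = 6 NOOPs, and an already aligned tail needs none. The same arithmetic as a standalone worked example (plain C, no driver state assumed):

#include <stdint.h>
#include <stdio.h>

#define CACHELINE_BYTES 64

/* Number of MI_NOOP dwords needed to pad 'tail' (in bytes) to the next
 * cacheline boundary; 0 if already aligned, mirroring the early return
 * in intel_ring_cacheline_align().
 */
static int cacheline_pad_dwords(uint32_t tail)
{
	int used = (tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);

	return used ? (int)(CACHELINE_BYTES / sizeof(uint32_t)) - used : 0;
}

int main(void)
{
	printf("%d\n", cacheline_pad_dwords(0x128)); /* 0x28 into the line -> 6 */
	printf("%d\n", cacheline_pad_dwords(0x140)); /* already aligned    -> 0 */
	return 0;
}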
| @@ -2252,8 +2355,6 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno) | |||
| 2252 | struct drm_device *dev = ring->dev; | 2355 | struct drm_device *dev = ring->dev; |
| 2253 | struct drm_i915_private *dev_priv = dev->dev_private; | 2356 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2254 | 2357 | ||
| 2255 | BUG_ON(ring->outstanding_lazy_request); | ||
| 2256 | |||
| 2257 | if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) { | 2358 | if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) { |
| 2258 | I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); | 2359 | I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); |
| 2259 | I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); | 2360 | I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); |
| @@ -2298,13 +2399,14 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring, | |||
| 2298 | _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); | 2399 | _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); |
| 2299 | } | 2400 | } |
| 2300 | 2401 | ||
| 2301 | static int gen6_bsd_ring_flush(struct intel_engine_cs *ring, | 2402 | static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, |
| 2302 | u32 invalidate, u32 flush) | 2403 | u32 invalidate, u32 flush) |
| 2303 | { | 2404 | { |
| 2405 | struct intel_engine_cs *ring = req->ring; | ||
| 2304 | uint32_t cmd; | 2406 | uint32_t cmd; |
| 2305 | int ret; | 2407 | int ret; |
| 2306 | 2408 | ||
| 2307 | ret = intel_ring_begin(ring, 4); | 2409 | ret = intel_ring_begin(req, 4); |
| 2308 | if (ret) | 2410 | if (ret) |
| 2309 | return ret; | 2411 | return ret; |
| 2310 | 2412 | ||
| @@ -2342,20 +2444,23 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring, | |||
| 2342 | } | 2444 | } |
| 2343 | 2445 | ||
| 2344 | static int | 2446 | static int |
| 2345 | gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring, | 2447 | gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, |
| 2346 | u64 offset, u32 len, | 2448 | u64 offset, u32 len, |
| 2347 | unsigned dispatch_flags) | 2449 | unsigned dispatch_flags) |
| 2348 | { | 2450 | { |
| 2451 | struct intel_engine_cs *ring = req->ring; | ||
| 2349 | bool ppgtt = USES_PPGTT(ring->dev) && | 2452 | bool ppgtt = USES_PPGTT(ring->dev) && |
| 2350 | !(dispatch_flags & I915_DISPATCH_SECURE); | 2453 | !(dispatch_flags & I915_DISPATCH_SECURE); |
| 2351 | int ret; | 2454 | int ret; |
| 2352 | 2455 | ||
| 2353 | ret = intel_ring_begin(ring, 4); | 2456 | ret = intel_ring_begin(req, 4); |
| 2354 | if (ret) | 2457 | if (ret) |
| 2355 | return ret; | 2458 | return ret; |
| 2356 | 2459 | ||
| 2357 | /* FIXME(BDW): Address space and security selectors. */ | 2460 | /* FIXME(BDW): Address space and security selectors. */ |
| 2358 | intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8)); | 2461 | intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) | |
| 2462 | (dispatch_flags & I915_DISPATCH_RS ? | ||
| 2463 | MI_BATCH_RESOURCE_STREAMER : 0)); | ||
| 2359 | intel_ring_emit(ring, lower_32_bits(offset)); | 2464 | intel_ring_emit(ring, lower_32_bits(offset)); |
| 2360 | intel_ring_emit(ring, upper_32_bits(offset)); | 2465 | intel_ring_emit(ring, upper_32_bits(offset)); |
| 2361 | intel_ring_emit(ring, MI_NOOP); | 2466 | intel_ring_emit(ring, MI_NOOP); |
| @@ -2365,20 +2470,23 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring, | |||
| 2365 | } | 2470 | } |
| 2366 | 2471 | ||
| 2367 | static int | 2472 | static int |
| 2368 | hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring, | 2473 | hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, |
| 2369 | u64 offset, u32 len, | 2474 | u64 offset, u32 len, |
| 2370 | unsigned dispatch_flags) | 2475 | unsigned dispatch_flags) |
| 2371 | { | 2476 | { |
| 2477 | struct intel_engine_cs *ring = req->ring; | ||
| 2372 | int ret; | 2478 | int ret; |
| 2373 | 2479 | ||
| 2374 | ret = intel_ring_begin(ring, 2); | 2480 | ret = intel_ring_begin(req, 2); |
| 2375 | if (ret) | 2481 | if (ret) |
| 2376 | return ret; | 2482 | return ret; |
| 2377 | 2483 | ||
| 2378 | intel_ring_emit(ring, | 2484 | intel_ring_emit(ring, |
| 2379 | MI_BATCH_BUFFER_START | | 2485 | MI_BATCH_BUFFER_START | |
| 2380 | (dispatch_flags & I915_DISPATCH_SECURE ? | 2486 | (dispatch_flags & I915_DISPATCH_SECURE ? |
| 2381 | 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW)); | 2487 | 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) | |
| 2488 | (dispatch_flags & I915_DISPATCH_RS ? | ||
| 2489 | MI_BATCH_RESOURCE_STREAMER : 0)); | ||
| 2382 | /* bit0-7 is the length on GEN6+ */ | 2490 | /* bit0-7 is the length on GEN6+ */ |
| 2383 | intel_ring_emit(ring, offset); | 2491 | intel_ring_emit(ring, offset); |
| 2384 | intel_ring_advance(ring); | 2492 | intel_ring_advance(ring); |
| @@ -2387,13 +2495,14 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring, | |||
| 2387 | } | 2495 | } |
| 2388 | 2496 | ||
| 2389 | static int | 2497 | static int |
| 2390 | gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring, | 2498 | gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, |
| 2391 | u64 offset, u32 len, | 2499 | u64 offset, u32 len, |
| 2392 | unsigned dispatch_flags) | 2500 | unsigned dispatch_flags) |
| 2393 | { | 2501 | { |
| 2502 | struct intel_engine_cs *ring = req->ring; | ||
| 2394 | int ret; | 2503 | int ret; |
| 2395 | 2504 | ||
| 2396 | ret = intel_ring_begin(ring, 2); | 2505 | ret = intel_ring_begin(req, 2); |
| 2397 | if (ret) | 2506 | if (ret) |
| 2398 | return ret; | 2507 | return ret; |
| 2399 | 2508 | ||
| @@ -2410,14 +2519,15 @@ gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring, | |||
| 2410 | 2519 | ||
| 2411 | /* Blitter support (SandyBridge+) */ | 2520 | /* Blitter support (SandyBridge+) */ |
| 2412 | 2521 | ||
| 2413 | static int gen6_ring_flush(struct intel_engine_cs *ring, | 2522 | static int gen6_ring_flush(struct drm_i915_gem_request *req, |
| 2414 | u32 invalidate, u32 flush) | 2523 | u32 invalidate, u32 flush) |
| 2415 | { | 2524 | { |
| 2525 | struct intel_engine_cs *ring = req->ring; | ||
| 2416 | struct drm_device *dev = ring->dev; | 2526 | struct drm_device *dev = ring->dev; |
| 2417 | uint32_t cmd; | 2527 | uint32_t cmd; |
| 2418 | int ret; | 2528 | int ret; |
| 2419 | 2529 | ||
| 2420 | ret = intel_ring_begin(ring, 4); | 2530 | ret = intel_ring_begin(req, 4); |
| 2421 | if (ret) | 2531 | if (ret) |
| 2422 | return ret; | 2532 | return ret; |
| 2423 | 2533 | ||
| @@ -2818,26 +2928,28 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) | |||
| 2818 | } | 2928 | } |
| 2819 | 2929 | ||
| 2820 | int | 2930 | int |
| 2821 | intel_ring_flush_all_caches(struct intel_engine_cs *ring) | 2931 | intel_ring_flush_all_caches(struct drm_i915_gem_request *req) |
| 2822 | { | 2932 | { |
| 2933 | struct intel_engine_cs *ring = req->ring; | ||
| 2823 | int ret; | 2934 | int ret; |
| 2824 | 2935 | ||
| 2825 | if (!ring->gpu_caches_dirty) | 2936 | if (!ring->gpu_caches_dirty) |
| 2826 | return 0; | 2937 | return 0; |
| 2827 | 2938 | ||
| 2828 | ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS); | 2939 | ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS); |
| 2829 | if (ret) | 2940 | if (ret) |
| 2830 | return ret; | 2941 | return ret; |
| 2831 | 2942 | ||
| 2832 | trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS); | 2943 | trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS); |
| 2833 | 2944 | ||
| 2834 | ring->gpu_caches_dirty = false; | 2945 | ring->gpu_caches_dirty = false; |
| 2835 | return 0; | 2946 | return 0; |
| 2836 | } | 2947 | } |
| 2837 | 2948 | ||
| 2838 | int | 2949 | int |
| 2839 | intel_ring_invalidate_all_caches(struct intel_engine_cs *ring) | 2950 | intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req) |
| 2840 | { | 2951 | { |
| 2952 | struct intel_engine_cs *ring = req->ring; | ||
| 2841 | uint32_t flush_domains; | 2953 | uint32_t flush_domains; |
| 2842 | int ret; | 2954 | int ret; |
| 2843 | 2955 | ||
| @@ -2845,11 +2957,11 @@ intel_ring_invalidate_all_caches(struct intel_engine_cs *ring) | |||
| 2845 | if (ring->gpu_caches_dirty) | 2957 | if (ring->gpu_caches_dirty) |
| 2846 | flush_domains = I915_GEM_GPU_DOMAINS; | 2958 | flush_domains = I915_GEM_GPU_DOMAINS; |
| 2847 | 2959 | ||
| 2848 | ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); | 2960 | ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains); |
| 2849 | if (ret) | 2961 | if (ret) |
| 2850 | return ret; | 2962 | return ret; |
| 2851 | 2963 | ||
| 2852 | trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); | 2964 | trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains); |
| 2853 | 2965 | ||
| 2854 | ring->gpu_caches_dirty = false; | 2966 | ring->gpu_caches_dirty = false; |
| 2855 | return 0; | 2967 | return 0; |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 4be66f60504d..2e85fda94963 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | * workarounds! | 12 | * workarounds! |
| 13 | */ | 13 | */ |
| 14 | #define CACHELINE_BYTES 64 | 14 | #define CACHELINE_BYTES 64 |
| 15 | #define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t)) | ||
| 15 | 16 | ||
| 16 | /* | 17 | /* |
| 17 | * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use" | 18 | * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use" |
| @@ -105,6 +106,9 @@ struct intel_ringbuffer { | |||
| 105 | int space; | 106 | int space; |
| 106 | int size; | 107 | int size; |
| 107 | int effective_size; | 108 | int effective_size; |
| 109 | int reserved_size; | ||
| 110 | int reserved_tail; | ||
| 111 | bool reserved_in_use; | ||
| 108 | 112 | ||
| 109 | /** We track the position of the requests in the ring buffer, and | 113 | /** We track the position of the requests in the ring buffer, and |
| 110 | * when each is retired we increment last_retired_head as the GPU | 114 | * when each is retired we increment last_retired_head as the GPU |
| @@ -120,6 +124,25 @@ struct intel_ringbuffer { | |||
| 120 | struct intel_context; | 124 | struct intel_context; |
| 121 | struct drm_i915_reg_descriptor; | 125 | struct drm_i915_reg_descriptor; |
| 122 | 126 | ||
| 127 | /* | ||
| 128 | * we use a single page to load ctx workarounds so all of these | ||
| 129 | * values are referred to in terms of dwords | ||
| 130 | * | ||
| 131 | * struct i915_wa_ctx_bb: | ||
| 132 | * offset: specifies batch starting position, also helpful in case | ||
| 133 | * if we want to have multiple batches at different offsets based on | ||
| 134 | * some criteria. It is not a requirement at the moment but provides | ||
| 135 | * an option for future use. | ||
| 136 | * size: size of the batch in DWORDS | ||
| 137 | */ | ||
| 138 | struct i915_ctx_workarounds { | ||
| 139 | struct i915_wa_ctx_bb { | ||
| 140 | u32 offset; | ||
| 141 | u32 size; | ||
| 142 | } indirect_ctx, per_ctx; | ||
| 143 | struct drm_i915_gem_object *obj; | ||
| 144 | }; | ||
| 145 | |||
| 123 | struct intel_engine_cs { | 146 | struct intel_engine_cs { |
| 124 | const char *name; | 147 | const char *name; |
| 125 | enum intel_ring_id { | 148 | enum intel_ring_id { |
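The new i915_ctx_workarounds structure above records where each per-context workaround batch lives inside its single backing page, with offset and size both counted in dwords. Turning an offset into a GPU address is then just a multiply by the dword size; a minimal sketch, where the use of i915_gem_obj_ggtt_offset() as the address lookup is an assumption:

/* Byte address of the indirect-ctx workaround batch, given that
 * i915_wa_ctx_bb stores its offset in dwords within wa_ctx->obj.
 */
static u64 wa_ctx_batch_address(struct i915_ctx_workarounds *wa_ctx)
{
	return i915_gem_obj_ggtt_offset(wa_ctx->obj) +
	       wa_ctx->indirect_ctx.offset * sizeof(u32);
}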
| @@ -143,6 +166,7 @@ struct intel_engine_cs { | |||
| 143 | struct i915_gem_batch_pool batch_pool; | 166 | struct i915_gem_batch_pool batch_pool; |
| 144 | 167 | ||
| 145 | struct intel_hw_status_page status_page; | 168 | struct intel_hw_status_page status_page; |
| 169 | struct i915_ctx_workarounds wa_ctx; | ||
| 146 | 170 | ||
| 147 | unsigned irq_refcount; /* protected by dev_priv->irq_lock */ | 171 | unsigned irq_refcount; /* protected by dev_priv->irq_lock */ |
| 148 | u32 irq_enable_mask; /* bitmask to enable ring interrupt */ | 172 | u32 irq_enable_mask; /* bitmask to enable ring interrupt */ |
| @@ -152,15 +176,14 @@ struct intel_engine_cs { | |||
| 152 | 176 | ||
| 153 | int (*init_hw)(struct intel_engine_cs *ring); | 177 | int (*init_hw)(struct intel_engine_cs *ring); |
| 154 | 178 | ||
| 155 | int (*init_context)(struct intel_engine_cs *ring, | 179 | int (*init_context)(struct drm_i915_gem_request *req); |
| 156 | struct intel_context *ctx); | ||
| 157 | 180 | ||
| 158 | void (*write_tail)(struct intel_engine_cs *ring, | 181 | void (*write_tail)(struct intel_engine_cs *ring, |
| 159 | u32 value); | 182 | u32 value); |
| 160 | int __must_check (*flush)(struct intel_engine_cs *ring, | 183 | int __must_check (*flush)(struct drm_i915_gem_request *req, |
| 161 | u32 invalidate_domains, | 184 | u32 invalidate_domains, |
| 162 | u32 flush_domains); | 185 | u32 flush_domains); |
| 163 | int (*add_request)(struct intel_engine_cs *ring); | 186 | int (*add_request)(struct drm_i915_gem_request *req); |
| 164 | /* Some chipsets are not quite as coherent as advertised and need | 187 | /* Some chipsets are not quite as coherent as advertised and need |
| 165 | * an expensive kick to force a true read of the up-to-date seqno. | 188 | * an expensive kick to force a true read of the up-to-date seqno. |
| 166 | * However, the up-to-date seqno is not always required and the last | 189 | * However, the up-to-date seqno is not always required and the last |
| @@ -171,11 +194,12 @@ struct intel_engine_cs { | |||
| 171 | bool lazy_coherency); | 194 | bool lazy_coherency); |
| 172 | void (*set_seqno)(struct intel_engine_cs *ring, | 195 | void (*set_seqno)(struct intel_engine_cs *ring, |
| 173 | u32 seqno); | 196 | u32 seqno); |
| 174 | int (*dispatch_execbuffer)(struct intel_engine_cs *ring, | 197 | int (*dispatch_execbuffer)(struct drm_i915_gem_request *req, |
| 175 | u64 offset, u32 length, | 198 | u64 offset, u32 length, |
| 176 | unsigned dispatch_flags); | 199 | unsigned dispatch_flags); |
| 177 | #define I915_DISPATCH_SECURE 0x1 | 200 | #define I915_DISPATCH_SECURE 0x1 |
| 178 | #define I915_DISPATCH_PINNED 0x2 | 201 | #define I915_DISPATCH_PINNED 0x2 |
| 202 | #define I915_DISPATCH_RS 0x4 | ||
| 179 | void (*cleanup)(struct intel_engine_cs *ring); | 203 | void (*cleanup)(struct intel_engine_cs *ring); |
| 180 | 204 | ||
| 181 | /* GEN8 signal/wait table - never trust comments! | 205 | /* GEN8 signal/wait table - never trust comments! |
| @@ -229,10 +253,10 @@ struct intel_engine_cs { | |||
| 229 | }; | 253 | }; |
| 230 | 254 | ||
| 231 | /* AKA wait() */ | 255 | /* AKA wait() */ |
| 232 | int (*sync_to)(struct intel_engine_cs *ring, | 256 | int (*sync_to)(struct drm_i915_gem_request *to_req, |
| 233 | struct intel_engine_cs *to, | 257 | struct intel_engine_cs *from, |
| 234 | u32 seqno); | 258 | u32 seqno); |
| 235 | int (*signal)(struct intel_engine_cs *signaller, | 259 | int (*signal)(struct drm_i915_gem_request *signaller_req, |
| 236 | /* num_dwords needed by caller */ | 260 | /* num_dwords needed by caller */ |
| 237 | unsigned int num_dwords); | 261 | unsigned int num_dwords); |
| 238 | } semaphore; | 262 | } semaphore; |
| @@ -243,14 +267,11 @@ struct intel_engine_cs { | |||
| 243 | struct list_head execlist_retired_req_list; | 267 | struct list_head execlist_retired_req_list; |
| 244 | u8 next_context_status_buffer; | 268 | u8 next_context_status_buffer; |
| 245 | u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */ | 269 | u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */ |
| 246 | int (*emit_request)(struct intel_ringbuffer *ringbuf, | 270 | int (*emit_request)(struct drm_i915_gem_request *request); |
| 247 | struct drm_i915_gem_request *request); | 271 | int (*emit_flush)(struct drm_i915_gem_request *request, |
| 248 | int (*emit_flush)(struct intel_ringbuffer *ringbuf, | ||
| 249 | struct intel_context *ctx, | ||
| 250 | u32 invalidate_domains, | 272 | u32 invalidate_domains, |
| 251 | u32 flush_domains); | 273 | u32 flush_domains); |
| 252 | int (*emit_bb_start)(struct intel_ringbuffer *ringbuf, | 274 | int (*emit_bb_start)(struct drm_i915_gem_request *req, |
| 253 | struct intel_context *ctx, | ||
| 254 | u64 offset, unsigned dispatch_flags); | 275 | u64 offset, unsigned dispatch_flags); |
| 255 | 276 | ||
| 256 | /** | 277 | /** |
| @@ -272,10 +293,6 @@ struct intel_engine_cs { | |||
| 272 | struct list_head request_list; | 293 | struct list_head request_list; |
| 273 | 294 | ||
| 274 | /** | 295 | /** |
| 275 | * Do we have some not yet emitted requests outstanding? | ||
| 276 | */ | ||
| 277 | struct drm_i915_gem_request *outstanding_lazy_request; | ||
| 278 | /** | ||
| 279 | * Seqno of request most recently submitted to request_list. | 296 | * Seqno of request most recently submitted to request_list. |
| 280 | * Used exclusively by hang checker to avoid grabbing lock while | 297 | * Used exclusively by hang checker to avoid grabbing lock while |
| 281 | * inspecting request list. | 298 | * inspecting request list. |
| @@ -408,8 +425,8 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring); | |||
| 408 | 425 | ||
| 409 | int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request); | 426 | int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request); |
| 410 | 427 | ||
| 411 | int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n); | 428 | int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n); |
| 412 | int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring); | 429 | int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req); |
| 413 | static inline void intel_ring_emit(struct intel_engine_cs *ring, | 430 | static inline void intel_ring_emit(struct intel_engine_cs *ring, |
| 414 | u32 data) | 431 | u32 data) |
| 415 | { | 432 | { |
| @@ -426,12 +443,11 @@ int __intel_ring_space(int head, int tail, int size); | |||
| 426 | void intel_ring_update_space(struct intel_ringbuffer *ringbuf); | 443 | void intel_ring_update_space(struct intel_ringbuffer *ringbuf); |
| 427 | int intel_ring_space(struct intel_ringbuffer *ringbuf); | 444 | int intel_ring_space(struct intel_ringbuffer *ringbuf); |
| 428 | bool intel_ring_stopped(struct intel_engine_cs *ring); | 445 | bool intel_ring_stopped(struct intel_engine_cs *ring); |
| 429 | void __intel_ring_advance(struct intel_engine_cs *ring); | ||
| 430 | 446 | ||
| 431 | int __must_check intel_ring_idle(struct intel_engine_cs *ring); | 447 | int __must_check intel_ring_idle(struct intel_engine_cs *ring); |
| 432 | void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno); | 448 | void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno); |
| 433 | int intel_ring_flush_all_caches(struct intel_engine_cs *ring); | 449 | int intel_ring_flush_all_caches(struct drm_i915_gem_request *req); |
| 434 | int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring); | 450 | int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req); |
| 435 | 451 | ||
| 436 | void intel_fini_pipe_control(struct intel_engine_cs *ring); | 452 | void intel_fini_pipe_control(struct intel_engine_cs *ring); |
| 437 | int intel_init_pipe_control(struct intel_engine_cs *ring); | 453 | int intel_init_pipe_control(struct intel_engine_cs *ring); |
| @@ -451,11 +467,29 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf) | |||
| 451 | return ringbuf->tail; | 467 | return ringbuf->tail; |
| 452 | } | 468 | } |
| 453 | 469 | ||
| 454 | static inline struct drm_i915_gem_request * | 470 | /* |
| 455 | intel_ring_get_request(struct intel_engine_cs *ring) | 471 | * Arbitrary size for largest possible 'add request' sequence. The code paths |
| 456 | { | 472 | * are complex and variable. Empirical measurement shows that the worst case |
| 457 | BUG_ON(ring->outstanding_lazy_request == NULL); | 473 | * is ILK at 136 words. Reserving too much is better than reserving too little |
| 458 | return ring->outstanding_lazy_request; | 474 | * as that allows for corner cases that might have been missed. So the figure |
| 459 | } | 475 | * has been rounded up to 160 words. |
| 476 | */ | ||
| 477 | #define MIN_SPACE_FOR_ADD_REQUEST 160 | ||
| 478 | |||
| 479 | /* | ||
| 480 | * Reserve space in the ring to guarantee that the i915_add_request() call | ||
| 481 | * will always have sufficient room to do its stuff. The request creation | ||
| 482 | * code calls this automatically. | ||
| 483 | */ | ||
| 484 | void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size); | ||
| 485 | /* Cancel the reservation, e.g. because the request is being discarded. */ | ||
| 486 | void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf); | ||
| 487 | /* Use the reserved space - for use by i915_add_request() only. */ | ||
| 488 | void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf); | ||
| 489 | /* Finish with the reserved space - for use by i915_add_request() only. */ | ||
| 490 | void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf); | ||
| 491 | |||
| 492 | /* Legacy ringbuffer specific portion of reservation code: */ | ||
| 493 | int intel_ring_reserve_space(struct drm_i915_gem_request *request); | ||
| 460 | 494 | ||
| 461 | #endif /* _INTEL_RINGBUFFER_H_ */ | 495 | #endif /* _INTEL_RINGBUFFER_H_ */ |
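Taken together, the reservation hooks declared above give request creation a fixed contract: reserve MIN_SPACE_FOR_ADD_REQUEST when the request is set up, cancel if the request is abandoned, and bracket the add-request emission with use/end so the size check in intel_ring_reserved_space_end() can catch an undersized reservation. A hedged sketch of that lifecycle from a caller's point of view (example_submit() and the payload stub are hypothetical; the reservation calls are the ones declared above):

/* Hypothetical payload emitter, standing in for execbuffer/flip/etc. code. */
static int emit_user_commands(struct drm_i915_gem_request *req)
{
	return intel_ring_begin(req, 0);	/* placeholder, emits nothing */
}

static int example_submit(struct drm_i915_gem_request *req)
{
	int ret;

	/* At request-creation time: note the reservation and, on the legacy
	 * ringbuffer path, make sure the space really exists. */
	ret = intel_ring_reserve_space(req);
	if (ret)
		return ret;

	ret = emit_user_commands(req);
	if (ret) {
		/* Request abandoned: hand the reservation back. */
		intel_ring_reserved_space_cancel(req->ringbuf);
		return ret;
	}

	/* i915_add_request() itself is expected to wrap its emission with: */
	intel_ring_reserved_space_use(req->ringbuf);
	/* ... flush, seqno write, MI_USER_INTERRUPT ... */
	intel_ring_reserved_space_end(req->ringbuf);

	return 0;
}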
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 1a45385f4d66..6393b76f87ff 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
| @@ -835,12 +835,8 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, | |||
| 835 | return enabled; | 835 | return enabled; |
| 836 | } | 836 | } |
| 837 | 837 | ||
| 838 | static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, | 838 | static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) |
| 839 | struct i915_power_well *power_well) | ||
| 840 | { | 839 | { |
| 841 | WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D); | ||
| 842 | |||
| 843 | vlv_set_power_well(dev_priv, power_well, true); | ||
| 844 | 840 | ||
| 845 | spin_lock_irq(&dev_priv->irq_lock); | 841 | spin_lock_irq(&dev_priv->irq_lock); |
| 846 | valleyview_enable_display_irqs(dev_priv); | 842 | valleyview_enable_display_irqs(dev_priv); |
| @@ -858,18 +854,33 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, | |||
| 858 | i915_redisable_vga_power_on(dev_priv->dev); | 854 | i915_redisable_vga_power_on(dev_priv->dev); |
| 859 | } | 855 | } |
| 860 | 856 | ||
| 857 | static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) | ||
| 858 | { | ||
| 859 | spin_lock_irq(&dev_priv->irq_lock); | ||
| 860 | valleyview_disable_display_irqs(dev_priv); | ||
| 861 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 862 | |||
| 863 | vlv_power_sequencer_reset(dev_priv); | ||
| 864 | } | ||
| 865 | |||
| 866 | static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, | ||
| 867 | struct i915_power_well *power_well) | ||
| 868 | { | ||
| 869 | WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D); | ||
| 870 | |||
| 871 | vlv_set_power_well(dev_priv, power_well, true); | ||
| 872 | |||
| 873 | vlv_display_power_well_init(dev_priv); | ||
| 874 | } | ||
| 875 | |||
| 861 | static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, | 876 | static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, |
| 862 | struct i915_power_well *power_well) | 877 | struct i915_power_well *power_well) |
| 863 | { | 878 | { |
| 864 | WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D); | 879 | WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D); |
| 865 | 880 | ||
| 866 | spin_lock_irq(&dev_priv->irq_lock); | 881 | vlv_display_power_well_deinit(dev_priv); |
| 867 | valleyview_disable_display_irqs(dev_priv); | ||
| 868 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 869 | 882 | ||
| 870 | vlv_set_power_well(dev_priv, power_well, false); | 883 | vlv_set_power_well(dev_priv, power_well, false); |
| 871 | |||
| 872 | vlv_power_sequencer_reset(dev_priv); | ||
| 873 | } | 884 | } |
| 874 | 885 | ||
| 875 | static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, | 886 | static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, |
| @@ -882,8 +893,8 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, | |||
| 882 | * display and the reference clock for VGA | 893 | * display and the reference clock for VGA |
| 883 | * hotplug / manual detection. | 894 | * hotplug / manual detection. |
| 884 | */ | 895 | */ |
| 885 | I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | | 896 | I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS | |
| 886 | DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); | 897 | DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); |
| 887 | udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ | 898 | udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ |
| 888 | 899 | ||
| 889 | vlv_set_power_well(dev_priv, power_well, true); | 900 | vlv_set_power_well(dev_priv, power_well, true); |
| @@ -933,14 +944,14 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, | |||
| 933 | */ | 944 | */ |
| 934 | if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) { | 945 | if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) { |
| 935 | phy = DPIO_PHY0; | 946 | phy = DPIO_PHY0; |
| 936 | I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | | 947 | I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS | |
| 937 | DPLL_REFA_CLK_ENABLE_VLV); | 948 | DPLL_REF_CLK_ENABLE_VLV); |
| 938 | I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | | 949 | I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS | |
| 939 | DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); | 950 | DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); |
| 940 | } else { | 951 | } else { |
| 941 | phy = DPIO_PHY1; | 952 | phy = DPIO_PHY1; |
| 942 | I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) | | 953 | I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) | DPLL_VGA_MODE_DIS | |
| 943 | DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); | 954 | DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); |
| 944 | } | 955 | } |
| 945 | udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ | 956 | udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ |
| 946 | vlv_set_power_well(dev_priv, power_well, true); | 957 | vlv_set_power_well(dev_priv, power_well, true); |
| @@ -1042,53 +1053,29 @@ out: | |||
| 1042 | static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, | 1053 | static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, |
| 1043 | struct i915_power_well *power_well) | 1054 | struct i915_power_well *power_well) |
| 1044 | { | 1055 | { |
| 1056 | WARN_ON_ONCE(power_well->data != PIPE_A); | ||
| 1057 | |||
| 1045 | chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0); | 1058 | chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0); |
| 1046 | } | 1059 | } |
| 1047 | 1060 | ||
| 1048 | static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, | 1061 | static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, |
| 1049 | struct i915_power_well *power_well) | 1062 | struct i915_power_well *power_well) |
| 1050 | { | 1063 | { |
| 1051 | WARN_ON_ONCE(power_well->data != PIPE_A && | 1064 | WARN_ON_ONCE(power_well->data != PIPE_A); |
| 1052 | power_well->data != PIPE_B && | ||
| 1053 | power_well->data != PIPE_C); | ||
| 1054 | 1065 | ||
| 1055 | chv_set_pipe_power_well(dev_priv, power_well, true); | 1066 | chv_set_pipe_power_well(dev_priv, power_well, true); |
| 1056 | 1067 | ||
| 1057 | if (power_well->data == PIPE_A) { | 1068 | vlv_display_power_well_init(dev_priv); |
| 1058 | spin_lock_irq(&dev_priv->irq_lock); | ||
| 1059 | valleyview_enable_display_irqs(dev_priv); | ||
| 1060 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 1061 | |||
| 1062 | /* | ||
| 1063 | * During driver initialization/resume we can avoid restoring the | ||
| 1064 | * part of the HW/SW state that will be inited anyway explicitly. | ||
| 1065 | */ | ||
| 1066 | if (dev_priv->power_domains.initializing) | ||
| 1067 | return; | ||
| 1068 | |||
| 1069 | intel_hpd_init(dev_priv); | ||
| 1070 | |||
| 1071 | i915_redisable_vga_power_on(dev_priv->dev); | ||
| 1072 | } | ||
| 1073 | } | 1069 | } |
| 1074 | 1070 | ||
| 1075 | static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, | 1071 | static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, |
| 1076 | struct i915_power_well *power_well) | 1072 | struct i915_power_well *power_well) |
| 1077 | { | 1073 | { |
| 1078 | WARN_ON_ONCE(power_well->data != PIPE_A && | 1074 | WARN_ON_ONCE(power_well->data != PIPE_A); |
| 1079 | power_well->data != PIPE_B && | ||
| 1080 | power_well->data != PIPE_C); | ||
| 1081 | 1075 | ||
| 1082 | if (power_well->data == PIPE_A) { | 1076 | vlv_display_power_well_deinit(dev_priv); |
| 1083 | spin_lock_irq(&dev_priv->irq_lock); | ||
| 1084 | valleyview_disable_display_irqs(dev_priv); | ||
| 1085 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 1086 | } | ||
| 1087 | 1077 | ||
| 1088 | chv_set_pipe_power_well(dev_priv, power_well, false); | 1078 | chv_set_pipe_power_well(dev_priv, power_well, false); |
| 1089 | |||
| 1090 | if (power_well->data == PIPE_A) | ||
| 1091 | vlv_power_sequencer_reset(dev_priv); | ||
| 1092 | } | 1079 | } |
| 1093 | 1080 | ||
| 1094 | /** | 1081 | /** |
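The refactor above folds the IRQ/hotplug/VGA bring-up into vlv_display_power_well_init() and the teardown into vlv_display_power_well_deinit(), so the VLV DISP2D well and the CHV pipe-A well now share one sequence. A sketch of the ordering the callers follow, not the driver's actual code (example_display_well_toggle() is illustrative):

/* Illustrative only: init runs after the well powers up, deinit before
 * it powers down. */
static void example_display_well_toggle(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					bool enable)
{
	if (enable) {
		vlv_set_power_well(dev_priv, power_well, true);
		vlv_display_power_well_init(dev_priv);	/* irqs, hpd, VGA */
	} else {
		vlv_display_power_well_deinit(dev_priv);	/* irqs, PPS reset */
		vlv_set_power_well(dev_priv, power_well, false);
	}
}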
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 8193a35388d7..9d8af2f8a875 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
| @@ -75,10 +75,8 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs) | |||
| 75 | * until a subsequent call to intel_pipe_update_end(). That is done to | 75 | * until a subsequent call to intel_pipe_update_end(). That is done to |
| 76 | * avoid random delays. The value written to @start_vbl_count should be | 76 | * avoid random delays. The value written to @start_vbl_count should be |
| 77 | * supplied to intel_pipe_update_end() for error checking. | 77 | * supplied to intel_pipe_update_end() for error checking. |
| 78 | * | ||
| 79 | * Return: true if the call was successful | ||
| 80 | */ | 78 | */ |
| 81 | bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count) | 79 | void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count) |
| 82 | { | 80 | { |
| 83 | struct drm_device *dev = crtc->base.dev; | 81 | struct drm_device *dev = crtc->base.dev; |
| 84 | const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode; | 82 | const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode; |
| @@ -96,13 +94,14 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count) | |||
| 96 | min = vblank_start - usecs_to_scanlines(mode, 100); | 94 | min = vblank_start - usecs_to_scanlines(mode, 100); |
| 97 | max = vblank_start - 1; | 95 | max = vblank_start - 1; |
| 98 | 96 | ||
| 97 | local_irq_disable(); | ||
| 98 | *start_vbl_count = 0; | ||
| 99 | |||
| 99 | if (min <= 0 || max <= 0) | 100 | if (min <= 0 || max <= 0) |
| 100 | return false; | 101 | return; |
| 101 | 102 | ||
| 102 | if (WARN_ON(drm_crtc_vblank_get(&crtc->base))) | 103 | if (WARN_ON(drm_crtc_vblank_get(&crtc->base))) |
| 103 | return false; | 104 | return; |
| 104 | |||
| 105 | local_irq_disable(); | ||
| 106 | 105 | ||
| 107 | trace_i915_pipe_update_start(crtc, min, max); | 106 | trace_i915_pipe_update_start(crtc, min, max); |
| 108 | 107 | ||
| @@ -138,8 +137,6 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count) | |||
| 138 | *start_vbl_count = dev->driver->get_vblank_counter(dev, pipe); | 137 | *start_vbl_count = dev->driver->get_vblank_counter(dev, pipe); |
| 139 | 138 | ||
| 140 | trace_i915_pipe_update_vblank_evaded(crtc, min, max, *start_vbl_count); | 139 | trace_i915_pipe_update_vblank_evaded(crtc, min, max, *start_vbl_count); |
| 141 | |||
| 142 | return true; | ||
| 143 | } | 140 | } |
| 144 | 141 | ||
| 145 | /** | 142 | /** |
| @@ -161,7 +158,7 @@ void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count) | |||
| 161 | 158 | ||
| 162 | local_irq_enable(); | 159 | local_irq_enable(); |
| 163 | 160 | ||
| 164 | if (start_vbl_count != end_vbl_count) | 161 | if (start_vbl_count && start_vbl_count != end_vbl_count) |
| 165 | DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n", | 162 | DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n", |
| 166 | pipe_name(pipe), start_vbl_count, end_vbl_count); | 163 | pipe_name(pipe), start_vbl_count, end_vbl_count); |
| 167 | } | 164 | } |
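With intel_pipe_update_start() now returning void and zeroing *start_vbl_count before its early-out paths, callers no longer branch on a boolean; intel_pipe_update_end() simply skips the mismatch check when the counter stayed at zero. A minimal caller sketch (write_plane_registers() is a placeholder, not a real function):

/* Minimal caller sketch; write_plane_registers() is hypothetical. */
static void example_atomic_plane_update(struct intel_crtc *crtc)
{
	uint32_t start_vbl_count;

	/* Disables interrupts and waits out the window just before vblank. */
	intel_pipe_update_start(crtc, &start_vbl_count);

	write_plane_registers(crtc);	/* must complete before vblank */

	/* Re-enables interrupts and warns if vblank was crossed. */
	intel_pipe_update_end(crtc, start_vbl_count);
}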
| @@ -182,7 +179,8 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, | |||
| 182 | const int plane = intel_plane->plane + 1; | 179 | const int plane = intel_plane->plane + 1; |
| 183 | u32 plane_ctl, stride_div, stride; | 180 | u32 plane_ctl, stride_div, stride; |
| 184 | int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); | 181 | int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); |
| 185 | const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey; | 182 | const struct drm_intel_sprite_colorkey *key = |
| 183 | &to_intel_plane_state(drm_plane->state)->ckey; | ||
| 186 | unsigned long surf_addr; | 184 | unsigned long surf_addr; |
| 187 | u32 tile_height, plane_offset, plane_size; | 185 | u32 tile_height, plane_offset, plane_size; |
| 188 | unsigned int rotation; | 186 | unsigned int rotation; |
| @@ -272,7 +270,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, | |||
| 272 | } | 270 | } |
| 273 | 271 | ||
| 274 | static void | 272 | static void |
| 275 | skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force) | 273 | skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) |
| 276 | { | 274 | { |
| 277 | struct drm_device *dev = dplane->dev; | 275 | struct drm_device *dev = dplane->dev; |
| 278 | struct drm_i915_private *dev_priv = dev->dev_private; | 276 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -344,7 +342,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, | |||
| 344 | u32 sprctl; | 342 | u32 sprctl; |
| 345 | unsigned long sprsurf_offset, linear_offset; | 343 | unsigned long sprsurf_offset, linear_offset; |
| 346 | int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); | 344 | int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); |
| 347 | const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey; | 345 | const struct drm_intel_sprite_colorkey *key = |
| 346 | &to_intel_plane_state(dplane->state)->ckey; | ||
| 348 | 347 | ||
| 349 | sprctl = SP_ENABLE; | 348 | sprctl = SP_ENABLE; |
| 350 | 349 | ||
| @@ -400,10 +399,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, | |||
| 400 | if (obj->tiling_mode != I915_TILING_NONE) | 399 | if (obj->tiling_mode != I915_TILING_NONE) |
| 401 | sprctl |= SP_TILED; | 400 | sprctl |= SP_TILED; |
| 402 | 401 | ||
| 403 | intel_update_sprite_watermarks(dplane, crtc, src_w, src_h, | ||
| 404 | pixel_size, true, | ||
| 405 | src_w != crtc_w || src_h != crtc_h); | ||
| 406 | |||
| 407 | /* Sizes are 0 based */ | 402 | /* Sizes are 0 based */ |
| 408 | src_w--; | 403 | src_w--; |
| 409 | src_h--; | 404 | src_h--; |
| @@ -411,7 +406,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, | |||
| 411 | crtc_h--; | 406 | crtc_h--; |
| 412 | 407 | ||
| 413 | linear_offset = y * fb->pitches[0] + x * pixel_size; | 408 | linear_offset = y * fb->pitches[0] + x * pixel_size; |
| 414 | sprsurf_offset = intel_gen4_compute_page_offset(&x, &y, | 409 | sprsurf_offset = intel_gen4_compute_page_offset(dev_priv, |
| 410 | &x, &y, | ||
| 415 | obj->tiling_mode, | 411 | obj->tiling_mode, |
| 416 | pixel_size, | 412 | pixel_size, |
| 417 | fb->pitches[0]); | 413 | fb->pitches[0]); |
| @@ -455,7 +451,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, | |||
| 455 | } | 451 | } |
| 456 | 452 | ||
| 457 | static void | 453 | static void |
| 458 | vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force) | 454 | vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) |
| 459 | { | 455 | { |
| 460 | struct drm_device *dev = dplane->dev; | 456 | struct drm_device *dev = dplane->dev; |
| 461 | struct drm_i915_private *dev_priv = dev->dev_private; | 457 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -467,8 +463,6 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force) | |||
| 467 | 463 | ||
| 468 | I915_WRITE(SPSURF(pipe, plane), 0); | 464 | I915_WRITE(SPSURF(pipe, plane), 0); |
| 469 | POSTING_READ(SPSURF(pipe, plane)); | 465 | POSTING_READ(SPSURF(pipe, plane)); |
| 470 | |||
| 471 | intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false); | ||
| 472 | } | 466 | } |
| 473 | 467 | ||
| 474 | static void | 468 | static void |
| @@ -487,7 +481,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 487 | u32 sprctl, sprscale = 0; | 481 | u32 sprctl, sprscale = 0; |
| 488 | unsigned long sprsurf_offset, linear_offset; | 482 | unsigned long sprsurf_offset, linear_offset; |
| 489 | int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); | 483 | int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); |
| 490 | const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey; | 484 | const struct drm_intel_sprite_colorkey *key = |
| 485 | &to_intel_plane_state(plane->state)->ckey; | ||
| 491 | 486 | ||
| 492 | sprctl = SPRITE_ENABLE; | 487 | sprctl = SPRITE_ENABLE; |
| 493 | 488 | ||
| @@ -546,7 +541,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 546 | 541 | ||
| 547 | linear_offset = y * fb->pitches[0] + x * pixel_size; | 542 | linear_offset = y * fb->pitches[0] + x * pixel_size; |
| 548 | sprsurf_offset = | 543 | sprsurf_offset = |
| 549 | intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, | 544 | intel_gen4_compute_page_offset(dev_priv, |
| 545 | &x, &y, obj->tiling_mode, | ||
| 550 | pixel_size, fb->pitches[0]); | 546 | pixel_size, fb->pitches[0]); |
| 551 | linear_offset -= sprsurf_offset; | 547 | linear_offset -= sprsurf_offset; |
| 552 | 548 | ||
| @@ -595,7 +591,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 595 | } | 591 | } |
| 596 | 592 | ||
| 597 | static void | 593 | static void |
| 598 | ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force) | 594 | ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) |
| 599 | { | 595 | { |
| 600 | struct drm_device *dev = plane->dev; | 596 | struct drm_device *dev = plane->dev; |
| 601 | struct drm_i915_private *dev_priv = dev->dev_private; | 597 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -627,7 +623,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 627 | unsigned long dvssurf_offset, linear_offset; | 623 | unsigned long dvssurf_offset, linear_offset; |
| 628 | u32 dvscntr, dvsscale; | 624 | u32 dvscntr, dvsscale; |
| 629 | int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); | 625 | int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); |
| 630 | const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey; | 626 | const struct drm_intel_sprite_colorkey *key = |
| 627 | &to_intel_plane_state(plane->state)->ckey; | ||
| 631 | 628 | ||
| 632 | dvscntr = DVS_ENABLE; | 629 | dvscntr = DVS_ENABLE; |
| 633 | 630 | ||
| @@ -682,7 +679,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 682 | 679 | ||
| 683 | linear_offset = y * fb->pitches[0] + x * pixel_size; | 680 | linear_offset = y * fb->pitches[0] + x * pixel_size; |
| 684 | dvssurf_offset = | 681 | dvssurf_offset = |
| 685 | intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, | 682 | intel_gen4_compute_page_offset(dev_priv, |
| 683 | &x, &y, obj->tiling_mode, | ||
| 686 | pixel_size, fb->pitches[0]); | 684 | pixel_size, fb->pitches[0]); |
| 687 | linear_offset -= dvssurf_offset; | 685 | linear_offset -= dvssurf_offset; |
| 688 | 686 | ||
| @@ -722,7 +720,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 722 | } | 720 | } |
| 723 | 721 | ||
| 724 | static void | 722 | static void |
| 725 | ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force) | 723 | ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) |
| 726 | { | 724 | { |
| 727 | struct drm_device *dev = plane->dev; | 725 | struct drm_device *dev = plane->dev; |
| 728 | struct drm_i915_private *dev_priv = dev->dev_private; | 726 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -739,11 +737,12 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force) | |||
| 739 | 737 | ||
| 740 | static int | 738 | static int |
| 741 | intel_check_sprite_plane(struct drm_plane *plane, | 739 | intel_check_sprite_plane(struct drm_plane *plane, |
| 740 | struct intel_crtc_state *crtc_state, | ||
| 742 | struct intel_plane_state *state) | 741 | struct intel_plane_state *state) |
| 743 | { | 742 | { |
| 744 | struct drm_device *dev = plane->dev; | 743 | struct drm_device *dev = plane->dev; |
| 745 | struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc); | 744 | struct drm_crtc *crtc = state->base.crtc; |
| 746 | struct intel_crtc_state *crtc_state; | 745 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 747 | struct intel_plane *intel_plane = to_intel_plane(plane); | 746 | struct intel_plane *intel_plane = to_intel_plane(plane); |
| 748 | struct drm_framebuffer *fb = state->base.fb; | 747 | struct drm_framebuffer *fb = state->base.fb; |
| 749 | int crtc_x, crtc_y; | 748 | int crtc_x, crtc_y; |
| @@ -756,15 +755,10 @@ intel_check_sprite_plane(struct drm_plane *plane, | |||
| 756 | int max_scale, min_scale; | 755 | int max_scale, min_scale; |
| 757 | bool can_scale; | 756 | bool can_scale; |
| 758 | int pixel_size; | 757 | int pixel_size; |
| 759 | int ret; | ||
| 760 | |||
| 761 | intel_crtc = intel_crtc ? intel_crtc : to_intel_crtc(plane->crtc); | ||
| 762 | crtc_state = state->base.state ? | ||
| 763 | intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL; | ||
| 764 | 758 | ||
| 765 | if (!fb) { | 759 | if (!fb) { |
| 766 | state->visible = false; | 760 | state->visible = false; |
| 767 | goto finish; | 761 | return 0; |
| 768 | } | 762 | } |
| 769 | 763 | ||
| 770 | /* Don't modify another pipe's plane */ | 764 | /* Don't modify another pipe's plane */ |
| @@ -782,7 +776,7 @@ intel_check_sprite_plane(struct drm_plane *plane, | |||
| 782 | /* setup can_scale, min_scale, max_scale */ | 776 | /* setup can_scale, min_scale, max_scale */ |
| 783 | if (INTEL_INFO(dev)->gen >= 9) { | 777 | if (INTEL_INFO(dev)->gen >= 9) { |
| 784 | /* use scaler when colorkey is not required */ | 778 | /* use scaler when colorkey is not required */ |
| 785 | if (intel_plane->ckey.flags == I915_SET_COLORKEY_NONE) { | 779 | if (state->ckey.flags == I915_SET_COLORKEY_NONE) { |
| 786 | can_scale = 1; | 780 | can_scale = 1; |
| 787 | min_scale = 1; | 781 | min_scale = 1; |
| 788 | max_scale = skl_max_scale(intel_crtc, crtc_state); | 782 | max_scale = skl_max_scale(intel_crtc, crtc_state); |
| @@ -802,7 +796,6 @@ intel_check_sprite_plane(struct drm_plane *plane, | |||
| 802 | * coordinates and sizes. We probably need some way to decide whether | 796 | * coordinates and sizes. We probably need some way to decide whether |
| 803 | * more strict checking should be done instead. | 797 | * more strict checking should be done instead. |
| 804 | */ | 798 | */ |
| 805 | |||
| 806 | drm_rect_rotate(src, fb->width << 16, fb->height << 16, | 799 | drm_rect_rotate(src, fb->width << 16, fb->height << 16, |
| 807 | state->base.rotation); | 800 | state->base.rotation); |
| 808 | 801 | ||
| @@ -812,7 +805,7 @@ intel_check_sprite_plane(struct drm_plane *plane, | |||
| 812 | vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale); | 805 | vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale); |
| 813 | BUG_ON(vscale < 0); | 806 | BUG_ON(vscale < 0); |
| 814 | 807 | ||
| 815 | state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale); | 808 | state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale); |
| 816 | 809 | ||
| 817 | crtc_x = dst->x1; | 810 | crtc_x = dst->x1; |
| 818 | crtc_y = dst->y1; | 811 | crtc_y = dst->y1; |
| @@ -917,36 +910,6 @@ intel_check_sprite_plane(struct drm_plane *plane, | |||
| 917 | dst->y1 = crtc_y; | 910 | dst->y1 = crtc_y; |
| 918 | dst->y2 = crtc_y + crtc_h; | 911 | dst->y2 = crtc_y + crtc_h; |
| 919 | 912 | ||
| 920 | finish: | ||
| 921 | /* | ||
| 922 | * If the sprite is completely covering the primary plane, | ||
| 923 | * we can disable the primary and save power. | ||
| 924 | */ | ||
| 925 | if (intel_crtc->active) { | ||
| 926 | intel_crtc->atomic.fb_bits |= | ||
| 927 | INTEL_FRONTBUFFER_SPRITE(intel_crtc->pipe); | ||
| 928 | |||
| 929 | if (intel_wm_need_update(plane, &state->base)) | ||
| 930 | intel_crtc->atomic.update_wm = true; | ||
| 931 | |||
| 932 | if (!state->visible) { | ||
| 933 | /* | ||
| 934 | * Avoid underruns when disabling the sprite. | ||
| 935 | * FIXME remove once watermark updates are done properly. | ||
| 936 | */ | ||
| 937 | intel_crtc->atomic.wait_vblank = true; | ||
| 938 | intel_crtc->atomic.update_sprite_watermarks |= | ||
| 939 | (1 << drm_plane_index(plane)); | ||
| 940 | } | ||
| 941 | } | ||
| 942 | |||
| 943 | if (INTEL_INFO(dev)->gen >= 9) { | ||
| 944 | ret = skl_update_scaler_users(intel_crtc, crtc_state, intel_plane, | ||
| 945 | state, 0); | ||
| 946 | if (ret) | ||
| 947 | return ret; | ||
| 948 | } | ||
| 949 | |||
| 950 | return 0; | 913 | return 0; |
| 951 | } | 914 | } |
| 952 | 915 | ||
| @@ -955,34 +918,27 @@ intel_commit_sprite_plane(struct drm_plane *plane, | |||
| 955 | struct intel_plane_state *state) | 918 | struct intel_plane_state *state) |
| 956 | { | 919 | { |
| 957 | struct drm_crtc *crtc = state->base.crtc; | 920 | struct drm_crtc *crtc = state->base.crtc; |
| 958 | struct intel_crtc *intel_crtc; | ||
| 959 | struct intel_plane *intel_plane = to_intel_plane(plane); | 921 | struct intel_plane *intel_plane = to_intel_plane(plane); |
| 960 | struct drm_framebuffer *fb = state->base.fb; | 922 | struct drm_framebuffer *fb = state->base.fb; |
| 961 | int crtc_x, crtc_y; | ||
| 962 | unsigned int crtc_w, crtc_h; | ||
| 963 | uint32_t src_x, src_y, src_w, src_h; | ||
| 964 | 923 | ||
| 965 | crtc = crtc ? crtc : plane->crtc; | 924 | crtc = crtc ? crtc : plane->crtc; |
| 966 | intel_crtc = to_intel_crtc(crtc); | ||
| 967 | 925 | ||
| 968 | plane->fb = fb; | 926 | plane->fb = fb; |
| 969 | 927 | ||
| 970 | if (intel_crtc->active) { | 928 | if (!crtc->state->active) |
| 971 | if (state->visible) { | 929 | return; |
| 972 | crtc_x = state->dst.x1; | 930 | |
| 973 | crtc_y = state->dst.y1; | 931 | if (state->visible) { |
| 974 | crtc_w = drm_rect_width(&state->dst); | 932 | intel_plane->update_plane(plane, crtc, fb, |
| 975 | crtc_h = drm_rect_height(&state->dst); | 933 | state->dst.x1, state->dst.y1, |
| 976 | src_x = state->src.x1 >> 16; | 934 | drm_rect_width(&state->dst), |
| 977 | src_y = state->src.y1 >> 16; | 935 | drm_rect_height(&state->dst), |
| 978 | src_w = drm_rect_width(&state->src) >> 16; | 936 | state->src.x1 >> 16, |
| 979 | src_h = drm_rect_height(&state->src) >> 16; | 937 | state->src.y1 >> 16, |
| 980 | intel_plane->update_plane(plane, crtc, fb, | 938 | drm_rect_width(&state->src) >> 16, |
| 981 | crtc_x, crtc_y, crtc_w, crtc_h, | 939 | drm_rect_height(&state->src) >> 16); |
| 982 | src_x, src_y, src_w, src_h); | 940 | } else { |
| 983 | } else { | 941 | intel_plane->disable_plane(plane, crtc); |
| 984 | intel_plane->disable_plane(plane, crtc, false); | ||
| 985 | } | ||
| 986 | } | 942 | } |
| 987 | } | 943 | } |
| 988 | 944 | ||
| @@ -991,7 +947,9 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, | |||
| 991 | { | 947 | { |
| 992 | struct drm_intel_sprite_colorkey *set = data; | 948 | struct drm_intel_sprite_colorkey *set = data; |
| 993 | struct drm_plane *plane; | 949 | struct drm_plane *plane; |
| 994 | struct intel_plane *intel_plane; | 950 | struct drm_plane_state *plane_state; |
| 951 | struct drm_atomic_state *state; | ||
| 952 | struct drm_modeset_acquire_ctx ctx; | ||
| 995 | int ret = 0; | 953 | int ret = 0; |
| 996 | 954 | ||
| 997 | /* Make sure we don't try to enable both src & dest simultaneously */ | 955 | /* Make sure we don't try to enable both src & dest simultaneously */ |
| @@ -1002,50 +960,41 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, | |||
| 1002 | set->flags & I915_SET_COLORKEY_DESTINATION) | 960 | set->flags & I915_SET_COLORKEY_DESTINATION) |
| 1003 | return -EINVAL; | 961 | return -EINVAL; |
| 1004 | 962 | ||
| 1005 | drm_modeset_lock_all(dev); | ||
| 1006 | |||
| 1007 | plane = drm_plane_find(dev, set->plane_id); | 963 | plane = drm_plane_find(dev, set->plane_id); |
| 1008 | if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) { | 964 | if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) |
| 1009 | ret = -ENOENT; | 965 | return -ENOENT; |
| 1010 | goto out_unlock; | ||
| 1011 | } | ||
| 1012 | 966 | ||
| 1013 | intel_plane = to_intel_plane(plane); | 967 | drm_modeset_acquire_init(&ctx, 0); |
| 1014 | 968 | ||
| 1015 | if (INTEL_INFO(dev)->gen >= 9) { | 969 | state = drm_atomic_state_alloc(plane->dev); |
| 1016 | /* plane scaling and colorkey are mutually exclusive */ | 970 | if (!state) { |
| 1017 | if (to_intel_plane_state(plane->state)->scaler_id >= 0) { | 971 | ret = -ENOMEM; |
| 1018 | DRM_ERROR("colorkey not allowed with scaler\n"); | 972 | goto out; |
| 1019 | ret = -EINVAL; | ||
| 1020 | goto out_unlock; | ||
| 1021 | } | ||
| 1022 | } | 973 | } |
| 974 | state->acquire_ctx = &ctx; | ||
| 975 | |||
| 976 | while (1) { | ||
| 977 | plane_state = drm_atomic_get_plane_state(state, plane); | ||
| 978 | ret = PTR_ERR_OR_ZERO(plane_state); | ||
| 979 | if (!ret) { | ||
| 980 | to_intel_plane_state(plane_state)->ckey = *set; | ||
| 981 | ret = drm_atomic_commit(state); | ||
| 982 | } | ||
| 1023 | 983 | ||
| 1024 | intel_plane->ckey = *set; | 984 | if (ret != -EDEADLK) |
| 1025 | 985 | break; | |
| 1026 | /* | ||
| 1027 | * The only way this could fail would be due to | ||
| 1028 | * the current plane state being unsupportable already, | ||
| 1029 | * and we don't consider that an error for the | ||
| 1030 | * colorkey ioctl. So just ignore any error. | ||
| 1031 | */ | ||
| 1032 | intel_plane_restore(plane); | ||
| 1033 | 986 | ||
| 1034 | out_unlock: | 987 | drm_atomic_state_clear(state); |
| 1035 | drm_modeset_unlock_all(dev); | 988 | drm_modeset_backoff(&ctx); |
| 1036 | return ret; | 989 | } |
| 1037 | } | ||
| 1038 | 990 | ||
| 1039 | int intel_plane_restore(struct drm_plane *plane) | 991 | if (ret) |
| 1040 | { | 992 | drm_atomic_state_free(state); |
| 1041 | if (!plane->crtc || !plane->state->fb) | ||
| 1042 | return 0; | ||
| 1043 | 993 | ||
| 1044 | return drm_plane_helper_update(plane, plane->crtc, plane->state->fb, | 994 | out: |
| 1045 | plane->state->crtc_x, plane->state->crtc_y, | 995 | drm_modeset_drop_locks(&ctx); |
| 1046 | plane->state->crtc_w, plane->state->crtc_h, | 996 | drm_modeset_acquire_fini(&ctx); |
| 1047 | plane->state->src_x, plane->state->src_y, | 997 | return ret; |
| 1048 | plane->state->src_w, plane->state->src_h); | ||
| 1049 | } | 998 | } |
| 1050 | 999 | ||
| 1051 | static const uint32_t ilk_plane_formats[] = { | 1000 | static const uint32_t ilk_plane_formats[] = { |
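The colorkey ioctl conversion above is the standard acquire-context retry dance: build an atomic state, and on -EDEADLK clear the state, back off, and try again. Distilled into a generic form (a sketch of the pattern only; modify_state() stands in for whatever change the caller wants to commit):

/* Generic form of the -EDEADLK retry loop used above; a sketch only. */
static int example_commit_with_backoff(struct drm_atomic_state *state,
				       struct drm_modeset_acquire_ctx *ctx,
				       int (*modify_state)(struct drm_atomic_state *))
{
	int ret;

	for (;;) {
		/* Re-apply the change each pass: backoff clears the state. */
		ret = modify_state(state);
		if (!ret)
			ret = drm_atomic_commit(state);
		if (ret != -EDEADLK)
			return ret;

		/* Someone else holds a lock we need: drop our locks,
		 * wait for them, and retry with a pristine state. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(ctx);
	}
}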
| @@ -1172,9 +1121,9 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) | |||
| 1172 | 1121 | ||
| 1173 | intel_plane->pipe = pipe; | 1122 | intel_plane->pipe = pipe; |
| 1174 | intel_plane->plane = plane; | 1123 | intel_plane->plane = plane; |
| 1124 | intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe); | ||
| 1175 | intel_plane->check_plane = intel_check_sprite_plane; | 1125 | intel_plane->check_plane = intel_check_sprite_plane; |
| 1176 | intel_plane->commit_plane = intel_commit_sprite_plane; | 1126 | intel_plane->commit_plane = intel_commit_sprite_plane; |
| 1177 | intel_plane->ckey.flags = I915_SET_COLORKEY_NONE; | ||
| 1178 | possible_crtcs = (1 << pipe); | 1127 | possible_crtcs = (1 << pipe); |
| 1179 | ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, | 1128 | ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, |
| 1180 | &intel_plane_funcs, | 1129 | &intel_plane_funcs, |
| @@ -1189,6 +1138,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) | |||
| 1189 | 1138 | ||
| 1190 | drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs); | 1139 | drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs); |
| 1191 | 1140 | ||
| 1192 | out: | 1141 | out: |
| 1193 | return ret; | 1142 | return ret; |
| 1194 | } | 1143 | } |
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index a6d8a3ee7750..45285a9178fe 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
| @@ -1455,20 +1455,80 @@ static int gen6_do_reset(struct drm_device *dev) | |||
| 1455 | return ret; | 1455 | return ret; |
| 1456 | } | 1456 | } |
| 1457 | 1457 | ||
| 1458 | int intel_gpu_reset(struct drm_device *dev) | 1458 | static int wait_for_register(struct drm_i915_private *dev_priv, |
| 1459 | const u32 reg, | ||
| 1460 | const u32 mask, | ||
| 1461 | const u32 value, | ||
| 1462 | const unsigned long timeout_ms) | ||
| 1463 | { | ||
| 1464 | return wait_for((I915_READ(reg) & mask) == value, timeout_ms); | ||
| 1465 | } | ||
| 1466 | |||
| 1467 | static int gen8_do_reset(struct drm_device *dev) | ||
| 1459 | { | 1468 | { |
| 1460 | if (INTEL_INFO(dev)->gen >= 6) | 1469 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1461 | return gen6_do_reset(dev); | 1470 | struct intel_engine_cs *engine; |
| 1471 | int i; | ||
| 1472 | |||
| 1473 | for_each_ring(engine, dev_priv, i) { | ||
| 1474 | I915_WRITE(RING_RESET_CTL(engine->mmio_base), | ||
| 1475 | _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); | ||
| 1476 | |||
| 1477 | if (wait_for_register(dev_priv, | ||
| 1478 | RING_RESET_CTL(engine->mmio_base), | ||
| 1479 | RESET_CTL_READY_TO_RESET, | ||
| 1480 | RESET_CTL_READY_TO_RESET, | ||
| 1481 | 700)) { | ||
| 1482 | DRM_ERROR("%s: reset request timeout\n", engine->name); | ||
| 1483 | goto not_ready; | ||
| 1484 | } | ||
| 1485 | } | ||
| 1486 | |||
| 1487 | return gen6_do_reset(dev); | ||
| 1488 | |||
| 1489 | not_ready: | ||
| 1490 | for_each_ring(engine, dev_priv, i) | ||
| 1491 | I915_WRITE(RING_RESET_CTL(engine->mmio_base), | ||
| 1492 | _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); | ||
| 1493 | |||
| 1494 | return -EIO; | ||
| 1495 | } | ||
| 1496 | |||
| 1497 | static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *) | ||
| 1498 | { | ||
| 1499 | if (!i915.reset) | ||
| 1500 | return NULL; | ||
| 1501 | |||
| 1502 | if (INTEL_INFO(dev)->gen >= 8) | ||
| 1503 | return gen8_do_reset; | ||
| 1504 | else if (INTEL_INFO(dev)->gen >= 6) | ||
| 1505 | return gen6_do_reset; | ||
| 1462 | else if (IS_GEN5(dev)) | 1506 | else if (IS_GEN5(dev)) |
| 1463 | return ironlake_do_reset(dev); | 1507 | return ironlake_do_reset; |
| 1464 | else if (IS_G4X(dev)) | 1508 | else if (IS_G4X(dev)) |
| 1465 | return g4x_do_reset(dev); | 1509 | return g4x_do_reset; |
| 1466 | else if (IS_G33(dev)) | 1510 | else if (IS_G33(dev)) |
| 1467 | return g33_do_reset(dev); | 1511 | return g33_do_reset; |
| 1468 | else if (INTEL_INFO(dev)->gen >= 3) | 1512 | else if (INTEL_INFO(dev)->gen >= 3) |
| 1469 | return i915_do_reset(dev); | 1513 | return i915_do_reset; |
| 1470 | else | 1514 | else |
| 1515 | return NULL; | ||
| 1516 | } | ||
| 1517 | |||
| 1518 | int intel_gpu_reset(struct drm_device *dev) | ||
| 1519 | { | ||
| 1520 | int (*reset)(struct drm_device *); | ||
| 1521 | |||
| 1522 | reset = intel_get_gpu_reset(dev); | ||
| 1523 | if (reset == NULL) | ||
| 1471 | return -ENODEV; | 1524 | return -ENODEV; |
| 1525 | |||
| 1526 | return reset(dev); | ||
| 1527 | } | ||
| 1528 | |||
| 1529 | bool intel_has_gpu_reset(struct drm_device *dev) | ||
| 1530 | { | ||
| 1531 | return intel_get_gpu_reset(dev) != NULL; | ||
| 1472 | } | 1532 | } |
| 1473 | 1533 | ||
| 1474 | void intel_uncore_check_errors(struct drm_device *dev) | 1534 | void intel_uncore_check_errors(struct drm_device *dev) |
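Splitting the per-generation dispatch into intel_get_gpu_reset() lets the driver report reset capability without performing a reset, which presumably backs the new I915_PARAM_HAS_GPU_RESET added to the uapi header further down. A hedged caller sketch (example_try_reset() is illustrative, not a real symbol):

/* Illustrative caller only. */
static int example_try_reset(struct drm_device *dev)
{
	/* A NULL handler means no reset support for this generation,
	 * or resets were disabled via the i915.reset module parameter. */
	if (!intel_has_gpu_reset(dev))
		return -ENODEV;

	return intel_gpu_reset(dev);
}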
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index b08bdade6002..9e9bddaa58a5 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h | |||
| @@ -3,8 +3,8 @@ | |||
| 3 | #ifndef _DRM_INTEL_GTT_H | 3 | #ifndef _DRM_INTEL_GTT_H |
| 4 | #define _DRM_INTEL_GTT_H | 4 | #define _DRM_INTEL_GTT_H |
| 5 | 5 | ||
| 6 | void intel_gtt_get(size_t *gtt_total, size_t *stolen_size, | 6 | void intel_gtt_get(u64 *gtt_total, size_t *stolen_size, |
| 7 | phys_addr_t *mappable_base, unsigned long *mappable_end); | 7 | phys_addr_t *mappable_base, u64 *mappable_end); |
| 8 | 8 | ||
| 9 | int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, | 9 | int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, |
| 10 | struct agp_bridge_data *bridge); | 10 | struct agp_bridge_data *bridge); |
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 6e1a2ed116cb..192027b4f031 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h | |||
| @@ -354,9 +354,15 @@ typedef struct drm_i915_irq_wait { | |||
| 354 | #define I915_PARAM_REVISION 32 | 354 | #define I915_PARAM_REVISION 32 |
| 355 | #define I915_PARAM_SUBSLICE_TOTAL 33 | 355 | #define I915_PARAM_SUBSLICE_TOTAL 33 |
| 356 | #define I915_PARAM_EU_TOTAL 34 | 356 | #define I915_PARAM_EU_TOTAL 34 |
| 357 | #define I915_PARAM_HAS_GPU_RESET 35 | ||
| 358 | #define I915_PARAM_HAS_RESOURCE_STREAMER 36 | ||
| 357 | 359 | ||
| 358 | typedef struct drm_i915_getparam { | 360 | typedef struct drm_i915_getparam { |
| 359 | int param; | 361 | s32 param; |
| 362 | /* | ||
| 363 | * WARNING: Using pointers instead of fixed-size u64 means we need to write | ||
| 364 | * compat32 code. Don't repeat this mistake. | ||
| 365 | */ | ||
| 360 | int __user *value; | 366 | int __user *value; |
| 361 | } drm_i915_getparam_t; | 367 | } drm_i915_getparam_t; |
| 362 | 368 | ||
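The two new parameters are queried through the existing GETPARAM ioctl. A hedged userspace sketch, assuming an already-open DRM file descriptor and the libdrm copy of this header (example_has_resource_streamer() is illustrative):

#include <sys/ioctl.h>
#include <i915_drm.h>	/* libdrm copy of this header; include path assumed */

/* Illustrative only: returns 1 if the kernel reports resource streamer
 * support, 0 otherwise or on any error (e.g. an older kernel). */
static int example_has_resource_streamer(int fd)
{
	drm_i915_getparam_t gp;
	int value = 0;

	gp.param = I915_PARAM_HAS_RESOURCE_STREAMER;
	gp.value = &value;

	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;

	return value;
}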
| @@ -764,7 +770,12 @@ struct drm_i915_gem_execbuffer2 { | |||
| 764 | #define I915_EXEC_BSD_RING1 (1<<13) | 770 | #define I915_EXEC_BSD_RING1 (1<<13) |
| 765 | #define I915_EXEC_BSD_RING2 (2<<13) | 771 | #define I915_EXEC_BSD_RING2 (2<<13) |
| 766 | 772 | ||
| 767 | #define __I915_EXEC_UNKNOWN_FLAGS -(1<<15) | 773 | /** Tell the kernel that the batchbuffer is processed by |
| 774 | * the resource streamer. | ||
| 775 | */ | ||
| 776 | #define I915_EXEC_RESOURCE_STREAMER (1<<15) | ||
| 777 | |||
| 778 | #define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_RESOURCE_STREAMER<<1) | ||
| 768 | 779 | ||
| 769 | #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) | 780 | #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) |
| 770 | #define i915_execbuffer2_set_context_id(eb2, context) \ | 781 | #define i915_execbuffer2_set_context_id(eb2, context) \ |
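Userspace opts a batch into resource streamer processing by setting the new flag on the execbuffer call; widening __I915_EXEC_UNKNOWN_FLAGS is what lets older kernels reject it cleanly. A hedged sketch with buffer and relocation setup elided (example_exec_with_rs() is illustrative; includes as in the GETPARAM sketch above):

/* Illustrative only: the exec object list and batch are assumed built. */
static int example_exec_with_rs(int fd,
				struct drm_i915_gem_exec_object2 *objs,
				unsigned int count, __u32 batch_len)
{
	struct drm_i915_gem_execbuffer2 execbuf = {
		.buffers_ptr = (unsigned long)objs,
		.buffer_count = count,
		.batch_len = batch_len,
		.flags = I915_EXEC_RENDER | I915_EXEC_RESOURCE_STREAMER,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}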
| @@ -1106,6 +1117,7 @@ struct drm_i915_gem_context_param { | |||
| 1106 | __u32 size; | 1117 | __u32 size; |
| 1107 | __u64 param; | 1118 | __u64 param; |
| 1108 | #define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 | 1119 | #define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 |
| 1120 | #define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 | ||
| 1109 | __u64 value; | 1121 | __u64 value; |
| 1110 | }; | 1122 | }; |
| 1111 | 1123 | ||
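The new I915_CONTEXT_PARAM_NO_ZEROMAP value is set per context through the existing context setparam ioctl. A hedged userspace sketch, where ctx_id is assumed to come from an earlier CONTEXT_CREATE call (example_set_no_zeromap() is illustrative; includes as in the GETPARAM sketch above):

/* Illustrative only: keep this context from mapping objects at GPU
 * address zero. */
static int example_set_no_zeromap(int fd, __u32 ctx_id)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_NO_ZEROMAP,
		.value = 1,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}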
