aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2014-08-04 03:57:34 -0400
committerDave Airlie <airlied@redhat.com>2014-08-04 03:57:34 -0400
commitc759606c96dc052373d4c36ea383595da46b04e9 (patch)
tree583a4234d43c57e03b85315353adf304970645fc
parent2ee39452fa2fff1e8edb954ccb7e0daee9646557 (diff)
parent4dac3edfe68e5e1b3c2216b84ba160572420fa40 (diff)
Merge tag 'drm-intel-next-2014-07-25-merged' of git://anongit.freedesktop.org/drm-intel into drm-next
Final feature pull for 3.17. drm-intel-next-2014-07-25: - Ditch UMS support (well just the config option for now) - Prep work for future platforms (Sonika Jindal, Damien) - runtime pm/soix fixes (Paulo, Jesse) - psr tracking improvements, locking fixes, now enabled by default! - rps fixes for chv (Deepak, Ville) - drm core patches for rotation support (Ville, Sagar Kamble) - the i915 parts unfortunately didn't make it yet - userptr fixes (Chris) - minimum backlight brightness (Jani), acked long ago by Matthew Garret on irc - I've forgotten about this patch :( QA is a bit unhappy about the DP MST stuff since it broke hpd testing a bit, but otherwise looks sane. I've backmerged drm-next to resolve conflicts with the mst stuff, which means the new tag itself doesn't contain the overview as usual. * tag 'drm-intel-next-2014-07-25-merged' of git://anongit.freedesktop.org/drm-intel: (75 commits) drm/i915/userptr: Keep spin_lock/unlock in the same block drm/i915: Allow overlapping userptr objects drm/i915: Ditch UMS config option drm/i915: respect the VBT minimum backlight brightness drm/i915: extract backlight minimum brightness from VBT drm/i915: Replace HAS_PCH_SPLIT which incorrectly lets some platforms in drm/i915: Returning from increase/decrease of pllclock when invalid drm/i915: Setting legacy palette correctly for different platforms drm/i915: Avoid incorrect returning for some platforms drm/i915: Writing proper check for reading of pipe status reg drm/i915: Returning the right VGA control reg for platforms drm/i915: Allowing changing of wm latencies for valid platforms drm/i915: Adding HAS_GMCH_DISPLAY macro drm/i915: Fix possible overflow when recording semaphore states. 
drm/i915: Do not unmap object unless no other VMAs reference it drm/i915: remove plane/cursor/pipe assertions from intel_crtc_disable drm/i915: Reorder ctx unref on ppgtt cleanup drm/i915/error: Check the potential ctx obj's vm drm/i915: Fix printing proper min/min/rpe values in debugfs drm/i915: BDW can also detect unclaimed registers ...
-rw-r--r--Documentation/DocBook/drm.tmpl77
-rw-r--r--drivers/gpu/drm/drm_crtc.c98
-rw-r--r--drivers/gpu/drm/drm_edid.c10
-rw-r--r--drivers/gpu/drm/drm_rect.c140
-rw-r--r--drivers/gpu/drm/i915/Kconfig12
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c35
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c20
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c11
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h29
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c13
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c149
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c24
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c68
-rw-r--r--drivers/gpu/drm/i915/i915_params.c10
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h8
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c57
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c3
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c4
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c78
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c205
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h40
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c35
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c160
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c202
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c12
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c15
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c30
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h7
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c17
-rw-r--r--include/drm/drm_crtc.h17
-rw-r--r--include/drm/drm_rect.h6
-rw-r--r--include/uapi/drm/drm_mode.h5
36 files changed, 1182 insertions, 428 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 4890d94ec062..1d3756d3176c 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -2508,7 +2508,7 @@ void intel_crt_init(struct drm_device *dev)
2508 <td valign="top" >Description/Restrictions</td> 2508 <td valign="top" >Description/Restrictions</td>
2509 </tr> 2509 </tr>
2510 <tr> 2510 <tr>
2511 <td rowspan="20" valign="top" >DRM</td> 2511 <td rowspan="21" valign="top" >DRM</td>
2512 <td rowspan="2" valign="top" >Generic</td> 2512 <td rowspan="2" valign="top" >Generic</td>
2513 <td valign="top" >“EDID”</td> 2513 <td valign="top" >“EDID”</td>
2514 <td valign="top" >BLOB | IMMUTABLE</td> 2514 <td valign="top" >BLOB | IMMUTABLE</td>
@@ -2639,7 +2639,7 @@ void intel_crt_init(struct drm_device *dev)
2639 <td valign="top" >TBD</td> 2639 <td valign="top" >TBD</td>
2640 </tr> 2640 </tr>
2641 <tr> 2641 <tr>
2642 <td rowspan="2" valign="top" >Optional</td> 2642 <td rowspan="3" valign="top" >Optional</td>
2643 <td valign="top" >“scaling mode”</td> 2643 <td valign="top" >“scaling mode”</td>
2644 <td valign="top" >ENUM</td> 2644 <td valign="top" >ENUM</td>
2645 <td valign="top" >{ "None", "Full", "Center", "Full aspect" }</td> 2645 <td valign="top" >{ "None", "Full", "Center", "Full aspect" }</td>
@@ -2647,6 +2647,15 @@ void intel_crt_init(struct drm_device *dev)
2647 <td valign="top" >TBD</td> 2647 <td valign="top" >TBD</td>
2648 </tr> 2648 </tr>
2649 <tr> 2649 <tr>
2650 <td valign="top" >"aspect ratio"</td>
2651 <td valign="top" >ENUM</td>
2652 <td valign="top" >{ "None", "4:3", "16:9" }</td>
2653 <td valign="top" >Connector</td>
2654 <td valign="top" >DRM property to set aspect ratio from user space app.
2655 This enum is made generic to allow addition of custom aspect
2656 ratios.</td>
2657 </tr>
2658 <tr>
2650 <td valign="top" >“dirty”</td> 2659 <td valign="top" >“dirty”</td>
2651 <td valign="top" >ENUM | IMMUTABLE</td> 2660 <td valign="top" >ENUM | IMMUTABLE</td>
2652 <td valign="top" >{ "Off", "On", "Annotate" }</td> 2661 <td valign="top" >{ "Off", "On", "Annotate" }</td>
@@ -2655,7 +2664,7 @@ void intel_crt_init(struct drm_device *dev)
2655 </tr> 2664 </tr>
2656 <tr> 2665 <tr>
2657 <td rowspan="21" valign="top" >i915</td> 2666 <td rowspan="21" valign="top" >i915</td>
2658 <td rowspan="3" valign="top" >Generic</td> 2667 <td rowspan="2" valign="top" >Generic</td>
2659 <td valign="top" >"Broadcast RGB"</td> 2668 <td valign="top" >"Broadcast RGB"</td>
2660 <td valign="top" >ENUM</td> 2669 <td valign="top" >ENUM</td>
2661 <td valign="top" >{ "Automatic", "Full", "Limited 16:235" }</td> 2670 <td valign="top" >{ "Automatic", "Full", "Limited 16:235" }</td>
@@ -2670,10 +2679,11 @@ void intel_crt_init(struct drm_device *dev)
2670 <td valign="top" >TBD</td> 2679 <td valign="top" >TBD</td>
2671 </tr> 2680 </tr>
2672 <tr> 2681 <tr>
2673 <td valign="top" >Standard name as in DRM</td> 2682 <td rowspan="1" valign="top" >Plane</td>
2674 <td valign="top" >Standard type as in DRM</td> 2683 <td valign="top" >“rotation”</td>
2675 <td valign="top" >Standard value as in DRM</td> 2684 <td valign="top" >BITMASK</td>
2676 <td valign="top" >Standard Object as in DRM</td> 2685 <td valign="top" >{ 0, "rotate-0" }, { 2, "rotate-180" }</td>
2686 <td valign="top" >Plane</td>
2677 <td valign="top" >TBD</td> 2687 <td valign="top" >TBD</td>
2678 </tr> 2688 </tr>
2679 <tr> 2689 <tr>
@@ -2805,8 +2815,8 @@ void intel_crt_init(struct drm_device *dev)
2805 <td valign="top" >TBD</td> 2815 <td valign="top" >TBD</td>
2806 </tr> 2816 </tr>
2807 <tr> 2817 <tr>
2808 <td rowspan="3" valign="top" >CDV gma-500</td> 2818 <td rowspan="2" valign="top" >CDV gma-500</td>
2809 <td rowspan="3" valign="top" >Generic</td> 2819 <td rowspan="2" valign="top" >Generic</td>
2810 <td valign="top" >"Broadcast RGB"</td> 2820 <td valign="top" >"Broadcast RGB"</td>
2811 <td valign="top" >ENUM</td> 2821 <td valign="top" >ENUM</td>
2812 <td valign="top" >{ “Full”, “Limited 16:235” }</td> 2822 <td valign="top" >{ “Full”, “Limited 16:235” }</td>
@@ -2821,15 +2831,8 @@ void intel_crt_init(struct drm_device *dev)
2821 <td valign="top" >TBD</td> 2831 <td valign="top" >TBD</td>
2822 </tr> 2832 </tr>
2823 <tr> 2833 <tr>
2824 <td valign="top" >Standard name as in DRM</td> 2834 <td rowspan="19" valign="top" >Poulsbo</td>
2825 <td valign="top" >Standard type as in DRM</td> 2835 <td rowspan="1" valign="top" >Generic</td>
2826 <td valign="top" >Standard value as in DRM</td>
2827 <td valign="top" >Standard Object as in DRM</td>
2828 <td valign="top" >TBD</td>
2829 </tr>
2830 <tr>
2831 <td rowspan="20" valign="top" >Poulsbo</td>
2832 <td rowspan="2" valign="top" >Generic</td>
2833 <td valign="top" >“backlight”</td> 2836 <td valign="top" >“backlight”</td>
2834 <td valign="top" >RANGE</td> 2837 <td valign="top" >RANGE</td>
2835 <td valign="top" >Min=0, Max=100</td> 2838 <td valign="top" >Min=0, Max=100</td>
@@ -2837,13 +2840,6 @@ void intel_crt_init(struct drm_device *dev)
2837 <td valign="top" >TBD</td> 2840 <td valign="top" >TBD</td>
2838 </tr> 2841 </tr>
2839 <tr> 2842 <tr>
2840 <td valign="top" >Standard name as in DRM</td>
2841 <td valign="top" >Standard type as in DRM</td>
2842 <td valign="top" >Standard value as in DRM</td>
2843 <td valign="top" >Standard Object as in DRM</td>
2844 <td valign="top" >TBD</td>
2845 </tr>
2846 <tr>
2847 <td rowspan="17" valign="top" >SDVO-TV</td> 2843 <td rowspan="17" valign="top" >SDVO-TV</td>
2848 <td valign="top" >“mode”</td> 2844 <td valign="top" >“mode”</td>
2849 <td valign="top" >ENUM</td> 2845 <td valign="top" >ENUM</td>
@@ -3070,7 +3066,7 @@ void intel_crt_init(struct drm_device *dev)
3070 <td valign="top" >TBD</td> 3066 <td valign="top" >TBD</td>
3071 </tr> 3067 </tr>
3072 <tr> 3068 <tr>
3073 <td rowspan="3" valign="top" >i2c/ch7006_drv</td> 3069 <td rowspan="2" valign="top" >i2c/ch7006_drv</td>
3074 <td valign="top" >Generic</td> 3070 <td valign="top" >Generic</td>
3075 <td valign="top" >“scale”</td> 3071 <td valign="top" >“scale”</td>
3076 <td valign="top" >RANGE</td> 3072 <td valign="top" >RANGE</td>
@@ -3079,14 +3075,7 @@ void intel_crt_init(struct drm_device *dev)
3079 <td valign="top" >TBD</td> 3075 <td valign="top" >TBD</td>
3080 </tr> 3076 </tr>
3081 <tr> 3077 <tr>
3082 <td rowspan="2" valign="top" >TV</td> 3078 <td rowspan="1" valign="top" >TV</td>
3083 <td valign="top" >Standard names as in DRM</td>
3084 <td valign="top" >Standard types as in DRM</td>
3085 <td valign="top" >Standard Values as in DRM</td>
3086 <td valign="top" >Standard object as in DRM</td>
3087 <td valign="top" >TBD</td>
3088 </tr>
3089 <tr>
3090 <td valign="top" >“mode”</td> 3079 <td valign="top" >“mode”</td>
3091 <td valign="top" >ENUM</td> 3080 <td valign="top" >ENUM</td>
3092 <td valign="top" >{ "PAL", "PAL-M","PAL-N"}, ”PAL-Nc" 3081 <td valign="top" >{ "PAL", "PAL-M","PAL-N"}, ”PAL-Nc"
@@ -3095,7 +3084,7 @@ void intel_crt_init(struct drm_device *dev)
3095 <td valign="top" >TBD</td> 3084 <td valign="top" >TBD</td>
3096 </tr> 3085 </tr>
3097 <tr> 3086 <tr>
3098 <td rowspan="16" valign="top" >nouveau</td> 3087 <td rowspan="15" valign="top" >nouveau</td>
3099 <td rowspan="6" valign="top" >NV10 Overlay</td> 3088 <td rowspan="6" valign="top" >NV10 Overlay</td>
3100 <td valign="top" >"colorkey"</td> 3089 <td valign="top" >"colorkey"</td>
3101 <td valign="top" >RANGE</td> 3090 <td valign="top" >RANGE</td>
@@ -3204,14 +3193,6 @@ void intel_crt_init(struct drm_device *dev)
3204 <td valign="top" >TBD</td> 3193 <td valign="top" >TBD</td>
3205 </tr> 3194 </tr>
3206 <tr> 3195 <tr>
3207 <td valign="top" >Generic</td>
3208 <td valign="top" >Standard name as in DRM</td>
3209 <td valign="top" >Standard type as in DRM</td>
3210 <td valign="top" >Standard value as in DRM</td>
3211 <td valign="top" >Standard Object as in DRM</td>
3212 <td valign="top" >TBD</td>
3213 </tr>
3214 <tr>
3215 <td rowspan="2" valign="top" >omap</td> 3196 <td rowspan="2" valign="top" >omap</td>
3216 <td rowspan="2" valign="top" >Generic</td> 3197 <td rowspan="2" valign="top" >Generic</td>
3217 <td valign="top" >“rotation”</td> 3198 <td valign="top" >“rotation”</td>
@@ -3242,7 +3223,7 @@ void intel_crt_init(struct drm_device *dev)
3242 <td valign="top" >TBD</td> 3223 <td valign="top" >TBD</td>
3243 </tr> 3224 </tr>
3244 <tr> 3225 <tr>
3245 <td rowspan="10" valign="top" >radeon</td> 3226 <td rowspan="9" valign="top" >radeon</td>
3246 <td valign="top" >DVI-I</td> 3227 <td valign="top" >DVI-I</td>
3247 <td valign="top" >“coherent”</td> 3228 <td valign="top" >“coherent”</td>
3248 <td valign="top" >RANGE</td> 3229 <td valign="top" >RANGE</td>
@@ -3314,14 +3295,6 @@ void intel_crt_init(struct drm_device *dev)
3314 <td valign="top" >TBD</td> 3295 <td valign="top" >TBD</td>
3315 </tr> 3296 </tr>
3316 <tr> 3297 <tr>
3317 <td valign="top" >Generic</td>
3318 <td valign="top" >Standard name as in DRM</td>
3319 <td valign="top" >Standard type as in DRM</td>
3320 <td valign="top" >Standard value as in DRM</td>
3321 <td valign="top" >Standard Object as in DRM</td>
3322 <td valign="top" >TBD</td>
3323 </tr>
3324 <tr>
3325 <td rowspan="3" valign="top" >rcar-du</td> 3298 <td rowspan="3" valign="top" >rcar-du</td>
3326 <td rowspan="3" valign="top" >Generic</td> 3299 <td rowspan="3" valign="top" >Generic</td>
3327 <td valign="top" >"alpha"</td> 3300 <td valign="top" >"alpha"</td>
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index c3a5a8f0825b..3c4a62169f28 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -182,6 +182,12 @@ static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
182 { DRM_MODE_SCALE_ASPECT, "Full aspect" }, 182 { DRM_MODE_SCALE_ASPECT, "Full aspect" },
183}; 183};
184 184
185static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
186 { DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
187 { DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
188 { DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
189};
190
185/* 191/*
186 * Non-global properties, but "required" for certain connectors. 192 * Non-global properties, but "required" for certain connectors.
187 */ 193 */
@@ -1463,6 +1469,33 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
1463EXPORT_SYMBOL(drm_mode_create_scaling_mode_property); 1469EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
1464 1470
1465/** 1471/**
1472 * drm_mode_create_aspect_ratio_property - create aspect ratio property
1473 * @dev: DRM device
1474 *
1475 * Called by a driver the first time it's needed, must be attached to desired
1476 * connectors.
1477 *
1478 * Returns:
1479 * Zero on success, errno on failure.
1480 */
1481int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
1482{
1483 if (dev->mode_config.aspect_ratio_property)
1484 return 0;
1485
1486 dev->mode_config.aspect_ratio_property =
1487 drm_property_create_enum(dev, 0, "aspect ratio",
1488 drm_aspect_ratio_enum_list,
1489 ARRAY_SIZE(drm_aspect_ratio_enum_list));
1490
1491 if (dev->mode_config.aspect_ratio_property == NULL)
1492 return -ENOMEM;
1493
1494 return 0;
1495}
1496EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
1497
1498/**
1466 * drm_mode_create_dirty_property - create dirty property 1499 * drm_mode_create_dirty_property - create dirty property
1467 * @dev: DRM device 1500 * @dev: DRM device
1468 * 1501 *
@@ -3476,19 +3509,28 @@ EXPORT_SYMBOL(drm_property_create_enum);
3476struct drm_property *drm_property_create_bitmask(struct drm_device *dev, 3509struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
3477 int flags, const char *name, 3510 int flags, const char *name,
3478 const struct drm_prop_enum_list *props, 3511 const struct drm_prop_enum_list *props,
3479 int num_values) 3512 int num_props,
3513 uint64_t supported_bits)
3480{ 3514{
3481 struct drm_property *property; 3515 struct drm_property *property;
3482 int i, ret; 3516 int i, ret, index = 0;
3517 int num_values = hweight64(supported_bits);
3483 3518
3484 flags |= DRM_MODE_PROP_BITMASK; 3519 flags |= DRM_MODE_PROP_BITMASK;
3485 3520
3486 property = drm_property_create(dev, flags, name, num_values); 3521 property = drm_property_create(dev, flags, name, num_values);
3487 if (!property) 3522 if (!property)
3488 return NULL; 3523 return NULL;
3524 for (i = 0; i < num_props; i++) {
3525 if (!(supported_bits & (1ULL << props[i].type)))
3526 continue;
3489 3527
3490 for (i = 0; i < num_values; i++) { 3528 if (WARN_ON(index >= num_values)) {
3491 ret = drm_property_add_enum(property, i, 3529 drm_property_destroy(dev, property);
3530 return NULL;
3531 }
3532
3533 ret = drm_property_add_enum(property, index++,
3492 props[i].type, 3534 props[i].type,
3493 props[i].name); 3535 props[i].name);
3494 if (ret) { 3536 if (ret) {
@@ -4937,6 +4979,36 @@ int drm_format_vert_chroma_subsampling(uint32_t format)
4937EXPORT_SYMBOL(drm_format_vert_chroma_subsampling); 4979EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
4938 4980
4939/** 4981/**
4982 * drm_rotation_simplify() - Try to simplify the rotation
4983 * @rotation: Rotation to be simplified
4984 * @supported_rotations: Supported rotations
4985 *
4986 * Attempt to simplify the rotation to a form that is supported.
4987 * Eg. if the hardware supports everything except DRM_REFLECT_X
4988 * one could call this function like this:
4989 *
4990 * drm_rotation_simplify(rotation, BIT(DRM_ROTATE_0) |
4991 * BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_180) |
4992 * BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_Y));
4993 *
4994 * to eliminate the DRM_REFLECT_X flag. Depending on what kind of
4995 * transforms the hardware supports, this function may not
4996 * be able to produce a supported transform, so the caller should
4997 * check the result afterwards.
4998 */
4999unsigned int drm_rotation_simplify(unsigned int rotation,
5000 unsigned int supported_rotations)
5001{
5002 if (rotation & ~supported_rotations) {
5003 rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y);
5004 rotation = (rotation & ~0xf) | BIT((ffs(rotation & 0xf) + 1) % 4);
5005 }
5006
5007 return rotation;
5008}
5009EXPORT_SYMBOL(drm_rotation_simplify);
5010
5011/**
4940 * drm_mode_config_init - initialize DRM mode_configuration structure 5012 * drm_mode_config_init - initialize DRM mode_configuration structure
4941 * @dev: DRM device 5013 * @dev: DRM device
4942 * 5014 *
@@ -5054,3 +5126,21 @@ void drm_mode_config_cleanup(struct drm_device *dev)
5054 drm_modeset_lock_fini(&dev->mode_config.connection_mutex); 5126 drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
5055} 5127}
5056EXPORT_SYMBOL(drm_mode_config_cleanup); 5128EXPORT_SYMBOL(drm_mode_config_cleanup);
5129
5130struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
5131 unsigned int supported_rotations)
5132{
5133 static const struct drm_prop_enum_list props[] = {
5134 { DRM_ROTATE_0, "rotate-0" },
5135 { DRM_ROTATE_90, "rotate-90" },
5136 { DRM_ROTATE_180, "rotate-180" },
5137 { DRM_ROTATE_270, "rotate-270" },
5138 { DRM_REFLECT_X, "reflect-x" },
5139 { DRM_REFLECT_Y, "reflect-y" },
5140 };
5141
5142 return drm_property_create_bitmask(dev, 0, "rotation",
5143 props, ARRAY_SIZE(props),
5144 supported_rotations);
5145}
5146EXPORT_SYMBOL(drm_mode_create_rotation_property);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 087d6080bc1d..1dbf3bc4c6a3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3776,8 +3776,14 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
3776 3776
3777 frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; 3777 frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
3778 3778
3779 /* Populate picture aspect ratio from CEA mode list */ 3779 /*
3780 if (frame->video_code > 0) 3780 * Populate picture aspect ratio from either
3781 * user input (if specified) or from the CEA mode list.
3782 */
3783 if (mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_4_3 ||
3784 mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_16_9)
3785 frame->picture_aspect = mode->picture_aspect_ratio;
3786 else if (frame->video_code > 0)
3781 frame->picture_aspect = drm_get_cea_aspect_ratio( 3787 frame->picture_aspect = drm_get_cea_aspect_ratio(
3782 frame->video_code); 3788 frame->video_code);
3783 3789
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index 7047ca025787..631f5afd451c 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -293,3 +293,143 @@ void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point)
293 DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1); 293 DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
294} 294}
295EXPORT_SYMBOL(drm_rect_debug_print); 295EXPORT_SYMBOL(drm_rect_debug_print);
296
297/**
298 * drm_rect_rotate - Rotate the rectangle
299 * @r: rectangle to be rotated
300 * @width: Width of the coordinate space
301 * @height: Height of the coordinate space
302 * @rotation: Transformation to be applied
303 *
304 * Apply @rotation to the coordinates of rectangle @r.
305 *
306 * @width and @height combined with @rotation define
307 * the location of the new origin.
308 *
309 * @width corresponds to the horizontal and @height
310 * to the vertical axis of the untransformed coordinate
311 * space.
312 */
313void drm_rect_rotate(struct drm_rect *r,
314 int width, int height,
315 unsigned int rotation)
316{
317 struct drm_rect tmp;
318
319 if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
320 tmp = *r;
321
322 if (rotation & BIT(DRM_REFLECT_X)) {
323 r->x1 = width - tmp.x2;
324 r->x2 = width - tmp.x1;
325 }
326
327 if (rotation & BIT(DRM_REFLECT_Y)) {
328 r->y1 = height - tmp.y2;
329 r->y2 = height - tmp.y1;
330 }
331 }
332
333 switch (rotation & 0xf) {
334 case BIT(DRM_ROTATE_0):
335 break;
336 case BIT(DRM_ROTATE_90):
337 tmp = *r;
338 r->x1 = tmp.y1;
339 r->x2 = tmp.y2;
340 r->y1 = width - tmp.x2;
341 r->y2 = width - tmp.x1;
342 break;
343 case BIT(DRM_ROTATE_180):
344 tmp = *r;
345 r->x1 = width - tmp.x2;
346 r->x2 = width - tmp.x1;
347 r->y1 = height - tmp.y2;
348 r->y2 = height - tmp.y1;
349 break;
350 case BIT(DRM_ROTATE_270):
351 tmp = *r;
352 r->x1 = height - tmp.y2;
353 r->x2 = height - tmp.y1;
354 r->y1 = tmp.x1;
355 r->y2 = tmp.x2;
356 break;
357 default:
358 break;
359 }
360}
361EXPORT_SYMBOL(drm_rect_rotate);
362
363/**
364 * drm_rect_rotate_inv - Inverse rotate the rectangle
365 * @r: rectangle to be rotated
366 * @width: Width of the coordinate space
367 * @height: Height of the coordinate space
368 * @rotation: Transformation whose inverse is to be applied
369 *
370 * Apply the inverse of @rotation to the coordinates
371 * of rectangle @r.
372 *
373 * @width and @height combined with @rotation define
374 * the location of the new origin.
375 *
376 * @width corresponds to the horizontal and @height
377 * to the vertical axis of the original untransformed
378 * coordinate space, so that you never have to flip
379 * them when doing a rotation and its inverse.
380 * That is, if you do:
381 *
382 * drm_rect_rotate(&r, width, height, rotation);
383 * drm_rect_rotate_inv(&r, width, height, rotation);
384 *
385 * you will always get back the original rectangle.
386 */
387void drm_rect_rotate_inv(struct drm_rect *r,
388 int width, int height,
389 unsigned int rotation)
390{
391 struct drm_rect tmp;
392
393 switch (rotation & 0xf) {
394 case BIT(DRM_ROTATE_0):
395 break;
396 case BIT(DRM_ROTATE_90):
397 tmp = *r;
398 r->x1 = width - tmp.y2;
399 r->x2 = width - tmp.y1;
400 r->y1 = tmp.x1;
401 r->y2 = tmp.x2;
402 break;
403 case BIT(DRM_ROTATE_180):
404 tmp = *r;
405 r->x1 = width - tmp.x2;
406 r->x2 = width - tmp.x1;
407 r->y1 = height - tmp.y2;
408 r->y2 = height - tmp.y1;
409 break;
410 case BIT(DRM_ROTATE_270):
411 tmp = *r;
412 r->x1 = tmp.y1;
413 r->x2 = tmp.y2;
414 r->y1 = height - tmp.x2;
415 r->y2 = height - tmp.x1;
416 break;
417 default:
418 break;
419 }
420
421 if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
422 tmp = *r;
423
424 if (rotation & BIT(DRM_REFLECT_X)) {
425 r->x1 = width - tmp.x2;
426 r->x2 = width - tmp.x1;
427 }
428
429 if (rotation & BIT(DRM_REFLECT_Y)) {
430 r->y1 = height - tmp.y2;
431 r->y2 = height - tmp.y1;
432 }
433 }
434}
435EXPORT_SYMBOL(drm_rect_rotate_inv);
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 437e1824d0bf..4e39ab34eb1c 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -69,15 +69,3 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
69 option changes the default for that module option. 69 option changes the default for that module option.
70 70
71 If in doubt, say "N". 71 If in doubt, say "N".
72
73config DRM_I915_UMS
74 bool "Enable userspace modesetting on Intel hardware (DEPRECATED)"
75 depends on DRM_I915 && BROKEN
76 default n
77 help
78 Choose this option if you still need userspace modesetting.
79
80 Userspace modesetting is deprecated for quite some time now, so
81 enable this only if you have ancient versions of the DDX drivers.
82
83 If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e6ff0ecf161c..9e737b771c40 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1108,20 +1108,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
1108 seq_printf(m, "Max overclocked frequency: %dMHz\n", 1108 seq_printf(m, "Max overclocked frequency: %dMHz\n",
1109 dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER); 1109 dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
1110 } else if (IS_VALLEYVIEW(dev)) { 1110 } else if (IS_VALLEYVIEW(dev)) {
1111 u32 freq_sts, val; 1111 u32 freq_sts;
1112 1112
1113 mutex_lock(&dev_priv->rps.hw_lock); 1113 mutex_lock(&dev_priv->rps.hw_lock);
1114 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 1114 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1115 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); 1115 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1116 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); 1116 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1117 1117
1118 val = valleyview_rps_max_freq(dev_priv);
1119 seq_printf(m, "max GPU freq: %d MHz\n", 1118 seq_printf(m, "max GPU freq: %d MHz\n",
1120 vlv_gpu_freq(dev_priv, val)); 1119 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1121 1120
1122 val = valleyview_rps_min_freq(dev_priv);
1123 seq_printf(m, "min GPU freq: %d MHz\n", 1121 seq_printf(m, "min GPU freq: %d MHz\n",
1124 vlv_gpu_freq(dev_priv, val)); 1122 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq));
1123
1124 seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
1125 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
1125 1126
1126 seq_printf(m, "current GPU freq: %d MHz\n", 1127 seq_printf(m, "current GPU freq: %d MHz\n",
1127 vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff)); 1128 vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
@@ -1891,10 +1892,15 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
1891 1892
1892 intel_runtime_pm_get(dev_priv); 1893 intel_runtime_pm_get(dev_priv);
1893 1894
1895 mutex_lock(&dev_priv->psr.lock);
1894 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); 1896 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
1895 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); 1897 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
1896 seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled)); 1898 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
1897 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active)); 1899 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
1900 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
1901 dev_priv->psr.busy_frontbuffer_bits);
1902 seq_printf(m, "Re-enable work scheduled: %s\n",
1903 yesno(work_busy(&dev_priv->psr.work.work)));
1898 1904
1899 enabled = HAS_PSR(dev) && 1905 enabled = HAS_PSR(dev) &&
1900 I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; 1906 I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
@@ -1904,6 +1910,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
1904 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) & 1910 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
1905 EDP_PSR_PERF_CNT_MASK; 1911 EDP_PSR_PERF_CNT_MASK;
1906 seq_printf(m, "Performance_Counter: %u\n", psrperf); 1912 seq_printf(m, "Performance_Counter: %u\n", psrperf);
1913 mutex_unlock(&dev_priv->psr.lock);
1907 1914
1908 intel_runtime_pm_put(dev_priv); 1915 intel_runtime_pm_put(dev_priv);
1909 return 0; 1916 return 0;
@@ -1989,7 +1996,7 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
1989 1996
1990 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); 1997 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
1991 seq_printf(m, "IRQs disabled: %s\n", 1998 seq_printf(m, "IRQs disabled: %s\n",
1992 yesno(dev_priv->pm.irqs_disabled)); 1999 yesno(!intel_irqs_enabled(dev_priv)));
1993 2000
1994 return 0; 2001 return 0;
1995} 2002}
@@ -3284,7 +3291,7 @@ static int pri_wm_latency_open(struct inode *inode, struct file *file)
3284{ 3291{
3285 struct drm_device *dev = inode->i_private; 3292 struct drm_device *dev = inode->i_private;
3286 3293
3287 if (!HAS_PCH_SPLIT(dev)) 3294 if (HAS_GMCH_DISPLAY(dev))
3288 return -ENODEV; 3295 return -ENODEV;
3289 3296
3290 return single_open(file, pri_wm_latency_show, dev); 3297 return single_open(file, pri_wm_latency_show, dev);
@@ -3294,7 +3301,7 @@ static int spr_wm_latency_open(struct inode *inode, struct file *file)
3294{ 3301{
3295 struct drm_device *dev = inode->i_private; 3302 struct drm_device *dev = inode->i_private;
3296 3303
3297 if (!HAS_PCH_SPLIT(dev)) 3304 if (HAS_GMCH_DISPLAY(dev))
3298 return -ENODEV; 3305 return -ENODEV;
3299 3306
3300 return single_open(file, spr_wm_latency_show, dev); 3307 return single_open(file, spr_wm_latency_show, dev);
@@ -3304,7 +3311,7 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file)
3304{ 3311{
3305 struct drm_device *dev = inode->i_private; 3312 struct drm_device *dev = inode->i_private;
3306 3313
3307 if (!HAS_PCH_SPLIT(dev)) 3314 if (HAS_GMCH_DISPLAY(dev))
3308 return -ENODEV; 3315 return -ENODEV;
3309 3316
3310 return single_open(file, cur_wm_latency_show, dev); 3317 return single_open(file, cur_wm_latency_show, dev);
@@ -3656,8 +3663,8 @@ i915_max_freq_set(void *data, u64 val)
3656 if (IS_VALLEYVIEW(dev)) { 3663 if (IS_VALLEYVIEW(dev)) {
3657 val = vlv_freq_opcode(dev_priv, val); 3664 val = vlv_freq_opcode(dev_priv, val);
3658 3665
3659 hw_max = valleyview_rps_max_freq(dev_priv); 3666 hw_max = dev_priv->rps.max_freq;
3660 hw_min = valleyview_rps_min_freq(dev_priv); 3667 hw_min = dev_priv->rps.min_freq;
3661 } else { 3668 } else {
3662 do_div(val, GT_FREQUENCY_MULTIPLIER); 3669 do_div(val, GT_FREQUENCY_MULTIPLIER);
3663 3670
@@ -3737,8 +3744,8 @@ i915_min_freq_set(void *data, u64 val)
3737 if (IS_VALLEYVIEW(dev)) { 3744 if (IS_VALLEYVIEW(dev)) {
3738 val = vlv_freq_opcode(dev_priv, val); 3745 val = vlv_freq_opcode(dev_priv, val);
3739 3746
3740 hw_max = valleyview_rps_max_freq(dev_priv); 3747 hw_max = dev_priv->rps.max_freq;
3741 hw_min = valleyview_rps_min_freq(dev_priv); 3748 hw_min = dev_priv->rps.min_freq;
3742 } else { 3749 } else {
3743 do_div(val, GT_FREQUENCY_MULTIPLIER); 3750 do_div(val, GT_FREQUENCY_MULTIPLIER);
3744 3751
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d335c46ec6bc..b0b03b62d090 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1340,6 +1340,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
1340 if (ret) 1340 if (ret)
1341 goto cleanup_gem_stolen; 1341 goto cleanup_gem_stolen;
1342 1342
1343 dev_priv->pm._irqs_disabled = false;
1344
1343 /* Important: The output setup functions called by modeset_init need 1345 /* Important: The output setup functions called by modeset_init need
1344 * working irqs for e.g. gmbus and dp aux transfers. */ 1346 * working irqs for e.g. gmbus and dp aux transfers. */
1345 intel_modeset_init(dev); 1347 intel_modeset_init(dev);
@@ -1424,15 +1426,16 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1424} 1426}
1425 1427
1426#if IS_ENABLED(CONFIG_FB) 1428#if IS_ENABLED(CONFIG_FB)
1427static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) 1429static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1428{ 1430{
1429 struct apertures_struct *ap; 1431 struct apertures_struct *ap;
1430 struct pci_dev *pdev = dev_priv->dev->pdev; 1432 struct pci_dev *pdev = dev_priv->dev->pdev;
1431 bool primary; 1433 bool primary;
1434 int ret;
1432 1435
1433 ap = alloc_apertures(1); 1436 ap = alloc_apertures(1);
1434 if (!ap) 1437 if (!ap)
1435 return; 1438 return -ENOMEM;
1436 1439
1437 ap->ranges[0].base = dev_priv->gtt.mappable_base; 1440 ap->ranges[0].base = dev_priv->gtt.mappable_base;
1438 ap->ranges[0].size = dev_priv->gtt.mappable_end; 1441 ap->ranges[0].size = dev_priv->gtt.mappable_end;
@@ -1440,13 +1443,16 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1440 primary = 1443 primary =
1441 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; 1444 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
1442 1445
1443 remove_conflicting_framebuffers(ap, "inteldrmfb", primary); 1446 ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
1444 1447
1445 kfree(ap); 1448 kfree(ap);
1449
1450 return ret;
1446} 1451}
1447#else 1452#else
1448static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) 1453static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1449{ 1454{
1455 return 0;
1450} 1456}
1451#endif 1457#endif
1452 1458
@@ -1664,7 +1670,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1664 goto out_gtt; 1670 goto out_gtt;
1665 } 1671 }
1666 1672
1667 i915_kick_out_firmware_fb(dev_priv); 1673 ret = i915_kick_out_firmware_fb(dev_priv);
1674 if (ret) {
1675 DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
1676 goto out_gtt;
1677 }
1668 } 1678 }
1669 1679
1670 pci_set_master(dev->pdev); 1680 pci_set_master(dev->pdev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2a83833207b1..6c4b25ce8bb0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -303,6 +303,7 @@ static const struct intel_device_info intel_broadwell_d_info = {
303 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, 303 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
304 .has_llc = 1, 304 .has_llc = 1,
305 .has_ddi = 1, 305 .has_ddi = 1,
306 .has_fpga_dbg = 1,
306 .has_fbc = 1, 307 .has_fbc = 1,
307 GEN_DEFAULT_PIPEOFFSETS, 308 GEN_DEFAULT_PIPEOFFSETS,
308 IVB_CURSOR_OFFSETS, 309 IVB_CURSOR_OFFSETS,
@@ -314,6 +315,7 @@ static const struct intel_device_info intel_broadwell_m_info = {
314 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, 315 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
315 .has_llc = 1, 316 .has_llc = 1,
316 .has_ddi = 1, 317 .has_ddi = 1,
318 .has_fpga_dbg = 1,
317 .has_fbc = 1, 319 .has_fbc = 1,
318 GEN_DEFAULT_PIPEOFFSETS, 320 GEN_DEFAULT_PIPEOFFSETS,
319 IVB_CURSOR_OFFSETS, 321 IVB_CURSOR_OFFSETS,
@@ -325,6 +327,7 @@ static const struct intel_device_info intel_broadwell_gt3d_info = {
325 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 327 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
326 .has_llc = 1, 328 .has_llc = 1,
327 .has_ddi = 1, 329 .has_ddi = 1,
330 .has_fpga_dbg = 1,
328 .has_fbc = 1, 331 .has_fbc = 1,
329 GEN_DEFAULT_PIPEOFFSETS, 332 GEN_DEFAULT_PIPEOFFSETS,
330 IVB_CURSOR_OFFSETS, 333 IVB_CURSOR_OFFSETS,
@@ -336,6 +339,7 @@ static const struct intel_device_info intel_broadwell_gt3m_info = {
336 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 339 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
337 .has_llc = 1, 340 .has_llc = 1,
338 .has_ddi = 1, 341 .has_ddi = 1,
342 .has_fpga_dbg = 1,
339 .has_fbc = 1, 343 .has_fbc = 1,
340 GEN_DEFAULT_PIPEOFFSETS, 344 GEN_DEFAULT_PIPEOFFSETS,
341 IVB_CURSOR_OFFSETS, 345 IVB_CURSOR_OFFSETS,
@@ -518,12 +522,11 @@ static int i915_drm_freeze(struct drm_device *dev)
518 522
519 /* 523 /*
520 * Disable CRTCs directly since we want to preserve sw state 524 * Disable CRTCs directly since we want to preserve sw state
521 * for _thaw. 525 * for _thaw. Also, power gate the CRTC power wells.
522 */ 526 */
523 drm_modeset_lock_all(dev); 527 drm_modeset_lock_all(dev);
524 for_each_crtc(dev, crtc) { 528 for_each_crtc(dev, crtc)
525 dev_priv->display.crtc_disable(crtc); 529 intel_crtc_control(crtc, false);
526 }
527 drm_modeset_unlock_all(dev); 530 drm_modeset_unlock_all(dev);
528 531
529 intel_dp_mst_suspend(dev); 532 intel_dp_mst_suspend(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7db16bee2997..f4832b770522 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -179,6 +179,10 @@ enum hpd_pin {
179 list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \ 179 list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
180 if ((intel_connector)->base.encoder == (__encoder)) 180 if ((intel_connector)->base.encoder == (__encoder))
181 181
182#define for_each_power_domain(domain, mask) \
183 for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
184 if ((1 << (domain)) & (mask))
185
182struct drm_i915_private; 186struct drm_i915_private;
183struct i915_mmu_object; 187struct i915_mmu_object;
184 188
@@ -436,8 +440,8 @@ struct drm_i915_display_funcs {
436 void (*update_wm)(struct drm_crtc *crtc); 440 void (*update_wm)(struct drm_crtc *crtc);
437 void (*update_sprite_wm)(struct drm_plane *plane, 441 void (*update_sprite_wm)(struct drm_plane *plane,
438 struct drm_crtc *crtc, 442 struct drm_crtc *crtc,
439 uint32_t sprite_width, int pixel_size, 443 uint32_t sprite_width, uint32_t sprite_height,
440 bool enable, bool scaled); 444 int pixel_size, bool enable, bool scaled);
441 void (*modeset_global_resources)(struct drm_device *dev); 445 void (*modeset_global_resources)(struct drm_device *dev);
442 /* Returns the active state of the crtc, and if the crtc is active, 446 /* Returns the active state of the crtc, and if the crtc is active,
443 * fills out the pipe-config with the hw state. */ 447 * fills out the pipe-config with the hw state. */
@@ -654,13 +658,15 @@ struct i915_drrs {
654 struct intel_connector *connector; 658 struct intel_connector *connector;
655}; 659};
656 660
661struct intel_dp;
657struct i915_psr { 662struct i915_psr {
663 struct mutex lock;
658 bool sink_support; 664 bool sink_support;
659 bool source_ok; 665 bool source_ok;
660 bool setup_done; 666 struct intel_dp *enabled;
661 bool enabled;
662 bool active; 667 bool active;
663 struct delayed_work work; 668 struct delayed_work work;
669 unsigned busy_frontbuffer_bits;
664}; 670};
665 671
666enum intel_pch { 672enum intel_pch {
@@ -931,6 +937,7 @@ struct intel_gen6_power_mgmt {
931 u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ 937 u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
932 u8 rp1_freq; /* "less than" RP0 power/freqency */ 938 u8 rp1_freq; /* "less than" RP0 power/freqency */
933 u8 rp0_freq; /* Non-overclocked max frequency. */ 939 u8 rp0_freq; /* Non-overclocked max frequency. */
940 u32 cz_freq;
934 941
935 u32 ei_interrupt_count; 942 u32 ei_interrupt_count;
936 943
@@ -1263,6 +1270,7 @@ struct intel_vbt_data {
1263 u16 pwm_freq_hz; 1270 u16 pwm_freq_hz;
1264 bool present; 1271 bool present;
1265 bool active_low_pwm; 1272 bool active_low_pwm;
1273 u8 min_brightness; /* min_brightness/255 of max */
1266 } backlight; 1274 } backlight;
1267 1275
1268 /* MIPI DSI */ 1276 /* MIPI DSI */
@@ -1332,7 +1340,7 @@ struct ilk_wm_values {
1332 */ 1340 */
1333struct i915_runtime_pm { 1341struct i915_runtime_pm {
1334 bool suspended; 1342 bool suspended;
1335 bool irqs_disabled; 1343 bool _irqs_disabled;
1336}; 1344};
1337 1345
1338enum intel_pipe_crc_source { 1346enum intel_pipe_crc_source {
@@ -2093,6 +2101,8 @@ struct drm_i915_cmd_table {
2093#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) 2101#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
2094#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) 2102#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
2095 2103
2104#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
2105
2096/* DPF == dynamic parity feature */ 2106/* DPF == dynamic parity feature */
2097#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 2107#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
2098#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev)) 2108#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
@@ -2136,6 +2146,7 @@ struct i915_params {
2136 bool disable_display; 2146 bool disable_display;
2137 bool disable_vtd_wa; 2147 bool disable_vtd_wa;
2138 int use_mmio_flip; 2148 int use_mmio_flip;
2149 bool mmio_debug;
2139}; 2150};
2140extern struct i915_params i915 __read_mostly; 2151extern struct i915_params i915 __read_mostly;
2141 2152
@@ -2692,8 +2703,6 @@ extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
2692extern void intel_init_pch_refclk(struct drm_device *dev); 2703extern void intel_init_pch_refclk(struct drm_device *dev);
2693extern void gen6_set_rps(struct drm_device *dev, u8 val); 2704extern void gen6_set_rps(struct drm_device *dev, u8 val);
2694extern void valleyview_set_rps(struct drm_device *dev, u8 val); 2705extern void valleyview_set_rps(struct drm_device *dev, u8 val);
2695extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
2696extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
2697extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, 2706extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
2698 bool enable); 2707 bool enable);
2699extern void intel_detect_pch(struct drm_device *dev); 2708extern void intel_detect_pch(struct drm_device *dev);
@@ -2803,10 +2812,10 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
2803 2812
2804static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev) 2813static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
2805{ 2814{
2806 if (HAS_PCH_SPLIT(dev)) 2815 if (IS_VALLEYVIEW(dev))
2807 return CPU_VGACNTRL;
2808 else if (IS_VALLEYVIEW(dev))
2809 return VLV_VGACNTRL; 2816 return VLV_VGACNTRL;
2817 else if (INTEL_INFO(dev)->gen >= 5)
2818 return CPU_VGACNTRL;
2810 else 2819 else
2811 return VGACNTRL; 2820 return VGACNTRL;
2812} 2821}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e5d4d73a9844..dcd8d7b42552 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1161,7 +1161,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
1161 unsigned long timeout_expire; 1161 unsigned long timeout_expire;
1162 int ret; 1162 int ret;
1163 1163
1164 WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n"); 1164 WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
1165 1165
1166 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) 1166 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1167 return 0; 1167 return 0;
@@ -2927,8 +2927,6 @@ int i915_vma_unbind(struct i915_vma *vma)
2927 2927
2928 vma->unbind_vma(vma); 2928 vma->unbind_vma(vma);
2929 2929
2930 i915_gem_gtt_finish_object(obj);
2931
2932 list_del_init(&vma->mm_list); 2930 list_del_init(&vma->mm_list);
2933 /* Avoid an unnecessary call to unbind on rebind. */ 2931 /* Avoid an unnecessary call to unbind on rebind. */
2934 if (i915_is_ggtt(vma->vm)) 2932 if (i915_is_ggtt(vma->vm))
@@ -2939,8 +2937,10 @@ int i915_vma_unbind(struct i915_vma *vma)
2939 2937
2940 /* Since the unbound list is global, only move to that list if 2938 /* Since the unbound list is global, only move to that list if
2941 * no more VMAs exist. */ 2939 * no more VMAs exist. */
2942 if (list_empty(&obj->vma_list)) 2940 if (list_empty(&obj->vma_list)) {
2941 i915_gem_gtt_finish_object(obj);
2943 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); 2942 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2943 }
2944 2944
2945 /* And finally now the object is completely decoupled from this vma, 2945 /* And finally now the object is completely decoupled from this vma,
2946 * we can drop its hold on the backing storage and allow it to be 2946 * we can drop its hold on the backing storage and allow it to be
@@ -5194,8 +5194,11 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
5194 bool was_interruptible; 5194 bool was_interruptible;
5195 bool unlock; 5195 bool unlock;
5196 5196
5197 while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) 5197 while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
5198 schedule_timeout_killable(1); 5198 schedule_timeout_killable(1);
5199 if (fatal_signal_pending(current))
5200 return NOTIFY_DONE;
5201 }
5199 if (timeout == 0) { 5202 if (timeout == 0) {
5200 pr_err("Unable to purge GPU memory due lock contention.\n"); 5203 pr_err("Unable to purge GPU memory due lock contention.\n");
5201 return NOTIFY_DONE; 5204 return NOTIFY_DONE;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index de72a2859f32..3b99390e467a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -186,14 +186,12 @@ void i915_gem_context_free(struct kref *ctx_ref)
186 /* We refcount even the aliasing PPGTT to keep the code symmetric */ 186 /* We refcount even the aliasing PPGTT to keep the code symmetric */
187 if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev)) 187 if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
188 ppgtt = ctx_to_ppgtt(ctx); 188 ppgtt = ctx_to_ppgtt(ctx);
189
190 /* XXX: Free up the object before tearing down the address space, in
191 * case we're bound in the PPGTT */
192 drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
193 } 189 }
194 190
195 if (ppgtt) 191 if (ppgtt)
196 kref_put(&ppgtt->ref, ppgtt_release); 192 kref_put(&ppgtt->ref, ppgtt_release);
193 if (ctx->legacy_hw_ctx.rcs_state)
194 drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
197 list_del(&ctx->link); 195 list_del(&ctx->link);
198 kfree(ctx); 196 kfree(ctx);
199} 197}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a4153eef48c2..5188936bca0a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -64,7 +64,8 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
64#endif 64#endif
65 65
66 /* Early VLV doesn't have this */ 66 /* Early VLV doesn't have this */
67 if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) { 67 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
68 dev->pdev->revision < 0xb) {
68 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); 69 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
69 return 0; 70 return 0;
70 } 71 }
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 21ea92886a56..fe69fc837d9e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -40,19 +40,87 @@ struct i915_mmu_notifier {
40 struct hlist_node node; 40 struct hlist_node node;
41 struct mmu_notifier mn; 41 struct mmu_notifier mn;
42 struct rb_root objects; 42 struct rb_root objects;
43 struct list_head linear;
43 struct drm_device *dev; 44 struct drm_device *dev;
44 struct mm_struct *mm; 45 struct mm_struct *mm;
45 struct work_struct work; 46 struct work_struct work;
46 unsigned long count; 47 unsigned long count;
47 unsigned long serial; 48 unsigned long serial;
49 bool has_linear;
48}; 50};
49 51
50struct i915_mmu_object { 52struct i915_mmu_object {
51 struct i915_mmu_notifier *mmu; 53 struct i915_mmu_notifier *mmu;
52 struct interval_tree_node it; 54 struct interval_tree_node it;
55 struct list_head link;
53 struct drm_i915_gem_object *obj; 56 struct drm_i915_gem_object *obj;
57 bool is_linear;
54}; 58};
55 59
60static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
61{
62 struct drm_device *dev = obj->base.dev;
63 unsigned long end;
64
65 mutex_lock(&dev->struct_mutex);
66 /* Cancel any active worker and force us to re-evaluate gup */
67 obj->userptr.work = NULL;
68
69 if (obj->pages != NULL) {
70 struct drm_i915_private *dev_priv = to_i915(dev);
71 struct i915_vma *vma, *tmp;
72 bool was_interruptible;
73
74 was_interruptible = dev_priv->mm.interruptible;
75 dev_priv->mm.interruptible = false;
76
77 list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
78 int ret = i915_vma_unbind(vma);
79 WARN_ON(ret && ret != -EIO);
80 }
81 WARN_ON(i915_gem_object_put_pages(obj));
82
83 dev_priv->mm.interruptible = was_interruptible;
84 }
85
86 end = obj->userptr.ptr + obj->base.size;
87
88 drm_gem_object_unreference(&obj->base);
89 mutex_unlock(&dev->struct_mutex);
90
91 return end;
92}
93
94static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
95 struct mm_struct *mm,
96 unsigned long start,
97 unsigned long end)
98{
99 struct i915_mmu_object *mmu;
100 unsigned long serial;
101
102restart:
103 serial = mn->serial;
104 list_for_each_entry(mmu, &mn->linear, link) {
105 struct drm_i915_gem_object *obj;
106
107 if (mmu->it.last < start || mmu->it.start > end)
108 continue;
109
110 obj = mmu->obj;
111 drm_gem_object_reference(&obj->base);
112 spin_unlock(&mn->lock);
113
114 cancel_userptr(obj);
115
116 spin_lock(&mn->lock);
117 if (serial != mn->serial)
118 goto restart;
119 }
120
121 return NULL;
122}
123
56static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, 124static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
57 struct mm_struct *mm, 125 struct mm_struct *mm,
58 unsigned long start, 126 unsigned long start,
@@ -60,16 +128,18 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
60{ 128{
61 struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn); 129 struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
62 struct interval_tree_node *it = NULL; 130 struct interval_tree_node *it = NULL;
131 unsigned long next = start;
63 unsigned long serial = 0; 132 unsigned long serial = 0;
64 133
65 end--; /* interval ranges are inclusive, but invalidate range is exclusive */ 134 end--; /* interval ranges are inclusive, but invalidate range is exclusive */
66 while (start < end) { 135 while (next < end) {
67 struct drm_i915_gem_object *obj; 136 struct drm_i915_gem_object *obj = NULL;
68 137
69 obj = NULL;
70 spin_lock(&mn->lock); 138 spin_lock(&mn->lock);
71 if (serial == mn->serial) 139 if (mn->has_linear)
72 it = interval_tree_iter_next(it, start, end); 140 it = invalidate_range__linear(mn, mm, start, end);
141 else if (serial == mn->serial)
142 it = interval_tree_iter_next(it, next, end);
73 else 143 else
74 it = interval_tree_iter_first(&mn->objects, start, end); 144 it = interval_tree_iter_first(&mn->objects, start, end);
75 if (it != NULL) { 145 if (it != NULL) {
@@ -81,31 +151,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
81 if (obj == NULL) 151 if (obj == NULL)
82 return; 152 return;
83 153
84 mutex_lock(&mn->dev->struct_mutex); 154 next = cancel_userptr(obj);
85 /* Cancel any active worker and force us to re-evaluate gup */
86 obj->userptr.work = NULL;
87
88 if (obj->pages != NULL) {
89 struct drm_i915_private *dev_priv = to_i915(mn->dev);
90 struct i915_vma *vma, *tmp;
91 bool was_interruptible;
92
93 was_interruptible = dev_priv->mm.interruptible;
94 dev_priv->mm.interruptible = false;
95
96 list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
97 int ret = i915_vma_unbind(vma);
98 WARN_ON(ret && ret != -EIO);
99 }
100 WARN_ON(i915_gem_object_put_pages(obj));
101
102 dev_priv->mm.interruptible = was_interruptible;
103 }
104
105 start = obj->userptr.ptr + obj->base.size;
106
107 drm_gem_object_unreference(&obj->base);
108 mutex_unlock(&mn->dev->struct_mutex);
109 } 155 }
110} 156}
111 157
@@ -150,7 +196,9 @@ i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
150 mmu->mm = mm; 196 mmu->mm = mm;
151 mmu->objects = RB_ROOT; 197 mmu->objects = RB_ROOT;
152 mmu->count = 0; 198 mmu->count = 0;
153 mmu->serial = 0; 199 mmu->serial = 1;
200 INIT_LIST_HEAD(&mmu->linear);
201 mmu->has_linear = false;
154 202
155 /* Protected by mmap_sem (write-lock) */ 203 /* Protected by mmap_sem (write-lock) */
156 ret = __mmu_notifier_register(&mmu->mn, mm); 204 ret = __mmu_notifier_register(&mmu->mn, mm);
@@ -197,6 +245,17 @@ static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
197 mmu->serial = 1; 245 mmu->serial = 1;
198} 246}
199 247
248static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
249{
250 struct i915_mmu_object *mn;
251
252 list_for_each_entry(mn, &mmu->linear, link)
253 if (mn->is_linear)
254 return true;
255
256 return false;
257}
258
200static void 259static void
201i915_mmu_notifier_del(struct i915_mmu_notifier *mmu, 260i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
202 struct i915_mmu_object *mn) 261 struct i915_mmu_object *mn)
@@ -204,7 +263,11 @@ i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
204 lockdep_assert_held(&mmu->dev->struct_mutex); 263 lockdep_assert_held(&mmu->dev->struct_mutex);
205 264
206 spin_lock(&mmu->lock); 265 spin_lock(&mmu->lock);
207 interval_tree_remove(&mn->it, &mmu->objects); 266 list_del(&mn->link);
267 if (mn->is_linear)
268 mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
269 else
270 interval_tree_remove(&mn->it, &mmu->objects);
208 __i915_mmu_notifier_update_serial(mmu); 271 __i915_mmu_notifier_update_serial(mmu);
209 spin_unlock(&mmu->lock); 272 spin_unlock(&mmu->lock);
210 273
@@ -230,7 +293,6 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
230 */ 293 */
231 i915_gem_retire_requests(mmu->dev); 294 i915_gem_retire_requests(mmu->dev);
232 295
233 /* Disallow overlapping userptr objects */
234 spin_lock(&mmu->lock); 296 spin_lock(&mmu->lock);
235 it = interval_tree_iter_first(&mmu->objects, 297 it = interval_tree_iter_first(&mmu->objects,
236 mn->it.start, mn->it.last); 298 mn->it.start, mn->it.last);
@@ -243,14 +305,22 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
243 * to flush their object references upon which the object will 305 * to flush their object references upon which the object will
244 * be removed from the interval-tree, or the the range is 306 * be removed from the interval-tree, or the the range is
245 * still in use by another client and the overlap is invalid. 307 * still in use by another client and the overlap is invalid.
308 *
309 * If we do have an overlap, we cannot use the interval tree
310 * for fast range invalidation.
246 */ 311 */
247 312
248 obj = container_of(it, struct i915_mmu_object, it)->obj; 313 obj = container_of(it, struct i915_mmu_object, it)->obj;
249 ret = obj->userptr.workers ? -EAGAIN : -EINVAL; 314 if (!obj->userptr.workers)
250 } else { 315 mmu->has_linear = mn->is_linear = true;
316 else
317 ret = -EAGAIN;
318 } else
251 interval_tree_insert(&mn->it, &mmu->objects); 319 interval_tree_insert(&mn->it, &mmu->objects);
320
321 if (ret == 0) {
322 list_add(&mn->link, &mmu->linear);
252 __i915_mmu_notifier_update_serial(mmu); 323 __i915_mmu_notifier_update_serial(mmu);
253 ret = 0;
254 } 324 }
255 spin_unlock(&mmu->lock); 325 spin_unlock(&mmu->lock);
256 mutex_unlock(&mmu->dev->struct_mutex); 326 mutex_unlock(&mmu->dev->struct_mutex);
@@ -611,12 +681,11 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
611 * We impose several restrictions upon the memory being mapped 681 * We impose several restrictions upon the memory being mapped
612 * into the GPU. 682 * into the GPU.
613 * 1. It must be page aligned (both start/end addresses, i.e ptr and size). 683 * 1. It must be page aligned (both start/end addresses, i.e ptr and size).
614 * 2. It cannot overlap any other userptr object in the same address space. 684 * 2. It must be normal system memory, not a pointer into another map of IO
615 * 3. It must be normal system memory, not a pointer into another map of IO
616 * space (e.g. it must not be a GTT mmapping of another object). 685 * space (e.g. it must not be a GTT mmapping of another object).
617 * 4. We only allow a bo as large as we could in theory map into the GTT, 686 * 3. We only allow a bo as large as we could in theory map into the GTT,
618 * that is we limit the size to the total size of the GTT. 687 * that is we limit the size to the total size of the GTT.
619 * 5. The bo is marked as being snoopable. The backing pages are left 688 * 4. The bo is marked as being snoopable. The backing pages are left
620 * accessible directly by the CPU, but reads and writes by the GPU may 689 * accessible directly by the CPU, but reads and writes by the GPU may
621 * incur the cost of a snoop (unless you have an LLC architecture). 690 * incur the cost of a snoop (unless you have an LLC architecture).
622 * 691 *
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 45b6191efb58..0b3f69439451 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -764,7 +764,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
764 struct intel_engine_cs *ring, 764 struct intel_engine_cs *ring,
765 struct drm_i915_error_ring *ering) 765 struct drm_i915_error_ring *ering)
766{ 766{
767 struct intel_engine_cs *useless; 767 struct intel_engine_cs *to;
768 int i; 768 int i;
769 769
770 if (!i915_semaphore_is_enabled(dev_priv->dev)) 770 if (!i915_semaphore_is_enabled(dev_priv->dev))
@@ -776,13 +776,20 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
776 dev_priv->semaphore_obj, 776 dev_priv->semaphore_obj,
777 &dev_priv->gtt.base); 777 &dev_priv->gtt.base);
778 778
779 for_each_ring(useless, dev_priv, i) { 779 for_each_ring(to, dev_priv, i) {
780 u16 signal_offset = 780 int idx;
781 (GEN8_SIGNAL_OFFSET(ring, i) & PAGE_MASK) / 4; 781 u16 signal_offset;
782 u32 *tmp = error->semaphore_obj->pages[0]; 782 u32 *tmp;
783 783
784 ering->semaphore_mboxes[i] = tmp[signal_offset]; 784 if (ring == to)
785 ering->semaphore_seqno[i] = ring->semaphore.sync_seqno[i]; 785 continue;
786
787 signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & PAGE_MASK) / 4;
788 tmp = error->semaphore_obj->pages[0];
789 idx = intel_ring_sync_index(ring, to);
790
791 ering->semaphore_mboxes[idx] = tmp[signal_offset];
792 ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
786 } 793 }
787} 794}
788 795
@@ -920,6 +927,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *ring,
920 return; 927 return;
921 928
922 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 929 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
930 if (!i915_gem_obj_ggtt_bound(obj))
931 continue;
932
923 if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) { 933 if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
924 ering->ctx = i915_error_ggtt_object_create(dev_priv, obj); 934 ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
925 break; 935 break;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 07e1a409e488..b55b2b9075b2 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -136,7 +136,7 @@ ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
136{ 136{
137 assert_spin_locked(&dev_priv->irq_lock); 137 assert_spin_locked(&dev_priv->irq_lock);
138 138
139 if (WARN_ON(dev_priv->pm.irqs_disabled)) 139 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
140 return; 140 return;
141 141
142 if ((dev_priv->irq_mask & mask) != 0) { 142 if ((dev_priv->irq_mask & mask) != 0) {
@@ -151,7 +151,7 @@ ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
151{ 151{
152 assert_spin_locked(&dev_priv->irq_lock); 152 assert_spin_locked(&dev_priv->irq_lock);
153 153
154 if (WARN_ON(dev_priv->pm.irqs_disabled)) 154 if (!intel_irqs_enabled(dev_priv))
155 return; 155 return;
156 156
157 if ((dev_priv->irq_mask & mask) != mask) { 157 if ((dev_priv->irq_mask & mask) != mask) {
@@ -173,7 +173,7 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
173{ 173{
174 assert_spin_locked(&dev_priv->irq_lock); 174 assert_spin_locked(&dev_priv->irq_lock);
175 175
176 if (WARN_ON(dev_priv->pm.irqs_disabled)) 176 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
177 return; 177 return;
178 178
179 dev_priv->gt_irq_mask &= ~interrupt_mask; 179 dev_priv->gt_irq_mask &= ~interrupt_mask;
@@ -182,12 +182,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
182 POSTING_READ(GTIMR); 182 POSTING_READ(GTIMR);
183} 183}
184 184
185void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 185void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
186{ 186{
187 ilk_update_gt_irq(dev_priv, mask, mask); 187 ilk_update_gt_irq(dev_priv, mask, mask);
188} 188}
189 189
190void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 190void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
191{ 191{
192 ilk_update_gt_irq(dev_priv, mask, 0); 192 ilk_update_gt_irq(dev_priv, mask, 0);
193} 193}
@@ -206,7 +206,7 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
206 206
207 assert_spin_locked(&dev_priv->irq_lock); 207 assert_spin_locked(&dev_priv->irq_lock);
208 208
209 if (WARN_ON(dev_priv->pm.irqs_disabled)) 209 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
210 return; 210 return;
211 211
212 new_val = dev_priv->pm_irq_mask; 212 new_val = dev_priv->pm_irq_mask;
@@ -220,12 +220,12 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
220 } 220 }
221} 221}
222 222
223void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 223void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
224{ 224{
225 snb_update_pm_irq(dev_priv, mask, mask); 225 snb_update_pm_irq(dev_priv, mask, mask);
226} 226}
227 227
228void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 228void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
229{ 229{
230 snb_update_pm_irq(dev_priv, mask, 0); 230 snb_update_pm_irq(dev_priv, mask, 0);
231} 231}
@@ -264,7 +264,7 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
264 264
265 assert_spin_locked(&dev_priv->irq_lock); 265 assert_spin_locked(&dev_priv->irq_lock);
266 266
267 if (WARN_ON(dev_priv->pm.irqs_disabled)) 267 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
268 return; 268 return;
269 269
270 new_val = dev_priv->pm_irq_mask; 270 new_val = dev_priv->pm_irq_mask;
@@ -278,12 +278,12 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
278 } 278 }
279} 279}
280 280
281void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 281void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
282{ 282{
283 bdw_update_pm_irq(dev_priv, mask, mask); 283 bdw_update_pm_irq(dev_priv, mask, mask);
284} 284}
285 285
286void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 286void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
287{ 287{
288 bdw_update_pm_irq(dev_priv, mask, 0); 288 bdw_update_pm_irq(dev_priv, mask, 0);
289} 289}
@@ -420,7 +420,7 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
420 420
421 assert_spin_locked(&dev_priv->irq_lock); 421 assert_spin_locked(&dev_priv->irq_lock);
422 422
423 if (WARN_ON(dev_priv->pm.irqs_disabled)) 423 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
424 return; 424 return;
425 425
426 I915_WRITE(SDEIMR, sdeimr); 426 I915_WRITE(SDEIMR, sdeimr);
@@ -1407,11 +1407,11 @@ static void gen6_pm_rps_work(struct work_struct *work)
1407 spin_lock_irq(&dev_priv->irq_lock); 1407 spin_lock_irq(&dev_priv->irq_lock);
1408 pm_iir = dev_priv->rps.pm_iir; 1408 pm_iir = dev_priv->rps.pm_iir;
1409 dev_priv->rps.pm_iir = 0; 1409 dev_priv->rps.pm_iir = 0;
1410 if (IS_BROADWELL(dev_priv->dev)) 1410 if (INTEL_INFO(dev_priv->dev)->gen >= 8)
1411 bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 1411 gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1412 else { 1412 else {
1413 /* Make sure not to corrupt PMIMR state used by ringbuffer */ 1413 /* Make sure not to corrupt PMIMR state used by ringbuffer */
1414 snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 1414 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1415 } 1415 }
1416 spin_unlock_irq(&dev_priv->irq_lock); 1416 spin_unlock_irq(&dev_priv->irq_lock);
1417 1417
@@ -1553,7 +1553,7 @@ static void ivybridge_parity_work(struct work_struct *work)
1553out: 1553out:
1554 WARN_ON(dev_priv->l3_parity.which_slice); 1554 WARN_ON(dev_priv->l3_parity.which_slice);
1555 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1555 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1556 ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); 1556 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1557 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1557 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1558 1558
1559 mutex_unlock(&dev_priv->dev->struct_mutex); 1559 mutex_unlock(&dev_priv->dev->struct_mutex);
@@ -1567,7 +1567,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1567 return; 1567 return;
1568 1568
1569 spin_lock(&dev_priv->irq_lock); 1569 spin_lock(&dev_priv->irq_lock);
1570 ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); 1570 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1571 spin_unlock(&dev_priv->irq_lock); 1571 spin_unlock(&dev_priv->irq_lock);
1572 1572
1573 iir &= GT_PARITY_ERROR(dev); 1573 iir &= GT_PARITY_ERROR(dev);
@@ -1622,7 +1622,7 @@ static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1622 1622
1623 spin_lock(&dev_priv->irq_lock); 1623 spin_lock(&dev_priv->irq_lock);
1624 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; 1624 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1625 bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1625 gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1626 spin_unlock(&dev_priv->irq_lock); 1626 spin_unlock(&dev_priv->irq_lock);
1627 1627
1628 queue_work(dev_priv->wq, &dev_priv->rps.work); 1628 queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1969,7 +1969,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1969 if (pm_iir & dev_priv->pm_rps_events) { 1969 if (pm_iir & dev_priv->pm_rps_events) {
1970 spin_lock(&dev_priv->irq_lock); 1970 spin_lock(&dev_priv->irq_lock);
1971 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; 1971 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1972 snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1972 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1973 spin_unlock(&dev_priv->irq_lock); 1973 spin_unlock(&dev_priv->irq_lock);
1974 1974
1975 queue_work(dev_priv->wq, &dev_priv->rps.work); 1975 queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -3467,7 +3467,9 @@ static void gen8_irq_reset(struct drm_device *dev)
3467 gen8_gt_irq_reset(dev_priv); 3467 gen8_gt_irq_reset(dev_priv);
3468 3468
3469 for_each_pipe(pipe) 3469 for_each_pipe(pipe)
3470 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3470 if (intel_display_power_enabled(dev_priv,
3471 POWER_DOMAIN_PIPE(pipe)))
3472 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3471 3473
3472 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3474 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3473 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3475 GEN5_IRQ_RESET(GEN8_DE_MISC_);
@@ -3476,6 +3478,18 @@ static void gen8_irq_reset(struct drm_device *dev)
3476 ibx_irq_reset(dev); 3478 ibx_irq_reset(dev);
3477} 3479}
3478 3480
3481void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3482{
3483 unsigned long irqflags;
3484
3485 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3486 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3487 ~dev_priv->de_irq_mask[PIPE_B]);
3488 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3489 ~dev_priv->de_irq_mask[PIPE_C]);
3490 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3491}
3492
3479static void cherryview_irq_preinstall(struct drm_device *dev) 3493static void cherryview_irq_preinstall(struct drm_device *dev)
3480{ 3494{
3481 struct drm_i915_private *dev_priv = dev->dev_private; 3495 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3800,8 +3814,11 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3800 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3814 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3801 3815
3802 for_each_pipe(pipe) 3816 for_each_pipe(pipe)
3803 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe], 3817 if (intel_display_power_enabled(dev_priv,
3804 de_pipe_enables); 3818 POWER_DOMAIN_PIPE(pipe)))
3819 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3820 dev_priv->de_irq_mask[pipe],
3821 de_pipe_enables);
3805 3822
3806 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A); 3823 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
3807} 3824}
@@ -4652,6 +4669,9 @@ void intel_irq_init(struct drm_device *dev)
4652 4669
4653 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 4670 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4654 4671
4672 /* Haven't installed the IRQ handler yet */
4673 dev_priv->pm._irqs_disabled = true;
4674
4655 if (IS_GEN2(dev)) { 4675 if (IS_GEN2(dev)) {
4656 dev->max_vblank_count = 0; 4676 dev->max_vblank_count = 0;
4657 dev->driver->get_vblank_counter = i8xx_get_vblank_counter; 4677 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
@@ -4759,7 +4779,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
4759 struct drm_i915_private *dev_priv = dev->dev_private; 4779 struct drm_i915_private *dev_priv = dev->dev_private;
4760 4780
4761 dev->driver->irq_uninstall(dev); 4781 dev->driver->irq_uninstall(dev);
4762 dev_priv->pm.irqs_disabled = true; 4782 dev_priv->pm._irqs_disabled = true;
4763} 4783}
4764 4784
4765/* Restore interrupts so we can recover from runtime PM. */ 4785/* Restore interrupts so we can recover from runtime PM. */
@@ -4767,7 +4787,7 @@ void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
4767{ 4787{
4768 struct drm_i915_private *dev_priv = dev->dev_private; 4788 struct drm_i915_private *dev_priv = dev->dev_private;
4769 4789
4770 dev_priv->pm.irqs_disabled = false; 4790 dev_priv->pm._irqs_disabled = false;
4771 dev->driver->irq_preinstall(dev); 4791 dev->driver->irq_preinstall(dev);
4772 dev->driver->irq_postinstall(dev); 4792 dev->driver->irq_postinstall(dev);
4773} 4793}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 81457293cd3e..62ee8308d682 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -37,7 +37,7 @@ struct i915_params i915 __read_mostly = {
37 .enable_fbc = -1, 37 .enable_fbc = -1,
38 .enable_hangcheck = true, 38 .enable_hangcheck = true,
39 .enable_ppgtt = -1, 39 .enable_ppgtt = -1,
40 .enable_psr = 0, 40 .enable_psr = 1,
41 .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT), 41 .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
42 .disable_power_well = 1, 42 .disable_power_well = 1,
43 .enable_ips = 1, 43 .enable_ips = 1,
@@ -49,6 +49,7 @@ struct i915_params i915 __read_mostly = {
49 .enable_cmd_parser = 1, 49 .enable_cmd_parser = 1,
50 .disable_vtd_wa = 0, 50 .disable_vtd_wa = 0,
51 .use_mmio_flip = 0, 51 .use_mmio_flip = 0,
52 .mmio_debug = 0,
52}; 53};
53 54
54module_param_named(modeset, i915.modeset, int, 0400); 55module_param_named(modeset, i915.modeset, int, 0400);
@@ -118,7 +119,7 @@ MODULE_PARM_DESC(enable_ppgtt,
118 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)"); 119 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
119 120
120module_param_named(enable_psr, i915.enable_psr, int, 0600); 121module_param_named(enable_psr, i915.enable_psr, int, 0600);
121MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)"); 122MODULE_PARM_DESC(enable_psr, "Enable PSR (default: true)");
122 123
123module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600); 124module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
124MODULE_PARM_DESC(preliminary_hw_support, 125MODULE_PARM_DESC(preliminary_hw_support,
@@ -161,3 +162,8 @@ MODULE_PARM_DESC(enable_cmd_parser,
161module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600); 162module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600);
162MODULE_PARM_DESC(use_mmio_flip, 163MODULE_PARM_DESC(use_mmio_flip,
163 "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)"); 164 "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
165
166module_param_named(mmio_debug, i915.mmio_debug, bool, 0600);
167MODULE_PARM_DESC(mmio_debug,
168 "Enable the MMIO debug code (default: false). This may negatively "
169 "affect performance.");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b0036cd3fe19..66dc39fb53c0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2281,7 +2281,7 @@ enum punit_power_well {
2281/* Same as Haswell, but 72064 bytes now. */ 2281/* Same as Haswell, but 72064 bytes now. */
2282#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE) 2282#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
2283 2283
2284 2284#define CHV_CLK_CTL1 0x101100
2285#define VLV_CLK_CTL2 0x101104 2285#define VLV_CLK_CTL2 0x101104
2286#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28 2286#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
2287 2287
@@ -5538,6 +5538,12 @@ enum punit_power_well {
5538 GEN6_PM_RP_DOWN_THRESHOLD | \ 5538 GEN6_PM_RP_DOWN_THRESHOLD | \
5539 GEN6_PM_RP_DOWN_TIMEOUT) 5539 GEN6_PM_RP_DOWN_TIMEOUT)
5540 5540
5541#define CHV_CZ_CLOCK_FREQ_MODE_200 200
5542#define CHV_CZ_CLOCK_FREQ_MODE_267 267
5543#define CHV_CZ_CLOCK_FREQ_MODE_320 320
5544#define CHV_CZ_CLOCK_FREQ_MODE_333 333
5545#define CHV_CZ_CLOCK_FREQ_MODE_400 400
5546
5541#define GEN7_GT_SCRATCH_BASE 0x4F100 5547#define GEN7_GT_SCRATCH_BASE 0x4F100
5542#define GEN7_GT_SCRATCH_REG_NUM 8 5548#define GEN7_GT_SCRATCH_REG_NUM 8
5543 5549
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 86ce39aad0ff..ae7fd8fc27f0 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -47,22 +47,45 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
47 47
48 intel_runtime_pm_get(dev_priv); 48 intel_runtime_pm_get(dev_priv);
49 49
50 /* On VLV, residency time is in CZ units rather than 1.28us */ 50 /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
51 if (IS_VALLEYVIEW(dev)) { 51 if (IS_VALLEYVIEW(dev)) {
52 u32 clkctl2; 52 u32 reg, czcount_30ns;
53 53
54 clkctl2 = I915_READ(VLV_CLK_CTL2) >> 54 if (IS_CHERRYVIEW(dev))
55 CLK_CTL2_CZCOUNT_30NS_SHIFT; 55 reg = CHV_CLK_CTL1;
56 if (!clkctl2) { 56 else
57 WARN(!clkctl2, "bogus CZ count value"); 57 reg = VLV_CLK_CTL2;
58
59 czcount_30ns = I915_READ(reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;
60
61 if (!czcount_30ns) {
62 WARN(!czcount_30ns, "bogus CZ count value");
58 ret = 0; 63 ret = 0;
59 goto out; 64 goto out;
60 } 65 }
61 units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2); 66
67 units = 0;
68 div = 1000000ULL;
69
70 if (IS_CHERRYVIEW(dev)) {
71 /* Special case for 320Mhz */
72 if (czcount_30ns == 1) {
73 div = 10000000ULL;
74 units = 3125ULL;
75 } else {
76 /* chv counts are one less */
77 czcount_30ns += 1;
78 }
79 }
80
81 if (units == 0)
82 units = DIV_ROUND_UP_ULL(30ULL * bias,
83 (u64)czcount_30ns);
84
62 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) 85 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
63 units <<= 8; 86 units <<= 8;
64 87
65 div = 1000000ULL * bias; 88 div = div * bias;
66 } 89 }
67 90
68 raw_time = I915_READ(reg) * units; 91 raw_time = I915_READ(reg) * units;
@@ -461,11 +484,20 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
461 mutex_unlock(&dev->struct_mutex); 484 mutex_unlock(&dev->struct_mutex);
462 485
463 if (attr == &dev_attr_gt_RP0_freq_mhz) { 486 if (attr == &dev_attr_gt_RP0_freq_mhz) {
464 val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER; 487 if (IS_VALLEYVIEW(dev))
488 val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
489 else
490 val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
465 } else if (attr == &dev_attr_gt_RP1_freq_mhz) { 491 } else if (attr == &dev_attr_gt_RP1_freq_mhz) {
466 val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER; 492 if (IS_VALLEYVIEW(dev))
493 val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
494 else
495 val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
467 } else if (attr == &dev_attr_gt_RPn_freq_mhz) { 496 } else if (attr == &dev_attr_gt_RPn_freq_mhz) {
468 val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER; 497 if (IS_VALLEYVIEW(dev))
498 val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq);
499 else
500 val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
469 } else { 501 } else {
470 BUG(); 502 BUG();
471 } 503 }
@@ -486,6 +518,9 @@ static const struct attribute *vlv_attrs[] = {
486 &dev_attr_gt_cur_freq_mhz.attr, 518 &dev_attr_gt_cur_freq_mhz.attr,
487 &dev_attr_gt_max_freq_mhz.attr, 519 &dev_attr_gt_max_freq_mhz.attr,
488 &dev_attr_gt_min_freq_mhz.attr, 520 &dev_attr_gt_min_freq_mhz.attr,
521 &dev_attr_gt_RP0_freq_mhz.attr,
522 &dev_attr_gt_RP1_freq_mhz.attr,
523 &dev_attr_gt_RPn_freq_mhz.attr,
489 &dev_attr_vlv_rpe_freq_mhz.attr, 524 &dev_attr_vlv_rpe_freq_mhz.attr,
490 NULL, 525 NULL,
491}; 526};
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 827498e081df..608ed302f24d 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -336,11 +336,12 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
336 336
337 dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; 337 dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
338 dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; 338 dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
339 dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
339 DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, " 340 DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
340 "active %s, min brightness %u, level %u\n", 341 "active %s, min brightness %u, level %u\n",
341 dev_priv->vbt.backlight.pwm_freq_hz, 342 dev_priv->vbt.backlight.pwm_freq_hz,
342 dev_priv->vbt.backlight.active_low_pwm ? "low" : "high", 343 dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
343 entry->min_brightness, 344 dev_priv->vbt.backlight.min_brightness,
344 backlight_data->level[panel_type]); 345 backlight_data->level[panel_type]);
345} 346}
346 347
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 88db4b6b6884..2efaf8e8d9c4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -660,8 +660,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
660 struct intel_load_detect_pipe tmp; 660 struct intel_load_detect_pipe tmp;
661 struct drm_modeset_acquire_ctx ctx; 661 struct drm_modeset_acquire_ctx ctx;
662 662
663 intel_runtime_pm_get(dev_priv);
664
665 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", 663 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
666 connector->base.id, connector->name, 664 connector->base.id, connector->name,
667 force); 665 force);
@@ -713,8 +711,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
713 711
714out: 712out:
715 intel_display_power_put(dev_priv, power_domain); 713 intel_display_power_put(dev_priv, power_domain);
716 intel_runtime_pm_put(dev_priv);
717
718 return status; 714 return status;
719} 715}
720 716
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 2d73430a0d27..5db0b5552e39 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1243,7 +1243,7 @@ static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1243 return val & WRPLL_PLL_ENABLE; 1243 return val & WRPLL_PLL_ENABLE;
1244} 1244}
1245 1245
1246static char *hsw_ddi_pll_names[] = { 1246static const char * const hsw_ddi_pll_names[] = {
1247 "WRPLL 1", 1247 "WRPLL 1",
1248 "WRPLL 2", 1248 "WRPLL 2",
1249}; 1249};
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7b542b477a4e..0f861301a94e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3855,7 +3855,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
3855 } 3855 }
3856 3856
3857 /* use legacy palette for Ironlake */ 3857 /* use legacy palette for Ironlake */
3858 if (HAS_PCH_SPLIT(dev)) 3858 if (!HAS_GMCH_DISPLAY(dev))
3859 palreg = LGC_PALETTE(pipe); 3859 palreg = LGC_PALETTE(pipe);
3860 3860
3861 /* Workaround : Do not read or write the pipe palette/gamma data while 3861 /* Workaround : Do not read or write the pipe palette/gamma data while
@@ -4894,35 +4894,21 @@ static void intel_crtc_update_sarea(struct drm_crtc *crtc,
4894 } 4894 }
4895} 4895}
4896 4896
4897/** 4897/* Master function to enable/disable CRTC and corresponding power wells */
4898 * Sets the power management mode of the pipe and plane. 4898void intel_crtc_control(struct drm_crtc *crtc, bool enable)
4899 */
4900void intel_crtc_update_dpms(struct drm_crtc *crtc)
4901{ 4899{
4902 struct drm_device *dev = crtc->dev; 4900 struct drm_device *dev = crtc->dev;
4903 struct drm_i915_private *dev_priv = dev->dev_private; 4901 struct drm_i915_private *dev_priv = dev->dev_private;
4904 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4902 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4905 struct intel_encoder *intel_encoder;
4906 enum intel_display_power_domain domain; 4903 enum intel_display_power_domain domain;
4907 unsigned long domains; 4904 unsigned long domains;
4908 bool enable = false;
4909
4910 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4911 enable |= intel_encoder->connectors_active;
4912 4905
4913 if (enable) { 4906 if (enable) {
4914 if (!intel_crtc->active) { 4907 if (!intel_crtc->active) {
4915 /* 4908 domains = get_crtc_power_domains(crtc);
4916 * FIXME: DDI plls and relevant code isn't converted 4909 for_each_power_domain(domain, domains)
4917 * yet, so do runtime PM for DPMS only for all other 4910 intel_display_power_get(dev_priv, domain);
4918 * platforms for now. 4911 intel_crtc->enabled_power_domains = domains;
4919 */
4920 if (!HAS_DDI(dev)) {
4921 domains = get_crtc_power_domains(crtc);
4922 for_each_power_domain(domain, domains)
4923 intel_display_power_get(dev_priv, domain);
4924 intel_crtc->enabled_power_domains = domains;
4925 }
4926 4912
4927 dev_priv->display.crtc_enable(crtc); 4913 dev_priv->display.crtc_enable(crtc);
4928 } 4914 }
@@ -4930,14 +4916,27 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc)
4930 if (intel_crtc->active) { 4916 if (intel_crtc->active) {
4931 dev_priv->display.crtc_disable(crtc); 4917 dev_priv->display.crtc_disable(crtc);
4932 4918
4933 if (!HAS_DDI(dev)) { 4919 domains = intel_crtc->enabled_power_domains;
4934 domains = intel_crtc->enabled_power_domains; 4920 for_each_power_domain(domain, domains)
4935 for_each_power_domain(domain, domains) 4921 intel_display_power_put(dev_priv, domain);
4936 intel_display_power_put(dev_priv, domain); 4922 intel_crtc->enabled_power_domains = 0;
4937 intel_crtc->enabled_power_domains = 0;
4938 }
4939 } 4923 }
4940 } 4924 }
4925}
4926
4927/**
4928 * Sets the power management mode of the pipe and plane.
4929 */
4930void intel_crtc_update_dpms(struct drm_crtc *crtc)
4931{
4932 struct drm_device *dev = crtc->dev;
4933 struct intel_encoder *intel_encoder;
4934 bool enable = false;
4935
4936 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4937 enable |= intel_encoder->connectors_active;
4938
4939 intel_crtc_control(crtc, enable);
4941 4940
4942 intel_crtc_update_sarea(crtc, enable); 4941 intel_crtc_update_sarea(crtc, enable);
4943} 4942}
@@ -4957,10 +4956,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
4957 intel_crtc_update_sarea(crtc, false); 4956 intel_crtc_update_sarea(crtc, false);
4958 dev_priv->display.off(crtc); 4957 dev_priv->display.off(crtc);
4959 4958
4960 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
4961 assert_cursor_disabled(dev_priv, pipe);
4962 assert_pipe_disabled(dev->dev_private, pipe);
4963
4964 if (crtc->primary->fb) { 4959 if (crtc->primary->fb) {
4965 mutex_lock(&dev->struct_mutex); 4960 mutex_lock(&dev->struct_mutex);
4966 intel_unpin_fb_obj(old_obj); 4961 intel_unpin_fb_obj(old_obj);
@@ -7360,8 +7355,9 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
7360 WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); 7355 WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
7361 WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 7356 WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
7362 "CPU PWM1 enabled\n"); 7357 "CPU PWM1 enabled\n");
7363 WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 7358 if (IS_HASWELL(dev))
7364 "CPU PWM2 enabled\n"); 7359 WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
7360 "CPU PWM2 enabled\n");
7365 WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 7361 WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
7366 "PCH PWM1 enabled\n"); 7362 "PCH PWM1 enabled\n");
7367 WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 7363 WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
@@ -7374,7 +7370,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
7374 * gen-specific and since we only disable LCPLL after we fully disable 7370 * gen-specific and since we only disable LCPLL after we fully disable
7375 * the interrupts, the check below should be enough. 7371 * the interrupts, the check below should be enough.
7376 */ 7372 */
7377 WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n"); 7373 WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
7378} 7374}
7379 7375
7380static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) 7376static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
@@ -8817,7 +8813,7 @@ static void intel_increase_pllclock(struct drm_device *dev,
8817 int dpll_reg = DPLL(pipe); 8813 int dpll_reg = DPLL(pipe);
8818 int dpll; 8814 int dpll;
8819 8815
8820 if (HAS_PCH_SPLIT(dev)) 8816 if (!HAS_GMCH_DISPLAY(dev))
8821 return; 8817 return;
8822 8818
8823 if (!dev_priv->lvds_downclock_avail) 8819 if (!dev_priv->lvds_downclock_avail)
@@ -8845,7 +8841,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
8845 struct drm_i915_private *dev_priv = dev->dev_private; 8841 struct drm_i915_private *dev_priv = dev->dev_private;
8846 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8842 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8847 8843
8848 if (HAS_PCH_SPLIT(dev)) 8844 if (!HAS_GMCH_DISPLAY(dev))
8849 return; 8845 return;
8850 8846
8851 if (!dev_priv->lvds_downclock_avail) 8847 if (!dev_priv->lvds_downclock_avail)
@@ -8976,7 +8972,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
8976 8972
8977 intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring); 8973 intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
8978 8974
8979 intel_edp_psr_exit(dev); 8975 intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
8980} 8976}
8981 8977
8982/** 8978/**
@@ -9002,7 +8998,7 @@ void intel_frontbuffer_flush(struct drm_device *dev,
9002 8998
9003 intel_mark_fb_busy(dev, frontbuffer_bits, NULL); 8999 intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
9004 9000
9005 intel_edp_psr_exit(dev); 9001 intel_edp_psr_flush(dev, frontbuffer_bits);
9006} 9002}
9007 9003
9008/** 9004/**
@@ -12825,6 +12821,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
12825 encoder->base.base.id, 12821 encoder->base.base.id,
12826 encoder->base.name); 12822 encoder->base.name);
12827 encoder->disable(encoder); 12823 encoder->disable(encoder);
12824 if (encoder->post_disable)
12825 encoder->post_disable(encoder);
12828 } 12826 }
12829 encoder->base.crtc = NULL; 12827 encoder->base.crtc = NULL;
12830 encoder->connectors_active = false; 12828 encoder->connectors_active = false;
@@ -13093,6 +13091,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
13093 */ 13091 */
13094 drm_irq_uninstall(dev); 13092 drm_irq_uninstall(dev);
13095 cancel_work_sync(&dev_priv->hotplug_work); 13093 cancel_work_sync(&dev_priv->hotplug_work);
13094 dev_priv->pm._irqs_disabled = true;
13095
13096 /* 13096 /*
13097 * Due to the hpd irq storm handling the hotplug work can re-arm the 13097 * Due to the hpd irq storm handling the hotplug work can re-arm the
13098 * poll handlers. Hence disable polling after hpd handling is shut down. 13098 * poll handlers. Hence disable polling after hpd handling is shut down.
@@ -13270,7 +13270,7 @@ intel_display_capture_error_state(struct drm_device *dev)
13270 13270
13271 error->pipe[i].source = I915_READ(PIPESRC(i)); 13271 error->pipe[i].source = I915_READ(PIPESRC(i));
13272 13272
13273 if (!HAS_PCH_SPLIT(dev)) 13273 if (HAS_GMCH_DISPLAY(dev))
13274 error->pipe[i].stat = I915_READ(PIPESTAT(i)); 13274 error->pipe[i].stat = I915_READ(PIPESTAT(i));
13275 } 13275 }
13276 13276
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index e7a7953da6d1..0f05b88a75e3 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1682,9 +1682,6 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
1682 struct drm_i915_private *dev_priv = dev->dev_private; 1682 struct drm_i915_private *dev_priv = dev->dev_private;
1683 struct edp_vsc_psr psr_vsc; 1683 struct edp_vsc_psr psr_vsc;
1684 1684
1685 if (dev_priv->psr.setup_done)
1686 return;
1687
1688 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ 1685 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
1689 memset(&psr_vsc, 0, sizeof(psr_vsc)); 1686 memset(&psr_vsc, 0, sizeof(psr_vsc));
1690 psr_vsc.sdp_header.HB0 = 0; 1687 psr_vsc.sdp_header.HB0 = 0;
@@ -1696,8 +1693,6 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
1696 /* Avoid continuous PSR exit by masking memup and hpd */ 1693 /* Avoid continuous PSR exit by masking memup and hpd */
1697 I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP | 1694 I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
1698 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP); 1695 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
1699
1700 dev_priv->psr.setup_done = true;
1701} 1696}
1702 1697
1703static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) 1698static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
@@ -1768,20 +1763,17 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1768 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1763 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1769 struct drm_device *dev = dig_port->base.base.dev; 1764 struct drm_device *dev = dig_port->base.base.dev;
1770 struct drm_i915_private *dev_priv = dev->dev_private; 1765 struct drm_i915_private *dev_priv = dev->dev_private;
1771 struct drm_crtc *crtc; 1766 struct drm_crtc *crtc = dig_port->base.base.crtc;
1772 struct intel_crtc *intel_crtc; 1767 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1773 struct drm_i915_gem_object *obj;
1774 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
1775 1768
1776 dev_priv->psr.source_ok = false; 1769 lockdep_assert_held(&dev_priv->psr.lock);
1770 lockdep_assert_held(&dev->struct_mutex);
1771 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
1772 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
1777 1773
1778 if (!HAS_PSR(dev)) { 1774 dev_priv->psr.source_ok = false;
1779 DRM_DEBUG_KMS("PSR not supported on this platform\n");
1780 return false;
1781 }
1782 1775
1783 if (IS_HASWELL(dev) && (intel_encoder->type != INTEL_OUTPUT_EDP || 1776 if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
1784 dig_port->port != PORT_A)) {
1785 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); 1777 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
1786 return false; 1778 return false;
1787 } 1779 }
@@ -1791,34 +1783,10 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1791 return false; 1783 return false;
1792 } 1784 }
1793 1785
1794 crtc = dig_port->base.base.crtc;
1795 if (crtc == NULL) {
1796 DRM_DEBUG_KMS("crtc not active for PSR\n");
1797 return false;
1798 }
1799
1800 intel_crtc = to_intel_crtc(crtc);
1801 if (!intel_crtc_active(crtc)) {
1802 DRM_DEBUG_KMS("crtc not active for PSR\n");
1803 return false;
1804 }
1805
1806 obj = intel_fb_obj(crtc->primary->fb);
1807 if (obj->tiling_mode != I915_TILING_X ||
1808 obj->fence_reg == I915_FENCE_REG_NONE) {
1809 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
1810 return false;
1811 }
1812
1813 /* Below limitations aren't valid for Broadwell */ 1786 /* Below limitations aren't valid for Broadwell */
1814 if (IS_BROADWELL(dev)) 1787 if (IS_BROADWELL(dev))
1815 goto out; 1788 goto out;
1816 1789
1817 if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
1818 DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
1819 return false;
1820 }
1821
1822 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) & 1790 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
1823 S3D_ENABLE) { 1791 S3D_ENABLE) {
1824 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); 1792 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
@@ -1841,8 +1809,9 @@ static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
1841 struct drm_device *dev = intel_dig_port->base.base.dev; 1809 struct drm_device *dev = intel_dig_port->base.base.dev;
1842 struct drm_i915_private *dev_priv = dev->dev_private; 1810 struct drm_i915_private *dev_priv = dev->dev_private;
1843 1811
1844 if (intel_edp_is_psr_enabled(dev)) 1812 WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
1845 return; 1813 WARN_ON(dev_priv->psr.active);
1814 lockdep_assert_held(&dev_priv->psr.lock);
1846 1815
1847 /* Enable PSR on the panel */ 1816 /* Enable PSR on the panel */
1848 intel_edp_psr_enable_sink(intel_dp); 1817 intel_edp_psr_enable_sink(intel_dp);
@@ -1850,13 +1819,13 @@ static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
1850 /* Enable PSR on the host */ 1819 /* Enable PSR on the host */
1851 intel_edp_psr_enable_source(intel_dp); 1820 intel_edp_psr_enable_source(intel_dp);
1852 1821
1853 dev_priv->psr.enabled = true;
1854 dev_priv->psr.active = true; 1822 dev_priv->psr.active = true;
1855} 1823}
1856 1824
1857void intel_edp_psr_enable(struct intel_dp *intel_dp) 1825void intel_edp_psr_enable(struct intel_dp *intel_dp)
1858{ 1826{
1859 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1827 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1828 struct drm_i915_private *dev_priv = dev->dev_private;
1860 1829
1861 if (!HAS_PSR(dev)) { 1830 if (!HAS_PSR(dev)) {
1862 DRM_DEBUG_KMS("PSR not supported on this platform\n"); 1831 DRM_DEBUG_KMS("PSR not supported on this platform\n");
@@ -1868,11 +1837,21 @@ void intel_edp_psr_enable(struct intel_dp *intel_dp)
1868 return; 1837 return;
1869 } 1838 }
1870 1839
1840 mutex_lock(&dev_priv->psr.lock);
1841 if (dev_priv->psr.enabled) {
1842 DRM_DEBUG_KMS("PSR already in use\n");
1843 mutex_unlock(&dev_priv->psr.lock);
1844 return;
1845 }
1846
1847 dev_priv->psr.busy_frontbuffer_bits = 0;
1848
1871 /* Setup PSR once */ 1849 /* Setup PSR once */
1872 intel_edp_psr_setup(intel_dp); 1850 intel_edp_psr_setup(intel_dp);
1873 1851
1874 if (intel_edp_psr_match_conditions(intel_dp)) 1852 if (intel_edp_psr_match_conditions(intel_dp))
1875 intel_edp_psr_do_enable(intel_dp); 1853 dev_priv->psr.enabled = intel_dp;
1854 mutex_unlock(&dev_priv->psr.lock);
1876} 1855}
1877 1856
1878void intel_edp_psr_disable(struct intel_dp *intel_dp) 1857void intel_edp_psr_disable(struct intel_dp *intel_dp)
@@ -1880,76 +1859,136 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
1880 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1859 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1881 struct drm_i915_private *dev_priv = dev->dev_private; 1860 struct drm_i915_private *dev_priv = dev->dev_private;
1882 1861
1883 if (!dev_priv->psr.enabled) 1862 mutex_lock(&dev_priv->psr.lock);
1863 if (!dev_priv->psr.enabled) {
1864 mutex_unlock(&dev_priv->psr.lock);
1884 return; 1865 return;
1866 }
1885 1867
1886 I915_WRITE(EDP_PSR_CTL(dev), 1868 if (dev_priv->psr.active) {
1887 I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE); 1869 I915_WRITE(EDP_PSR_CTL(dev),
1870 I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
1888 1871
1889 /* Wait till PSR is idle */ 1872 /* Wait till PSR is idle */
1890 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) & 1873 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
1891 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) 1874 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
1892 DRM_ERROR("Timed out waiting for PSR Idle State\n"); 1875 DRM_ERROR("Timed out waiting for PSR Idle State\n");
1893 1876
1894 dev_priv->psr.enabled = false; 1877 dev_priv->psr.active = false;
1878 } else {
1879 WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
1880 }
1881
1882 dev_priv->psr.enabled = NULL;
1883 mutex_unlock(&dev_priv->psr.lock);
1884
1885 cancel_delayed_work_sync(&dev_priv->psr.work);
1895} 1886}
1896 1887
1897static void intel_edp_psr_work(struct work_struct *work) 1888static void intel_edp_psr_work(struct work_struct *work)
1898{ 1889{
1899 struct drm_i915_private *dev_priv = 1890 struct drm_i915_private *dev_priv =
1900 container_of(work, typeof(*dev_priv), psr.work.work); 1891 container_of(work, typeof(*dev_priv), psr.work.work);
1901 struct drm_device *dev = dev_priv->dev; 1892 struct intel_dp *intel_dp = dev_priv->psr.enabled;
1902 struct intel_encoder *encoder;
1903 struct intel_dp *intel_dp = NULL;
1904 1893
1905 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) 1894 mutex_lock(&dev_priv->psr.lock);
1906 if (encoder->type == INTEL_OUTPUT_EDP) { 1895 intel_dp = dev_priv->psr.enabled;
1907 intel_dp = enc_to_intel_dp(&encoder->base);
1908 1896
1909 if (!intel_edp_psr_match_conditions(intel_dp)) 1897 if (!intel_dp)
1910 intel_edp_psr_disable(intel_dp); 1898 goto unlock;
1911 else 1899
1912 intel_edp_psr_do_enable(intel_dp); 1900 /*
1913 } 1901 * The delayed work can race with an invalidate hence we need to
1902 * recheck. Since psr_flush first clears this and then reschedules we
1903 * won't ever miss a flush when bailing out here.
1904 */
1905 if (dev_priv->psr.busy_frontbuffer_bits)
1906 goto unlock;
1907
1908 intel_edp_psr_do_enable(intel_dp);
1909unlock:
1910 mutex_unlock(&dev_priv->psr.lock);
1914} 1911}
1915 1912
1916static void intel_edp_psr_inactivate(struct drm_device *dev) 1913static void intel_edp_psr_do_exit(struct drm_device *dev)
1917{ 1914{
1918 struct drm_i915_private *dev_priv = dev->dev_private; 1915 struct drm_i915_private *dev_priv = dev->dev_private;
1919 1916
1920 dev_priv->psr.active = false; 1917 if (dev_priv->psr.active) {
1918 u32 val = I915_READ(EDP_PSR_CTL(dev));
1919
1920 WARN_ON(!(val & EDP_PSR_ENABLE));
1921
1922 I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
1923
1924 dev_priv->psr.active = false;
1925 }
1921 1926
1922 I915_WRITE(EDP_PSR_CTL(dev), I915_READ(EDP_PSR_CTL(dev))
1923 & ~EDP_PSR_ENABLE);
1924} 1927}
1925 1928
1926void intel_edp_psr_exit(struct drm_device *dev) 1929void intel_edp_psr_invalidate(struct drm_device *dev,
1930 unsigned frontbuffer_bits)
1927{ 1931{
1928 struct drm_i915_private *dev_priv = dev->dev_private; 1932 struct drm_i915_private *dev_priv = dev->dev_private;
1933 struct drm_crtc *crtc;
1934 enum pipe pipe;
1929 1935
1930 if (!HAS_PSR(dev)) 1936 mutex_lock(&dev_priv->psr.lock);
1937 if (!dev_priv->psr.enabled) {
1938 mutex_unlock(&dev_priv->psr.lock);
1931 return; 1939 return;
1940 }
1932 1941
1933 if (!dev_priv->psr.setup_done) 1942 crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
1934 return; 1943 pipe = to_intel_crtc(crtc)->pipe;
1935 1944
1936 cancel_delayed_work_sync(&dev_priv->psr.work); 1945 intel_edp_psr_do_exit(dev);
1937 1946
1938 if (dev_priv->psr.active) 1947 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
1939 intel_edp_psr_inactivate(dev);
1940 1948
1941 schedule_delayed_work(&dev_priv->psr.work, 1949 dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
1942 msecs_to_jiffies(100)); 1950 mutex_unlock(&dev_priv->psr.lock);
1943} 1951}
1944 1952
1945void intel_edp_psr_init(struct drm_device *dev) 1953void intel_edp_psr_flush(struct drm_device *dev,
1954 unsigned frontbuffer_bits)
1946{ 1955{
1947 struct drm_i915_private *dev_priv = dev->dev_private; 1956 struct drm_i915_private *dev_priv = dev->dev_private;
1957 struct drm_crtc *crtc;
1958 enum pipe pipe;
1948 1959
1949 if (!HAS_PSR(dev)) 1960 mutex_lock(&dev_priv->psr.lock);
1961 if (!dev_priv->psr.enabled) {
1962 mutex_unlock(&dev_priv->psr.lock);
1950 return; 1963 return;
1964 }
1965
1966 crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
1967 pipe = to_intel_crtc(crtc)->pipe;
1968 dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
1969
1970 /*
1971 * On Haswell sprite plane updates don't result in a psr invalidating
1972 * signal in the hardware. Which means we need to manually fake this in
1973 * software for all flushes, not just when we've seen a preceding
1974 * invalidation through frontbuffer rendering.
1975 */
1976 if (IS_HASWELL(dev) &&
1977 (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
1978 intel_edp_psr_do_exit(dev);
1979
1980 if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
1981 schedule_delayed_work(&dev_priv->psr.work,
1982 msecs_to_jiffies(100));
1983 mutex_unlock(&dev_priv->psr.lock);
1984}
1985
1986void intel_edp_psr_init(struct drm_device *dev)
1987{
1988 struct drm_i915_private *dev_priv = dev->dev_private;
1951 1989
1952 INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work); 1990 INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
1991 mutex_init(&dev_priv->psr.lock);
1953} 1992}
1954 1993
1955static void intel_disable_dp(struct intel_encoder *encoder) 1994static void intel_disable_dp(struct intel_encoder *encoder)
@@ -3681,8 +3720,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
3681 struct edid *edid = NULL; 3720 struct edid *edid = NULL;
3682 bool ret; 3721 bool ret;
3683 3722
3684 intel_runtime_pm_get(dev_priv);
3685
3686 power_domain = intel_display_port_power_domain(intel_encoder); 3723 power_domain = intel_display_port_power_domain(intel_encoder);
3687 intel_display_power_get(dev_priv, power_domain); 3724 intel_display_power_get(dev_priv, power_domain);
3688 3725
@@ -3735,9 +3772,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
3735 3772
3736out: 3773out:
3737 intel_display_power_put(dev_priv, power_domain); 3774 intel_display_power_put(dev_priv, power_domain);
3738
3739 intel_runtime_pm_put(dev_priv);
3740
3741 return status; 3775 return status;
3742} 3776}
3743 3777
@@ -4262,6 +4296,11 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4262 return; 4296 return;
4263 } 4297 }
4264 4298
4299 /*
4300 * FIXME: This needs proper synchronization with psr state. But really
4301 * hard to tell without seeing the user of this function of this code.
4302 * Check locking and ordering once that lands.
4303 */
4265 if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) { 4304 if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
4266 DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n"); 4305 DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
4267 return; 4306 return;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1dfd1e518551..b2837c5fccfc 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -166,6 +166,7 @@ struct intel_panel {
166 struct { 166 struct {
167 bool present; 167 bool present;
168 u32 level; 168 u32 level;
169 u32 min;
169 u32 max; 170 u32 max;
170 bool enabled; 171 bool enabled;
171 bool combination_mode; /* gen 2/4 only */ 172 bool combination_mode; /* gen 2/4 only */
@@ -431,6 +432,7 @@ struct intel_crtc {
431 432
432struct intel_plane_wm_parameters { 433struct intel_plane_wm_parameters {
433 uint32_t horiz_pixels; 434 uint32_t horiz_pixels;
435 uint32_t vert_pixels;
434 uint8_t bytes_per_pixel; 436 uint8_t bytes_per_pixel;
435 bool enabled; 437 bool enabled;
436 bool scaled; 438 bool scaled;
@@ -506,6 +508,7 @@ struct intel_hdmi {
506 bool has_audio; 508 bool has_audio;
507 enum hdmi_force_audio force_audio; 509 enum hdmi_force_audio force_audio;
508 bool rgb_quant_range_selectable; 510 bool rgb_quant_range_selectable;
511 enum hdmi_picture_aspect aspect_ratio;
509 void (*write_infoframe)(struct drm_encoder *encoder, 512 void (*write_infoframe)(struct drm_encoder *encoder,
510 enum hdmi_infoframe_type type, 513 enum hdmi_infoframe_type type,
511 const void *frame, ssize_t len); 514 const void *frame, ssize_t len);
@@ -711,17 +714,26 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
711bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, 714bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
712 enum transcoder pch_transcoder, 715 enum transcoder pch_transcoder,
713 bool enable); 716 bool enable);
714void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); 717void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
715void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); 718void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
716void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 719void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
717void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 720void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
718void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 721void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
719void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 722void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
720void intel_runtime_pm_disable_interrupts(struct drm_device *dev); 723void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
721void intel_runtime_pm_restore_interrupts(struct drm_device *dev); 724void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
725static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
726{
727 /*
728 * We only use drm_irq_uninstall() at unload and VT switch, so
729 * this is the only thing we need to check.
730 */
731 return !dev_priv->pm._irqs_disabled;
732}
733
722int intel_get_crtc_scanline(struct intel_crtc *crtc); 734int intel_get_crtc_scanline(struct intel_crtc *crtc);
723void i9xx_check_fifo_underruns(struct drm_device *dev); 735void i9xx_check_fifo_underruns(struct drm_device *dev);
724 736void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
725 737
726/* intel_crt.c */ 738/* intel_crt.c */
727void intel_crt_init(struct drm_device *dev); 739void intel_crt_init(struct drm_device *dev);
@@ -787,6 +799,7 @@ void intel_frontbuffer_flip(struct drm_device *dev,
787void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire); 799void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
788void intel_mark_idle(struct drm_device *dev); 800void intel_mark_idle(struct drm_device *dev);
789void intel_crtc_restore_mode(struct drm_crtc *crtc); 801void intel_crtc_restore_mode(struct drm_crtc *crtc);
802void intel_crtc_control(struct drm_crtc *crtc, bool enable);
790void intel_crtc_update_dpms(struct drm_crtc *crtc); 803void intel_crtc_update_dpms(struct drm_crtc *crtc);
791void intel_encoder_destroy(struct drm_encoder *encoder); 804void intel_encoder_destroy(struct drm_encoder *encoder);
792void intel_connector_dpms(struct drm_connector *, int mode); 805void intel_connector_dpms(struct drm_connector *, int mode);
@@ -901,7 +914,10 @@ void intel_edp_panel_off(struct intel_dp *intel_dp);
901void intel_edp_psr_enable(struct intel_dp *intel_dp); 914void intel_edp_psr_enable(struct intel_dp *intel_dp);
902void intel_edp_psr_disable(struct intel_dp *intel_dp); 915void intel_edp_psr_disable(struct intel_dp *intel_dp);
903void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate); 916void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
904void intel_edp_psr_exit(struct drm_device *dev); 917void intel_edp_psr_invalidate(struct drm_device *dev,
918 unsigned frontbuffer_bits);
919void intel_edp_psr_flush(struct drm_device *dev,
920 unsigned frontbuffer_bits);
905void intel_edp_psr_init(struct drm_device *dev); 921void intel_edp_psr_init(struct drm_device *dev);
906 922
907int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd); 923int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd);
@@ -997,8 +1013,8 @@ void intel_pch_panel_fitting(struct intel_crtc *crtc,
997void intel_gmch_panel_fitting(struct intel_crtc *crtc, 1013void intel_gmch_panel_fitting(struct intel_crtc *crtc,
998 struct intel_crtc_config *pipe_config, 1014 struct intel_crtc_config *pipe_config,
999 int fitting_mode); 1015 int fitting_mode);
1000void intel_panel_set_backlight(struct intel_connector *connector, u32 level, 1016void intel_panel_set_backlight_acpi(struct intel_connector *connector,
1001 u32 max); 1017 u32 level, u32 max);
1002int intel_panel_setup_backlight(struct drm_connector *connector); 1018int intel_panel_setup_backlight(struct drm_connector *connector);
1003void intel_panel_enable_backlight(struct intel_connector *connector); 1019void intel_panel_enable_backlight(struct intel_connector *connector);
1004void intel_panel_disable_backlight(struct intel_connector *connector); 1020void intel_panel_disable_backlight(struct intel_connector *connector);
@@ -1017,7 +1033,9 @@ int ilk_wm_max_level(const struct drm_device *dev);
1017void intel_update_watermarks(struct drm_crtc *crtc); 1033void intel_update_watermarks(struct drm_crtc *crtc);
1018void intel_update_sprite_watermarks(struct drm_plane *plane, 1034void intel_update_sprite_watermarks(struct drm_plane *plane,
1019 struct drm_crtc *crtc, 1035 struct drm_crtc *crtc,
1020 uint32_t sprite_width, int pixel_size, 1036 uint32_t sprite_width,
1037 uint32_t sprite_height,
1038 int pixel_size,
1021 bool enabled, bool scaled); 1039 bool enabled, bool scaled);
1022void intel_init_pm(struct drm_device *dev); 1040void intel_init_pm(struct drm_device *dev);
1023void intel_pm_setup(struct drm_device *dev); 1041void intel_pm_setup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 24224131ebf1..f9151f6641d9 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -367,6 +367,9 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
367 union hdmi_infoframe frame; 367 union hdmi_infoframe frame;
368 int ret; 368 int ret;
369 369
370 /* Set user selected PAR to incoming mode's member */
371 adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
372
370 ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, 373 ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
371 adjusted_mode); 374 adjusted_mode);
372 if (ret < 0) { 375 if (ret < 0) {
@@ -879,7 +882,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
879 struct intel_encoder *encoder; 882 struct intel_encoder *encoder;
880 int count = 0, count_hdmi = 0; 883 int count = 0, count_hdmi = 0;
881 884
882 if (!HAS_PCH_SPLIT(dev)) 885 if (HAS_GMCH_DISPLAY(dev))
883 return false; 886 return false;
884 887
885 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 888 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
@@ -1124,6 +1127,23 @@ intel_hdmi_set_property(struct drm_connector *connector,
1124 goto done; 1127 goto done;
1125 } 1128 }
1126 1129
1130 if (property == connector->dev->mode_config.aspect_ratio_property) {
1131 switch (val) {
1132 case DRM_MODE_PICTURE_ASPECT_NONE:
1133 intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
1134 break;
1135 case DRM_MODE_PICTURE_ASPECT_4_3:
1136 intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
1137 break;
1138 case DRM_MODE_PICTURE_ASPECT_16_9:
1139 intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
1140 break;
1141 default:
1142 return -EINVAL;
1143 }
1144 goto done;
1145 }
1146
1127 return -EINVAL; 1147 return -EINVAL;
1128 1148
1129done: 1149done:
@@ -1480,11 +1500,22 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
1480}; 1500};
1481 1501
1482static void 1502static void
1503intel_attach_aspect_ratio_property(struct drm_connector *connector)
1504{
1505 if (!drm_mode_create_aspect_ratio_property(connector->dev))
1506 drm_object_attach_property(&connector->base,
1507 connector->dev->mode_config.aspect_ratio_property,
1508 DRM_MODE_PICTURE_ASPECT_NONE);
1509}
1510
1511static void
1483intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) 1512intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
1484{ 1513{
1485 intel_attach_force_audio_property(connector); 1514 intel_attach_force_audio_property(connector);
1486 intel_attach_broadcast_rgb_property(connector); 1515 intel_attach_broadcast_rgb_property(connector);
1487 intel_hdmi->color_range_auto = true; 1516 intel_hdmi->color_range_auto = true;
1517 intel_attach_aspect_ratio_property(connector);
1518 intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
1488} 1519}
1489 1520
1490void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, 1521void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1531,7 +1562,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1531 if (IS_VALLEYVIEW(dev)) { 1562 if (IS_VALLEYVIEW(dev)) {
1532 intel_hdmi->write_infoframe = vlv_write_infoframe; 1563 intel_hdmi->write_infoframe = vlv_write_infoframe;
1533 intel_hdmi->set_infoframes = vlv_set_infoframes; 1564 intel_hdmi->set_infoframes = vlv_set_infoframes;
1534 } else if (!HAS_PCH_SPLIT(dev)) { 1565 } else if (IS_G4X(dev)) {
1535 intel_hdmi->write_infoframe = g4x_write_infoframe; 1566 intel_hdmi->write_infoframe = g4x_write_infoframe;
1536 intel_hdmi->set_infoframes = g4x_set_infoframes; 1567 intel_hdmi->set_infoframes = g4x_set_infoframes;
1537 } else if (HAS_DDI(dev)) { 1568 } else if (HAS_DDI(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index d37934b6338e..97bc3eab9cd6 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -419,7 +419,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
419 */ 419 */
420 DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp); 420 DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
421 list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) 421 list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
422 intel_panel_set_backlight(intel_connector, bclp, 255); 422 intel_panel_set_backlight_acpi(intel_connector, bclp, 255);
423 iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); 423 iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
424 424
425 drm_modeset_unlock(&dev->mode_config.connection_mutex); 425 drm_modeset_unlock(&dev->mode_config.connection_mutex);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 38a98570d10c..f2d5f2ebcdde 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -398,6 +398,69 @@ intel_panel_detect(struct drm_device *dev)
398 } 398 }
399} 399}
400 400
401/**
402 * scale - scale values from one range to another
403 *
404 * @source_val: value in range [@source_min..@source_max]
405 *
406 * Return @source_val in range [@source_min..@source_max] scaled to range
407 * [@target_min..@target_max].
408 */
409static uint32_t scale(uint32_t source_val,
410 uint32_t source_min, uint32_t source_max,
411 uint32_t target_min, uint32_t target_max)
412{
413 uint64_t target_val;
414
415 WARN_ON(source_min > source_max);
416 WARN_ON(target_min > target_max);
417
418 /* defensive */
419 source_val = clamp(source_val, source_min, source_max);
420
421 /* avoid overflows */
422 target_val = (uint64_t)(source_val - source_min) *
423 (target_max - target_min);
424 do_div(target_val, source_max - source_min);
425 target_val += target_min;
426
427 return target_val;
428}
429
430/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
431static inline u32 scale_user_to_hw(struct intel_connector *connector,
432 u32 user_level, u32 user_max)
433{
434 struct intel_panel *panel = &connector->panel;
435
436 return scale(user_level, 0, user_max,
437 panel->backlight.min, panel->backlight.max);
438}
439
440/* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
441 * to [hw_min..hw_max]. */
442static inline u32 clamp_user_to_hw(struct intel_connector *connector,
443 u32 user_level, u32 user_max)
444{
445 struct intel_panel *panel = &connector->panel;
446 u32 hw_level;
447
448 hw_level = scale(user_level, 0, user_max, 0, panel->backlight.max);
449 hw_level = clamp(hw_level, panel->backlight.min, panel->backlight.max);
450
451 return hw_level;
452}
453
454/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
455static inline u32 scale_hw_to_user(struct intel_connector *connector,
456 u32 hw_level, u32 user_max)
457{
458 struct intel_panel *panel = &connector->panel;
459
460 return scale(hw_level, panel->backlight.min, panel->backlight.max,
461 0, user_max);
462}
463
401static u32 intel_panel_compute_brightness(struct intel_connector *connector, 464static u32 intel_panel_compute_brightness(struct intel_connector *connector,
402 u32 val) 465 u32 val)
403{ 466{
@@ -557,17 +620,16 @@ intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
557 dev_priv->display.set_backlight(connector, level); 620 dev_priv->display.set_backlight(connector, level);
558} 621}
559 622
560/* set backlight brightness to level in range [0..max] */ 623/* set backlight brightness to level in range [0..max], scaling wrt hw min */
561void intel_panel_set_backlight(struct intel_connector *connector, u32 level, 624static void intel_panel_set_backlight(struct intel_connector *connector,
562 u32 max) 625 u32 user_level, u32 user_max)
563{ 626{
564 struct drm_device *dev = connector->base.dev; 627 struct drm_device *dev = connector->base.dev;
565 struct drm_i915_private *dev_priv = dev->dev_private; 628 struct drm_i915_private *dev_priv = dev->dev_private;
566 struct intel_panel *panel = &connector->panel; 629 struct intel_panel *panel = &connector->panel;
567 enum pipe pipe = intel_get_pipe_from_connector(connector); 630 enum pipe pipe = intel_get_pipe_from_connector(connector);
568 u32 freq; 631 u32 hw_level;
569 unsigned long flags; 632 unsigned long flags;
570 u64 n;
571 633
572 if (!panel->backlight.present || pipe == INVALID_PIPE) 634 if (!panel->backlight.present || pipe == INVALID_PIPE)
573 return; 635 return;
@@ -576,18 +638,46 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
576 638
577 WARN_ON(panel->backlight.max == 0); 639 WARN_ON(panel->backlight.max == 0);
578 640
579 /* scale to hardware max, but be careful to not overflow */ 641 hw_level = scale_user_to_hw(connector, user_level, user_max);
580 freq = panel->backlight.max; 642 panel->backlight.level = hw_level;
581 n = (u64)level * freq; 643
582 do_div(n, max); 644 if (panel->backlight.enabled)
583 level = n; 645 intel_panel_actually_set_backlight(connector, hw_level);
646
647 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
648}
649
650/* set backlight brightness to level in range [0..max], assuming hw min is
651 * respected.
652 */
653void intel_panel_set_backlight_acpi(struct intel_connector *connector,
654 u32 user_level, u32 user_max)
655{
656 struct drm_device *dev = connector->base.dev;
657 struct drm_i915_private *dev_priv = dev->dev_private;
658 struct intel_panel *panel = &connector->panel;
659 enum pipe pipe = intel_get_pipe_from_connector(connector);
660 u32 hw_level;
661 unsigned long flags;
662
663 if (!panel->backlight.present || pipe == INVALID_PIPE)
664 return;
665
666 spin_lock_irqsave(&dev_priv->backlight_lock, flags);
667
668 WARN_ON(panel->backlight.max == 0);
669
670 hw_level = clamp_user_to_hw(connector, user_level, user_max);
671 panel->backlight.level = hw_level;
584 672
585 panel->backlight.level = level;
586 if (panel->backlight.device) 673 if (panel->backlight.device)
587 panel->backlight.device->props.brightness = level; 674 panel->backlight.device->props.brightness =
675 scale_hw_to_user(connector,
676 panel->backlight.level,
677 panel->backlight.device->props.max_brightness);
588 678
589 if (panel->backlight.enabled) 679 if (panel->backlight.enabled)
590 intel_panel_actually_set_backlight(connector, level); 680 intel_panel_actually_set_backlight(connector, hw_level);
591 681
592 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); 682 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
593} 683}
@@ -860,7 +950,9 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
860 panel->backlight.level = panel->backlight.max; 950 panel->backlight.level = panel->backlight.max;
861 if (panel->backlight.device) 951 if (panel->backlight.device)
862 panel->backlight.device->props.brightness = 952 panel->backlight.device->props.brightness =
863 panel->backlight.level; 953 scale_hw_to_user(connector,
954 panel->backlight.level,
955 panel->backlight.device->props.max_brightness);
864 } 956 }
865 957
866 dev_priv->display.enable_backlight(connector); 958 dev_priv->display.enable_backlight(connector);
@@ -889,11 +981,15 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
889 struct intel_connector *connector = bl_get_data(bd); 981 struct intel_connector *connector = bl_get_data(bd);
890 struct drm_device *dev = connector->base.dev; 982 struct drm_device *dev = connector->base.dev;
891 struct drm_i915_private *dev_priv = dev->dev_private; 983 struct drm_i915_private *dev_priv = dev->dev_private;
984 u32 hw_level;
892 int ret; 985 int ret;
893 986
894 intel_runtime_pm_get(dev_priv); 987 intel_runtime_pm_get(dev_priv);
895 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 988 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
896 ret = intel_panel_get_backlight(connector); 989
990 hw_level = intel_panel_get_backlight(connector);
991 ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness);
992
897 drm_modeset_unlock(&dev->mode_config.connection_mutex); 993 drm_modeset_unlock(&dev->mode_config.connection_mutex);
898 intel_runtime_pm_put(dev_priv); 994 intel_runtime_pm_put(dev_priv);
899 995
@@ -913,12 +1009,19 @@ static int intel_backlight_device_register(struct intel_connector *connector)
913 if (WARN_ON(panel->backlight.device)) 1009 if (WARN_ON(panel->backlight.device))
914 return -ENODEV; 1010 return -ENODEV;
915 1011
916 BUG_ON(panel->backlight.max == 0); 1012 WARN_ON(panel->backlight.max == 0);
917 1013
918 memset(&props, 0, sizeof(props)); 1014 memset(&props, 0, sizeof(props));
919 props.type = BACKLIGHT_RAW; 1015 props.type = BACKLIGHT_RAW;
920 props.brightness = panel->backlight.level; 1016
1017 /*
1018 * Note: Everything should work even if the backlight device max
1019 * presented to the userspace is arbitrarily chosen.
1020 */
921 props.max_brightness = panel->backlight.max; 1021 props.max_brightness = panel->backlight.max;
1022 props.brightness = scale_hw_to_user(connector,
1023 panel->backlight.level,
1024 props.max_brightness);
922 1025
923 /* 1026 /*
924 * Note: using the same name independent of the connector prevents 1027 * Note: using the same name independent of the connector prevents
@@ -964,6 +1067,19 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
964 * XXX: Query mode clock or hardware clock and program PWM modulation frequency 1067 * XXX: Query mode clock or hardware clock and program PWM modulation frequency
965 * appropriately when it's 0. Use VBT and/or sane defaults. 1068 * appropriately when it's 0. Use VBT and/or sane defaults.
966 */ 1069 */
1070static u32 get_backlight_min_vbt(struct intel_connector *connector)
1071{
1072 struct drm_device *dev = connector->base.dev;
1073 struct drm_i915_private *dev_priv = dev->dev_private;
1074 struct intel_panel *panel = &connector->panel;
1075
1076 WARN_ON(panel->backlight.max == 0);
1077
1078 /* vbt value is a coefficient in range [0..255] */
1079 return scale(dev_priv->vbt.backlight.min_brightness, 0, 255,
1080 0, panel->backlight.max);
1081}
1082
967static int bdw_setup_backlight(struct intel_connector *connector) 1083static int bdw_setup_backlight(struct intel_connector *connector)
968{ 1084{
969 struct drm_device *dev = connector->base.dev; 1085 struct drm_device *dev = connector->base.dev;
@@ -979,6 +1095,8 @@ static int bdw_setup_backlight(struct intel_connector *connector)
979 if (!panel->backlight.max) 1095 if (!panel->backlight.max)
980 return -ENODEV; 1096 return -ENODEV;
981 1097
1098 panel->backlight.min = get_backlight_min_vbt(connector);
1099
982 val = bdw_get_backlight(connector); 1100 val = bdw_get_backlight(connector);
983 panel->backlight.level = intel_panel_compute_brightness(connector, val); 1101 panel->backlight.level = intel_panel_compute_brightness(connector, val);
984 1102
@@ -1003,6 +1121,8 @@ static int pch_setup_backlight(struct intel_connector *connector)
1003 if (!panel->backlight.max) 1121 if (!panel->backlight.max)
1004 return -ENODEV; 1122 return -ENODEV;
1005 1123
1124 panel->backlight.min = get_backlight_min_vbt(connector);
1125
1006 val = pch_get_backlight(connector); 1126 val = pch_get_backlight(connector);
1007 panel->backlight.level = intel_panel_compute_brightness(connector, val); 1127 panel->backlight.level = intel_panel_compute_brightness(connector, val);
1008 1128
@@ -1035,6 +1155,8 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
1035 if (!panel->backlight.max) 1155 if (!panel->backlight.max)
1036 return -ENODEV; 1156 return -ENODEV;
1037 1157
1158 panel->backlight.min = get_backlight_min_vbt(connector);
1159
1038 val = i9xx_get_backlight(connector); 1160 val = i9xx_get_backlight(connector);
1039 panel->backlight.level = intel_panel_compute_brightness(connector, val); 1161 panel->backlight.level = intel_panel_compute_brightness(connector, val);
1040 1162
@@ -1062,6 +1184,8 @@ static int i965_setup_backlight(struct intel_connector *connector)
1062 if (!panel->backlight.max) 1184 if (!panel->backlight.max)
1063 return -ENODEV; 1185 return -ENODEV;
1064 1186
1187 panel->backlight.min = get_backlight_min_vbt(connector);
1188
1065 val = i9xx_get_backlight(connector); 1189 val = i9xx_get_backlight(connector);
1066 panel->backlight.level = intel_panel_compute_brightness(connector, val); 1190 panel->backlight.level = intel_panel_compute_brightness(connector, val);
1067 1191
@@ -1099,6 +1223,8 @@ static int vlv_setup_backlight(struct intel_connector *connector)
1099 if (!panel->backlight.max) 1223 if (!panel->backlight.max)
1100 return -ENODEV; 1224 return -ENODEV;
1101 1225
1226 panel->backlight.min = get_backlight_min_vbt(connector);
1227
1102 val = _vlv_get_backlight(dev, PIPE_A); 1228 val = _vlv_get_backlight(dev, PIPE_A);
1103 panel->backlight.level = intel_panel_compute_brightness(connector, val); 1229 panel->backlight.level = intel_panel_compute_brightness(connector, val);
1104 1230
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 780c3ab26f4f..3f88f29a98c0 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2743,10 +2743,11 @@ static void ilk_update_wm(struct drm_crtc *crtc)
2743 ilk_write_wm_values(dev_priv, &results); 2743 ilk_write_wm_values(dev_priv, &results);
2744} 2744}
2745 2745
2746static void ilk_update_sprite_wm(struct drm_plane *plane, 2746static void
2747 struct drm_crtc *crtc, 2747ilk_update_sprite_wm(struct drm_plane *plane,
2748 uint32_t sprite_width, int pixel_size, 2748 struct drm_crtc *crtc,
2749 bool enabled, bool scaled) 2749 uint32_t sprite_width, uint32_t sprite_height,
2750 int pixel_size, bool enabled, bool scaled)
2750{ 2751{
2751 struct drm_device *dev = plane->dev; 2752 struct drm_device *dev = plane->dev;
2752 struct intel_plane *intel_plane = to_intel_plane(plane); 2753 struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -2754,6 +2755,7 @@ static void ilk_update_sprite_wm(struct drm_plane *plane,
2754 intel_plane->wm.enabled = enabled; 2755 intel_plane->wm.enabled = enabled;
2755 intel_plane->wm.scaled = scaled; 2756 intel_plane->wm.scaled = scaled;
2756 intel_plane->wm.horiz_pixels = sprite_width; 2757 intel_plane->wm.horiz_pixels = sprite_width;
2758 intel_plane->wm.vert_pixels = sprite_width;
2757 intel_plane->wm.bytes_per_pixel = pixel_size; 2759 intel_plane->wm.bytes_per_pixel = pixel_size;
2758 2760
2759 /* 2761 /*
@@ -2888,13 +2890,16 @@ void intel_update_watermarks(struct drm_crtc *crtc)
2888 2890
2889void intel_update_sprite_watermarks(struct drm_plane *plane, 2891void intel_update_sprite_watermarks(struct drm_plane *plane,
2890 struct drm_crtc *crtc, 2892 struct drm_crtc *crtc,
2891 uint32_t sprite_width, int pixel_size, 2893 uint32_t sprite_width,
2894 uint32_t sprite_height,
2895 int pixel_size,
2892 bool enabled, bool scaled) 2896 bool enabled, bool scaled)
2893{ 2897{
2894 struct drm_i915_private *dev_priv = plane->dev->dev_private; 2898 struct drm_i915_private *dev_priv = plane->dev->dev_private;
2895 2899
2896 if (dev_priv->display.update_sprite_wm) 2900 if (dev_priv->display.update_sprite_wm)
2897 dev_priv->display.update_sprite_wm(plane, crtc, sprite_width, 2901 dev_priv->display.update_sprite_wm(plane, crtc,
2902 sprite_width, sprite_height,
2898 pixel_size, enabled, scaled); 2903 pixel_size, enabled, scaled);
2899} 2904}
2900 2905
@@ -3289,7 +3294,9 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
3289 3294
3290 mutex_lock(&dev_priv->rps.hw_lock); 3295 mutex_lock(&dev_priv->rps.hw_lock);
3291 if (dev_priv->rps.enabled) { 3296 if (dev_priv->rps.enabled) {
3292 if (IS_VALLEYVIEW(dev)) 3297 if (IS_CHERRYVIEW(dev))
3298 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3299 else if (IS_VALLEYVIEW(dev))
3293 vlv_set_rps_idle(dev_priv); 3300 vlv_set_rps_idle(dev_priv);
3294 else 3301 else
3295 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); 3302 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
@@ -3392,6 +3399,8 @@ static void cherryview_disable_rps(struct drm_device *dev)
3392 struct drm_i915_private *dev_priv = dev->dev_private; 3399 struct drm_i915_private *dev_priv = dev->dev_private;
3393 3400
3394 I915_WRITE(GEN6_RC_CONTROL, 0); 3401 I915_WRITE(GEN6_RC_CONTROL, 0);
3402
3403 gen8_disable_rps_interrupts(dev);
3395} 3404}
3396 3405
3397static void valleyview_disable_rps(struct drm_device *dev) 3406static void valleyview_disable_rps(struct drm_device *dev)
@@ -3465,7 +3474,7 @@ static void gen8_enable_rps_interrupts(struct drm_device *dev)
3465 3474
3466 spin_lock_irq(&dev_priv->irq_lock); 3475 spin_lock_irq(&dev_priv->irq_lock);
3467 WARN_ON(dev_priv->rps.pm_iir); 3476 WARN_ON(dev_priv->rps.pm_iir);
3468 bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 3477 gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3469 I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events); 3478 I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
3470 spin_unlock_irq(&dev_priv->irq_lock); 3479 spin_unlock_irq(&dev_priv->irq_lock);
3471} 3480}
@@ -3476,7 +3485,7 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
3476 3485
3477 spin_lock_irq(&dev_priv->irq_lock); 3486 spin_lock_irq(&dev_priv->irq_lock);
3478 WARN_ON(dev_priv->rps.pm_iir); 3487 WARN_ON(dev_priv->rps.pm_iir);
3479 snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 3488 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3480 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events); 3489 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3481 spin_unlock_irq(&dev_priv->irq_lock); 3490 spin_unlock_irq(&dev_priv->irq_lock);
3482} 3491}
@@ -3781,7 +3790,7 @@ void gen6_update_ring_freq(struct drm_device *dev)
3781 mutex_unlock(&dev_priv->rps.hw_lock); 3790 mutex_unlock(&dev_priv->rps.hw_lock);
3782} 3791}
3783 3792
3784int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) 3793static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
3785{ 3794{
3786 u32 val, rp0; 3795 u32 val, rp0;
3787 3796
@@ -3801,7 +3810,17 @@ static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3801 return rpe; 3810 return rpe;
3802} 3811}
3803 3812
3804int cherryview_rps_min_freq(struct drm_i915_private *dev_priv) 3813static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
3814{
3815 u32 val, rp1;
3816
3817 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3818 rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
3819
3820 return rp1;
3821}
3822
3823static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
3805{ 3824{
3806 u32 val, rpn; 3825 u32 val, rpn;
3807 3826
@@ -3810,7 +3829,18 @@ int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
3810 return rpn; 3829 return rpn;
3811} 3830}
3812 3831
3813int valleyview_rps_max_freq(struct drm_i915_private *dev_priv) 3832static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
3833{
3834 u32 val, rp1;
3835
3836 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
3837
3838 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
3839
3840 return rp1;
3841}
3842
3843static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
3814{ 3844{
3815 u32 val, rp0; 3845 u32 val, rp0;
3816 3846
@@ -3835,7 +3865,7 @@ static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3835 return rpe; 3865 return rpe;
3836} 3866}
3837 3867
3838int valleyview_rps_min_freq(struct drm_i915_private *dev_priv) 3868static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3839{ 3869{
3840 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; 3870 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3841} 3871}
@@ -3952,6 +3982,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
3952 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 3982 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
3953 dev_priv->rps.efficient_freq); 3983 dev_priv->rps.efficient_freq);
3954 3984
3985 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
3986 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
3987 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
3988 dev_priv->rps.rp1_freq);
3989
3955 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv); 3990 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
3956 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 3991 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3957 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq), 3992 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
@@ -3986,6 +4021,11 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
3986 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 4021 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
3987 dev_priv->rps.efficient_freq); 4022 dev_priv->rps.efficient_freq);
3988 4023
4024 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
4025 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
4026 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
4027 dev_priv->rps.rp1_freq);
4028
3989 dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv); 4029 dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
3990 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 4030 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3991 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq), 4031 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
@@ -4093,6 +4133,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
4093 4133
4094 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); 4134 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
4095 4135
4136 gen8_enable_rps_interrupts(dev);
4137
4096 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 4138 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4097} 4139}
4098 4140
@@ -4934,11 +4976,14 @@ void intel_suspend_gt_powersave(struct drm_device *dev)
4934 struct drm_i915_private *dev_priv = dev->dev_private; 4976 struct drm_i915_private *dev_priv = dev->dev_private;
4935 4977
4936 /* Interrupts should be disabled already to avoid re-arming. */ 4978 /* Interrupts should be disabled already to avoid re-arming. */
4937 WARN_ON(dev->irq_enabled && !dev_priv->pm.irqs_disabled); 4979 WARN_ON(intel_irqs_enabled(dev_priv));
4938 4980
4939 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4981 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4940 4982
4941 cancel_work_sync(&dev_priv->rps.work); 4983 cancel_work_sync(&dev_priv->rps.work);
4984
4985 /* Force GPU to min freq during suspend */
4986 gen6_rps_idle(dev_priv);
4942} 4987}
4943 4988
4944void intel_disable_gt_powersave(struct drm_device *dev) 4989void intel_disable_gt_powersave(struct drm_device *dev)
@@ -4946,7 +4991,7 @@ void intel_disable_gt_powersave(struct drm_device *dev)
4946 struct drm_i915_private *dev_priv = dev->dev_private; 4991 struct drm_i915_private *dev_priv = dev->dev_private;
4947 4992
4948 /* Interrupts should be disabled already to avoid re-arming. */ 4993 /* Interrupts should be disabled already to avoid re-arming. */
4949 WARN_ON(dev->irq_enabled && !dev_priv->pm.irqs_disabled); 4994 WARN_ON(intel_irqs_enabled(dev_priv));
4950 4995
4951 if (IS_IRONLAKE_M(dev)) { 4996 if (IS_IRONLAKE_M(dev)) {
4952 ironlake_disable_drps(dev); 4997 ironlake_disable_drps(dev);
@@ -5684,6 +5729,35 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
5684static void cherryview_init_clock_gating(struct drm_device *dev) 5729static void cherryview_init_clock_gating(struct drm_device *dev)
5685{ 5730{
5686 struct drm_i915_private *dev_priv = dev->dev_private; 5731 struct drm_i915_private *dev_priv = dev->dev_private;
5732 u32 val;
5733
5734 mutex_lock(&dev_priv->rps.hw_lock);
5735 val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
5736 mutex_unlock(&dev_priv->rps.hw_lock);
5737 switch ((val >> 2) & 0x7) {
5738 case 0:
5739 case 1:
5740 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200;
5741 dev_priv->mem_freq = 1600;
5742 break;
5743 case 2:
5744 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267;
5745 dev_priv->mem_freq = 1600;
5746 break;
5747 case 3:
5748 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333;
5749 dev_priv->mem_freq = 2000;
5750 break;
5751 case 4:
5752 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320;
5753 dev_priv->mem_freq = 1600;
5754 break;
5755 case 5:
5756 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400;
5757 dev_priv->mem_freq = 1600;
5758 break;
5759 }
5760 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
5687 5761
5688 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); 5762 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5689 5763
@@ -5924,7 +5998,6 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
5924static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) 5998static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
5925{ 5999{
5926 struct drm_device *dev = dev_priv->dev; 6000 struct drm_device *dev = dev_priv->dev;
5927 unsigned long irqflags;
5928 6001
5929 /* 6002 /*
5930 * After we re-enable the power well, if we touch VGA register 0x3d5 6003 * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -5940,21 +6013,8 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
5940 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); 6013 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
5941 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 6014 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
5942 6015
5943 if (IS_BROADWELL(dev)) { 6016 if (IS_BROADWELL(dev))
5944 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 6017 gen8_irq_power_well_post_enable(dev_priv);
5945 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
5946 dev_priv->de_irq_mask[PIPE_B]);
5947 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
5948 ~dev_priv->de_irq_mask[PIPE_B] |
5949 GEN8_PIPE_VBLANK);
5950 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
5951 dev_priv->de_irq_mask[PIPE_C]);
5952 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
5953 ~dev_priv->de_irq_mask[PIPE_C] |
5954 GEN8_PIPE_VBLANK);
5955 POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
5956 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
5957 }
5958} 6018}
5959 6019
5960static void hsw_set_power_well(struct drm_i915_private *dev_priv, 6020static void hsw_set_power_well(struct drm_i915_private *dev_priv,
@@ -6881,7 +6941,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
6881 return 0; 6941 return 0;
6882} 6942}
6883 6943
6884int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val) 6944static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
6885{ 6945{
6886 int div; 6946 int div;
6887 6947
@@ -6903,7 +6963,7 @@ int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
6903 return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div); 6963 return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
6904} 6964}
6905 6965
6906int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val) 6966static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
6907{ 6967{
6908 int mul; 6968 int mul;
6909 6969
@@ -6925,6 +6985,80 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
6925 return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6; 6985 return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
6926} 6986}
6927 6987
6988static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
6989{
6990 int div, freq;
6991
6992 switch (dev_priv->rps.cz_freq) {
6993 case 200:
6994 div = 5;
6995 break;
6996 case 267:
6997 div = 6;
6998 break;
6999 case 320:
7000 case 333:
7001 case 400:
7002 div = 8;
7003 break;
7004 default:
7005 return -1;
7006 }
7007
7008 freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
7009
7010 return freq;
7011}
7012
7013static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7014{
7015 int mul, opcode;
7016
7017 switch (dev_priv->rps.cz_freq) {
7018 case 200:
7019 mul = 5;
7020 break;
7021 case 267:
7022 mul = 6;
7023 break;
7024 case 320:
7025 case 333:
7026 case 400:
7027 mul = 8;
7028 break;
7029 default:
7030 return -1;
7031 }
7032
7033 opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
7034
7035 return opcode;
7036}
7037
7038int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
7039{
7040 int ret = -1;
7041
7042 if (IS_CHERRYVIEW(dev_priv->dev))
7043 ret = chv_gpu_freq(dev_priv, val);
7044 else if (IS_VALLEYVIEW(dev_priv->dev))
7045 ret = byt_gpu_freq(dev_priv, val);
7046
7047 return ret;
7048}
7049
7050int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7051{
7052 int ret = -1;
7053
7054 if (IS_CHERRYVIEW(dev_priv->dev))
7055 ret = chv_freq_opcode(dev_priv, val);
7056 else if (IS_VALLEYVIEW(dev_priv->dev))
7057 ret = byt_freq_opcode(dev_priv, val);
7058
7059 return ret;
7060}
7061
6928void intel_pm_setup(struct drm_device *dev) 7062void intel_pm_setup(struct drm_device *dev)
6929{ 7063{
6930 struct drm_i915_private *dev_priv = dev->dev_private; 7064 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6935,5 +7069,5 @@ void intel_pm_setup(struct drm_device *dev)
6935 intel_gen6_powersave_work); 7069 intel_gen6_powersave_work);
6936 7070
6937 dev_priv->pm.suspended = false; 7071 dev_priv->pm.suspended = false;
6938 dev_priv->pm.irqs_disabled = false; 7072 dev_priv->pm._irqs_disabled = false;
6939} 7073}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 599709e80a16..b3d8f766fa7f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1004,7 +1004,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
1004 1004
1005 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1005 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1006 if (ring->irq_refcount++ == 0) 1006 if (ring->irq_refcount++ == 0)
1007 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); 1007 gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1008 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1008 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1009 1009
1010 return true; 1010 return true;
@@ -1019,7 +1019,7 @@ gen5_ring_put_irq(struct intel_engine_cs *ring)
1019 1019
1020 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1020 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1021 if (--ring->irq_refcount == 0) 1021 if (--ring->irq_refcount == 0)
1022 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); 1022 gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
1023 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1023 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1024} 1024}
1025 1025
@@ -1212,7 +1212,7 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
1212 GT_PARITY_ERROR(dev))); 1212 GT_PARITY_ERROR(dev)));
1213 else 1213 else
1214 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1214 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1215 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); 1215 gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1216 } 1216 }
1217 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1217 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1218 1218
@@ -1232,7 +1232,7 @@ gen6_ring_put_irq(struct intel_engine_cs *ring)
1232 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); 1232 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
1233 else 1233 else
1234 I915_WRITE_IMR(ring, ~0); 1234 I915_WRITE_IMR(ring, ~0);
1235 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); 1235 gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
1236 } 1236 }
1237 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1237 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1238} 1238}
@@ -1250,7 +1250,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
1250 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1250 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1251 if (ring->irq_refcount++ == 0) { 1251 if (ring->irq_refcount++ == 0) {
1252 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1252 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1253 snb_enable_pm_irq(dev_priv, ring->irq_enable_mask); 1253 gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
1254 } 1254 }
1255 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1255 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1256 1256
@@ -1270,7 +1270,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring)
1270 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1270 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1271 if (--ring->irq_refcount == 0) { 1271 if (--ring->irq_refcount == 0) {
1272 I915_WRITE_IMR(ring, ~0); 1272 I915_WRITE_IMR(ring, ~0);
1273 snb_disable_pm_irq(dev_priv, ring->irq_enable_mask); 1273 gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
1274 } 1274 }
1275 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1275 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1276} 1276}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 6afd1cfe7c44..168c6652cda1 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -218,7 +218,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
218 218
219 sprctl |= SP_ENABLE; 219 sprctl |= SP_ENABLE;
220 220
221 intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true, 221 intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
222 pixel_size, true,
222 src_w != crtc_w || src_h != crtc_h); 223 src_w != crtc_w || src_h != crtc_h);
223 224
224 /* Sizes are 0 based */ 225 /* Sizes are 0 based */
@@ -283,7 +284,7 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
283 if (atomic_update) 284 if (atomic_update)
284 intel_pipe_update_end(intel_crtc, start_vbl_count); 285 intel_pipe_update_end(intel_crtc, start_vbl_count);
285 286
286 intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false); 287 intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
287} 288}
288 289
289static int 290static int
@@ -406,7 +407,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
406 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 407 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
407 sprctl |= SPRITE_PIPE_CSC_ENABLE; 408 sprctl |= SPRITE_PIPE_CSC_ENABLE;
408 409
409 intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true, 410 intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size,
411 true,
410 src_w != crtc_w || src_h != crtc_h); 412 src_w != crtc_w || src_h != crtc_h);
411 413
412 /* Sizes are 0 based */ 414 /* Sizes are 0 based */
@@ -486,7 +488,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
486 */ 488 */
487 intel_wait_for_vblank(dev, pipe); 489 intel_wait_for_vblank(dev, pipe);
488 490
489 intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false); 491 intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
490} 492}
491 493
492static int 494static int
@@ -606,7 +608,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
606 dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ 608 dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
607 dvscntr |= DVS_ENABLE; 609 dvscntr |= DVS_ENABLE;
608 610
609 intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true, 611 intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
612 pixel_size, true,
610 src_w != crtc_w || src_h != crtc_h); 613 src_w != crtc_w || src_h != crtc_h);
611 614
612 /* Sizes are 0 based */ 615 /* Sizes are 0 based */
@@ -681,7 +684,7 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
681 */ 684 */
682 intel_wait_for_vblank(dev, pipe); 685 intel_wait_for_vblank(dev, pipe);
683 686
684 intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false); 687 intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
685} 688}
686 689
687static void 690static void
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index e0f0843569a6..e81bc3bdc533 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -514,20 +514,30 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
514} 514}
515 515
516static void 516static void
517hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) 517hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
518 bool before)
518{ 519{
520 const char *op = read ? "reading" : "writing to";
521 const char *when = before ? "before" : "after";
522
523 if (!i915.mmio_debug)
524 return;
525
519 if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { 526 if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
520 DRM_ERROR("Unknown unclaimed register before writing to %x\n", 527 WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
521 reg); 528 when, op, reg);
522 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 529 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
523 } 530 }
524} 531}
525 532
526static void 533static void
527hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) 534hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
528{ 535{
536 if (i915.mmio_debug)
537 return;
538
529 if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { 539 if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
530 DRM_ERROR("Unclaimed write to %x\n", reg); 540 DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem.");
531 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 541 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
532 } 542 }
533} 543}
@@ -564,6 +574,7 @@ gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
564static u##x \ 574static u##x \
565gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ 575gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
566 REG_READ_HEADER(x); \ 576 REG_READ_HEADER(x); \
577 hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
567 if (dev_priv->uncore.forcewake_count == 0 && \ 578 if (dev_priv->uncore.forcewake_count == 0 && \
568 NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 579 NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
569 dev_priv->uncore.funcs.force_wake_get(dev_priv, \ 580 dev_priv->uncore.funcs.force_wake_get(dev_priv, \
@@ -574,6 +585,7 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
574 } else { \ 585 } else { \
575 val = __raw_i915_read##x(dev_priv, reg); \ 586 val = __raw_i915_read##x(dev_priv, reg); \
576 } \ 587 } \
588 hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
577 REG_READ_FOOTER; \ 589 REG_READ_FOOTER; \
578} 590}
579 591
@@ -700,12 +712,13 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
700 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 712 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
701 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ 713 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
702 } \ 714 } \
703 hsw_unclaimed_reg_clear(dev_priv, reg); \ 715 hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
704 __raw_i915_write##x(dev_priv, reg, val); \ 716 __raw_i915_write##x(dev_priv, reg, val); \
705 if (unlikely(__fifo_ret)) { \ 717 if (unlikely(__fifo_ret)) { \
706 gen6_gt_check_fifodbg(dev_priv); \ 718 gen6_gt_check_fifodbg(dev_priv); \
707 } \ 719 } \
708 hsw_unclaimed_reg_check(dev_priv, reg); \ 720 hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
721 hsw_unclaimed_reg_detect(dev_priv); \
709 REG_WRITE_FOOTER; \ 722 REG_WRITE_FOOTER; \
710} 723}
711 724
@@ -734,6 +747,7 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
734static void \ 747static void \
735gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ 748gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
736 REG_WRITE_HEADER; \ 749 REG_WRITE_HEADER; \
750 hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
737 if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \ 751 if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
738 if (dev_priv->uncore.forcewake_count == 0) \ 752 if (dev_priv->uncore.forcewake_count == 0) \
739 dev_priv->uncore.funcs.force_wake_get(dev_priv, \ 753 dev_priv->uncore.funcs.force_wake_get(dev_priv, \
@@ -745,6 +759,8 @@ gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
745 } else { \ 759 } else { \
746 __raw_i915_write##x(dev_priv, reg, val); \ 760 __raw_i915_write##x(dev_priv, reg, val); \
747 } \ 761 } \
762 hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
763 hsw_unclaimed_reg_detect(dev_priv); \
748 REG_WRITE_FOOTER; \ 764 REG_WRITE_FOOTER; \
749} 765}
750 766
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 284b80fc3c54..b08a450d1b5d 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -119,13 +119,6 @@ struct omap_drm_private {
119 struct omap_drm_irq error_handler; 119 struct omap_drm_irq error_handler;
120}; 120};
121 121
122/* this should probably be in drm-core to standardize amongst drivers */
123#define DRM_ROTATE_0 0
124#define DRM_ROTATE_90 1
125#define DRM_ROTATE_180 2
126#define DRM_ROTATE_270 3
127#define DRM_REFLECT_X 4
128#define DRM_REFLECT_Y 5
129 122
130#ifdef CONFIG_DEBUG_FS 123#ifdef CONFIG_DEBUG_FS
131int omap_debugfs_init(struct drm_minor *minor); 124int omap_debugfs_init(struct drm_minor *minor);
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 6af3398b5278..891a4dc608af 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -308,16 +308,13 @@ void omap_plane_install_properties(struct drm_plane *plane,
308 if (priv->has_dmm) { 308 if (priv->has_dmm) {
309 prop = priv->rotation_prop; 309 prop = priv->rotation_prop;
310 if (!prop) { 310 if (!prop) {
311 const struct drm_prop_enum_list props[] = { 311 prop = drm_mode_create_rotation_property(dev,
312 { DRM_ROTATE_0, "rotate-0" }, 312 BIT(DRM_ROTATE_0) |
313 { DRM_ROTATE_90, "rotate-90" }, 313 BIT(DRM_ROTATE_90) |
314 { DRM_ROTATE_180, "rotate-180" }, 314 BIT(DRM_ROTATE_180) |
315 { DRM_ROTATE_270, "rotate-270" }, 315 BIT(DRM_ROTATE_270) |
316 { DRM_REFLECT_X, "reflect-x" }, 316 BIT(DRM_REFLECT_X) |
317 { DRM_REFLECT_Y, "reflect-y" }, 317 BIT(DRM_REFLECT_Y));
318 };
319 prop = drm_property_create_bitmask(dev, 0, "rotation",
320 props, ARRAY_SIZE(props));
321 if (prop == NULL) 318 if (prop == NULL)
322 return; 319 return;
323 priv->rotation_prop = prop; 320 priv->rotation_prop = prop;
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 7f1bc7e4848b..f1105d0da059 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -76,6 +76,14 @@ static inline uint64_t I642U64(int64_t val)
76 return (uint64_t)*((uint64_t *)&val); 76 return (uint64_t)*((uint64_t *)&val);
77} 77}
78 78
79/* rotation property bits */
80#define DRM_ROTATE_0 0
81#define DRM_ROTATE_90 1
82#define DRM_ROTATE_180 2
83#define DRM_ROTATE_270 3
84#define DRM_REFLECT_X 4
85#define DRM_REFLECT_Y 5
86
79enum drm_connector_force { 87enum drm_connector_force {
80 DRM_FORCE_UNSPECIFIED, 88 DRM_FORCE_UNSPECIFIED,
81 DRM_FORCE_OFF, 89 DRM_FORCE_OFF,
@@ -835,6 +843,7 @@ struct drm_mode_config {
835 843
836 /* Optional properties */ 844 /* Optional properties */
837 struct drm_property *scaling_mode_property; 845 struct drm_property *scaling_mode_property;
846 struct drm_property *aspect_ratio_property;
838 struct drm_property *dirty_info_property; 847 struct drm_property *dirty_info_property;
839 848
840 /* dumb ioctl parameters */ 849 /* dumb ioctl parameters */
@@ -1011,7 +1020,8 @@ extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int
1011struct drm_property *drm_property_create_bitmask(struct drm_device *dev, 1020struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
1012 int flags, const char *name, 1021 int flags, const char *name,
1013 const struct drm_prop_enum_list *props, 1022 const struct drm_prop_enum_list *props,
1014 int num_values); 1023 int num_props,
1024 uint64_t supported_bits);
1015struct drm_property *drm_property_create_range(struct drm_device *dev, int flags, 1025struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
1016 const char *name, 1026 const char *name,
1017 uint64_t min, uint64_t max); 1027 uint64_t min, uint64_t max);
@@ -1027,6 +1037,7 @@ extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
1027extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats, 1037extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
1028 char *formats[]); 1038 char *formats[]);
1029extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); 1039extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
1040extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
1030extern int drm_mode_create_dirty_info_property(struct drm_device *dev); 1041extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
1031 1042
1032extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, 1043extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
@@ -1117,6 +1128,10 @@ extern int drm_format_plane_cpp(uint32_t format, int plane);
1117extern int drm_format_horz_chroma_subsampling(uint32_t format); 1128extern int drm_format_horz_chroma_subsampling(uint32_t format);
1118extern int drm_format_vert_chroma_subsampling(uint32_t format); 1129extern int drm_format_vert_chroma_subsampling(uint32_t format);
1119extern const char *drm_get_format_name(uint32_t format); 1130extern const char *drm_get_format_name(uint32_t format);
1131extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
1132 unsigned int supported_rotations);
1133extern unsigned int drm_rotation_simplify(unsigned int rotation,
1134 unsigned int supported_rotations);
1120 1135
1121/* Helpers */ 1136/* Helpers */
1122 1137
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index d1286297567b..26bb55e9e8b6 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -163,5 +163,11 @@ int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
163 struct drm_rect *dst, 163 struct drm_rect *dst,
164 int min_vscale, int max_vscale); 164 int min_vscale, int max_vscale);
165void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point); 165void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point);
166void drm_rect_rotate(struct drm_rect *r,
167 int width, int height,
168 unsigned int rotation);
169void drm_rect_rotate_inv(struct drm_rect *r,
170 int width, int height,
171 unsigned int rotation);
166 172
167#endif 173#endif
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index def54f9e07ca..a0db2d4aa5f0 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -88,6 +88,11 @@
88#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */ 88#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
89#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */ 89#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
90 90
91/* Picture aspect ratio options */
92#define DRM_MODE_PICTURE_ASPECT_NONE 0
93#define DRM_MODE_PICTURE_ASPECT_4_3 1
94#define DRM_MODE_PICTURE_ASPECT_16_9 2
95
91/* Dithering mode options */ 96/* Dithering mode options */
92#define DRM_MODE_DITHERING_OFF 0 97#define DRM_MODE_DITHERING_OFF 0
93#define DRM_MODE_DITHERING_ON 1 98#define DRM_MODE_DITHERING_ON 1