author    Ingo Molnar <mingo@kernel.org>  2018-03-27 02:43:39 -0400
committer Ingo Molnar <mingo@kernel.org>  2018-03-27 02:43:39 -0400
commit    0bc91d4ba77156ae9217d25ed7c434540f950d05 (patch)
tree      949c1acf27b106184d8842586740fbbcc9c9ea62 /drivers/gpu
parent    565977a3d929fc4427769117a8ac976ec16776d5 (diff)
parent    3eb2ce825ea1ad89d20f7a3b5780df850e4be274 (diff)
Merge tag 'v4.16-rc7' into x86/mm, to fix up conflict

Conflicts:
	arch/x86/mm/init_64.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c            | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c                |  9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c                   |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h                  |  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c                |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_encoders.c            |  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_encoders.h            |  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c                    |  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c                    |  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c                     |  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c                     |  8
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c         |  5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c |  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h            |  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_opp.c              |  9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 20
-rw-r--r--  drivers/gpu/drm/ast/ast_tables.h                          |  4
-rw-r--r--  drivers/gpu/drm/drm_framebuffer.c                         |  7
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c                     |  8
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c                   |  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c                      | 71
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h                      |  5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c                           | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c                         | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c                          |  7
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c                           | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_hangcheck.c                    |  4
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c                          |  5
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c                         | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c               | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c             |  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c                | 31
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c                       |  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c                    |  2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c                         |  3
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c                    |  6
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c                        |  5
-rw-r--r--  drivers/gpu/drm/tegra/dc.c                                | 16
-rw-r--r--  drivers/gpu/drm/tegra/drm.c                               |  1
-rw-r--r--  drivers/gpu/drm/tegra/dsi.c                               |  1
-rw-r--r--  drivers/gpu/drm/tegra/plane.c                             |  9
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c                              |  9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c                       | 13
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h                       |  1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c                       | 39
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h                       | 13
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c                      |  5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c                      |  5
-rw-r--r--  drivers/gpu/ipu-v3/ipu-prg.c                              | 12
49 files changed, 344 insertions, 134 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 74d2efaec52f..7a073ac5f9c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
 	/* don't do anything if sink is not display port, i.e.,
 	 * passive dp->(dvi|hdmi) adaptor
 	 */
-	if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
-		int saved_dpms = connector->dpms;
-		/* Only turn off the display if it's physically disconnected */
-		if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
-			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-		} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
-			/* Don't try to start link training before we
-			 * have the dpcd */
-			if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
-				return;
-
-			/* set it to OFF so that drm_helper_connector_dpms()
-			 * won't return immediately since the current state
-			 * is ON at this point.
-			 */
-			connector->dpms = DRM_MODE_DPMS_OFF;
-			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-		}
-		connector->dpms = saved_dpms;
+	if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+	    amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) &&
+	    amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+		/* Don't start link training before we have the DPCD */
+		if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+			return;
+
+		/* Turn the connector off and back on immediately, which
+		 * will trigger link training
+		 */
+		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 	}
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index af1b879a9ee9..66cb10cdc7c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2063,9 +2063,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 
 	DRM_INFO("amdgpu: finishing device.\n");
 	adev->shutdown = true;
-	if (adev->mode_info.mode_config_initialized)
-		drm_crtc_force_disable_all(adev->ddev);
-
+	if (adev->mode_info.mode_config_initialized){
+		if (!amdgpu_device_has_dc_support(adev))
+			drm_crtc_force_disable_all(adev->ddev);
+		else
+			drm_atomic_helper_shutdown(adev->ddev);
+	}
 	amdgpu_ib_pool_fini(adev);
 	amdgpu_fence_driver_fini(adev);
 	amdgpu_fbdev_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index e48b4ec88c8c..ca6c931dabfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
 
 	if (robj) {
-		if (robj->gem_base.import_attach)
-			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
 		amdgpu_mn_unregister(robj);
 		amdgpu_bo_unref(&robj);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 54f06c959340..2264c5c97009 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -352,6 +352,7 @@ struct amdgpu_mode_info {
 	u16 firmware_flags;
 	/* pointer to backlight encoder */
 	struct amdgpu_encoder *bl_encoder;
+	u8 bl_level; /* saved backlight level */
 	struct amdgpu_audio audio; /* audio stuff */
 	int num_crtc; /* number of crtcs */
 	int num_hpd; /* number of hpd pins */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5c4c3e0d527b..1220322c1680 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -56,6 +56,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 
 	amdgpu_bo_kunmap(bo);
 
+	if (bo->gem_base.import_attach)
+		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
 	drm_gem_object_release(&bo->gem_base);
 	amdgpu_bo_unref(&bo->parent);
 	if (!list_empty(&bo->shadow_list)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 2af26d2da127..d702fb8e3427 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -34,7 +34,7 @@
 #include <linux/backlight.h>
 #include "bif/bif_4_1_d.h"
 
-static u8
+u8
 amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
 {
 	u8 backlight_level;
@@ -48,7 +48,7 @@ amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
 	return backlight_level;
 }
 
-static void
+void
 amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
 						   u8 backlight_level)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
index 2bdec40515ce..f77cbdef679e 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
@@ -25,6 +25,11 @@
 #define __ATOMBIOS_ENCODER_H__
 
 u8
+amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev);
+void
+amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
+						   u8 backlight_level);
+u8
 amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder);
 void
 amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index f34bc68aadfb..022f303463fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2921,6 +2921,11 @@ static int dce_v10_0_hw_fini(void *handle)
 
 static int dce_v10_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v10_0_hw_fini(handle);
 }
 
@@ -2929,6 +2934,9 @@ static int dce_v10_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v10_0_hw_init(handle);
 
 	/* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 26378bd6aba4..800a9f36ab4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3047,6 +3047,11 @@ static int dce_v11_0_hw_fini(void *handle)
 
 static int dce_v11_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v11_0_hw_fini(handle);
 }
 
@@ -3055,6 +3060,9 @@ static int dce_v11_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v11_0_hw_init(handle);
 
 	/* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index a712f4b285f6..b8368f69ce1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2787,6 +2787,11 @@ static int dce_v6_0_hw_fini(void *handle)
 
 static int dce_v6_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v6_0_hw_fini(handle);
 }
 
@@ -2795,6 +2800,9 @@ static int dce_v6_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v6_0_hw_init(handle);
 
 	/* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index c008dc030687..012e0a9ae0ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2819,6 +2819,11 @@ static int dce_v8_0_hw_fini(void *handle)
 
 static int dce_v8_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v8_0_hw_fini(handle);
 }
 
@@ -2827,6 +2832,9 @@ static int dce_v8_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v8_0_hw_init(handle);
 
 	/* turn on the BL */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c345e645f1d7..63c67346d316 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3134,8 +3134,6 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
 
 	switch (aplane->base.type) {
 	case DRM_PLANE_TYPE_PRIMARY:
-		aplane->base.format_default = true;
-
 		res = drm_universal_plane_init(
 				dm->adev->ddev,
 				&aplane->base,
@@ -4794,6 +4792,9 @@ static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
 		return -EDEADLK;
 
 	crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
+	if (IS_ERR(crtc_state))
+		return PTR_ERR(crtc_state);
+
 	if (crtc->primary == plane && crtc_state->active) {
 		if (!plane_state->fb)
 			return -EINVAL;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 9bd142f65f9b..e1acc10e35a2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -109,7 +109,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
 		struct cea_sad *sad = &sads[i];
 
 		edid_caps->audio_modes[i].format_code = sad->format;
-		edid_caps->audio_modes[i].channel_count = sad->channels;
+		edid_caps->audio_modes[i].channel_count = sad->channels + 1;
 		edid_caps->audio_modes[i].sample_rate = sad->freq;
 		edid_caps->audio_modes[i].sample_size = sad->byte2;
 	}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index a993279a8f2d..f11f17fe08f9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -496,6 +496,9 @@ struct dce_hwseq_registers {
 	HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
 	HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
 	HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\
+	HWS_SF(, D2VGA_CONTROL, D2VGA_MODE_ENABLE, mask_sh),\
+	HWS_SF(, D3VGA_CONTROL, D3VGA_MODE_ENABLE, mask_sh),\
+	HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\
 	HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
 	HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
 	HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
@@ -591,7 +594,10 @@ struct dce_hwseq_registers {
 	type DENTIST_DISPCLK_WDIVIDER; \
 	type VGA_TEST_ENABLE; \
 	type VGA_TEST_RENDER_START; \
-	type D1VGA_MODE_ENABLE;
+	type D1VGA_MODE_ENABLE; \
+	type D2VGA_MODE_ENABLE; \
+	type D3VGA_MODE_ENABLE; \
+	type D4VGA_MODE_ENABLE;
 
 struct dce_hwseq_shift {
 	HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
index 3931412ab6d3..87093894ea9e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
@@ -128,23 +128,22 @@ static void set_truncation(
 		return;
 	}
 	/* on other format-to do */
-	if (params->flags.TRUNCATE_ENABLED == 0 ||
-		params->flags.TRUNCATE_DEPTH == 2)
+	if (params->flags.TRUNCATE_ENABLED == 0)
 		return;
 	/*Set truncation depth and Enable truncation*/
 	REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
 		FMT_TRUNCATE_EN, 1,
 		FMT_TRUNCATE_DEPTH,
-		params->flags.TRUNCATE_MODE,
+		params->flags.TRUNCATE_DEPTH,
 		FMT_TRUNCATE_MODE,
-		params->flags.TRUNCATE_DEPTH);
+		params->flags.TRUNCATE_MODE);
 }
 
 
 /**
  * set_spatial_dither
  * 1) set spatial dithering mode: pattern of seed
- * 2) set spatical dithering depth: 0 for 18bpp or 1 for 24bpp
+ * 2) set spatial dithering depth: 0 for 18bpp or 1 for 24bpp
  * 3) set random seed
  * 4) set random mode
  * lfsr is reset every frame or not reset
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 072e4485e85e..dc1e010725c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -238,14 +238,24 @@ static void enable_power_gating_plane(
 static void disable_vga(
 	struct dce_hwseq *hws)
 {
-	unsigned int in_vga_mode = 0;
-
-	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga_mode);
-
-	if (in_vga_mode == 0)
+	unsigned int in_vga1_mode = 0;
+	unsigned int in_vga2_mode = 0;
+	unsigned int in_vga3_mode = 0;
+	unsigned int in_vga4_mode = 0;
+
+	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
+	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
+	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
+	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
+
+	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
+			in_vga3_mode == 0 && in_vga4_mode == 0)
 		return;
 
 	REG_WRITE(D1VGA_CONTROL, 0);
+	REG_WRITE(D2VGA_CONTROL, 0);
+	REG_WRITE(D3VGA_CONTROL, 0);
+	REG_WRITE(D4VGA_CONTROL, 0);
 
 	/* HW Engineer's Notes:
 	 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
index 5f4c2e833a65..d665dd5af5dd 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -97,7 +97,7 @@ static const struct ast_vbios_dclk_info dclk_table[] = {
 	{0x67, 0x22, 0x00},			/* 0E: VCLK157_5	*/
 	{0x6A, 0x22, 0x00},			/* 0F: VCLK162		*/
 	{0x4d, 0x4c, 0x80},			/* 10: VCLK154		*/
-	{0xa7, 0x78, 0x80},			/* 11: VCLK83.5		*/
+	{0x68, 0x6f, 0x80},			/* 11: VCLK83.5		*/
 	{0x28, 0x49, 0x80},			/* 12: VCLK106.5	*/
 	{0x37, 0x49, 0x80},			/* 13: VCLK146.25	*/
 	{0x1f, 0x45, 0x80},			/* 14: VCLK148.5	*/
@@ -127,7 +127,7 @@ static const struct ast_vbios_dclk_info dclk_table_ast2500[] = {
 	{0x67, 0x22, 0x00},			/* 0E: VCLK157_5	*/
 	{0x6A, 0x22, 0x00},			/* 0F: VCLK162		*/
 	{0x4d, 0x4c, 0x80},			/* 10: VCLK154		*/
-	{0xa7, 0x78, 0x80},			/* 11: VCLK83.5		*/
+	{0x68, 0x6f, 0x80},			/* 11: VCLK83.5		*/
 	{0x28, 0x49, 0x80},			/* 12: VCLK106.5	*/
 	{0x37, 0x49, 0x80},			/* 13: VCLK146.25	*/
 	{0x1f, 0x45, 0x80},			/* 14: VCLK148.5	*/
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index c0530a1af5e3..2dc5e8bed172 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -461,6 +461,12 @@ int drm_mode_getfb(struct drm_device *dev,
 	if (!fb)
 		return -ENOENT;
 
+	/* Multi-planar framebuffers need getfb2. */
+	if (fb->format->num_planes > 1) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	r->height = fb->height;
 	r->width = fb->width;
 	r->depth = fb->format->depth;
@@ -484,6 +490,7 @@ int drm_mode_getfb(struct drm_device *dev,
 		ret = -ENODEV;
 	}
 
+out:
 	drm_framebuffer_put(fb);
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index c8454ac43fae..db6b94dda5df 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -471,6 +471,7 @@ struct parser_exec_state {
 	 * used when ret from 2nd level batch buffer
 	 */
 	int saved_buf_addr_type;
+	bool is_ctx_wa;
 
 	struct cmd_info *info;
 
@@ -1715,6 +1716,11 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	bb->accessing = true;
 	bb->bb_start_cmd_va = s->ip_va;
 
+	if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
+		bb->bb_offset = s->ip_va - s->rb_va;
+	else
+		bb->bb_offset = 0;
+
 	/*
 	 * ip_va saves the virtual address of the shadow batch buffer, while
 	 * ip_gma saves the graphics address of the original batch buffer.
@@ -2571,6 +2577,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
 	s.ring_tail = gma_tail;
 	s.rb_va = workload->shadow_ring_buffer_va;
 	s.workload = workload;
+	s.is_ctx_wa = false;
 
 	if ((bypass_scan_mask & (1 << workload->ring_id)) ||
 		gma_head == gma_tail)
@@ -2624,6 +2631,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	s.ring_tail = gma_tail;
 	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
 	s.workload = workload;
+	s.is_ctx_wa = true;
 
 	if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
 		ret = -EINVAL;
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 256f1bb522b7..152df3d0291e 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -394,9 +394,11 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
 	 * performace for batch mmio read/write, so we need
 	 * handle forcewake mannually.
 	 */
+	intel_runtime_pm_get(dev_priv);
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 	switch_mmio(pre, next, ring_id);
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_runtime_pm_put(dev_priv);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index b55b3580ca1d..d74d6f05c62c 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -52,6 +52,54 @@ static void set_context_pdp_root_pointer(
 		pdp_pair[i].val = pdp[7 - i];
 }
 
+/*
+ * when populating shadow ctx from guest, we should not overrride oa related
+ * registers, so that they will not be overlapped by guest oa configs. Thus
+ * made it possible to capture oa data from host for both host and guests.
+ */
+static void sr_oa_regs(struct intel_vgpu_workload *workload,
+		u32 *reg_state, bool save)
+{
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
+	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+	int i = 0;
+	u32 flex_mmio[] = {
+		i915_mmio_reg_offset(EU_PERF_CNTL0),
+		i915_mmio_reg_offset(EU_PERF_CNTL1),
+		i915_mmio_reg_offset(EU_PERF_CNTL2),
+		i915_mmio_reg_offset(EU_PERF_CNTL3),
+		i915_mmio_reg_offset(EU_PERF_CNTL4),
+		i915_mmio_reg_offset(EU_PERF_CNTL5),
+		i915_mmio_reg_offset(EU_PERF_CNTL6),
+	};
+
+	if (!workload || !reg_state || workload->ring_id != RCS)
+		return;
+
+	if (save) {
+		workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
+
+		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
+			u32 state_offset = ctx_flexeu0 + i * 2;
+
+			workload->flex_mmio[i] = reg_state[state_offset + 1];
+		}
+	} else {
+		reg_state[ctx_oactxctrl] =
+			i915_mmio_reg_offset(GEN8_OACTXCONTROL);
+		reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
+
+		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
+			u32 state_offset = ctx_flexeu0 + i * 2;
+			u32 mmio = flex_mmio[i];
+
+			reg_state[state_offset] = mmio;
+			reg_state[state_offset + 1] = workload->flex_mmio[i];
+		}
+	}
+}
+
 static int populate_shadow_context(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
@@ -98,6 +146,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
 	shadow_ring_context = kmap(page);
 
+	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
 #define COPY_REG(name) \
 	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
 		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
@@ -122,6 +171,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 			sizeof(*shadow_ring_context),
 			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
 
+	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
 	kunmap(page);
 	return 0;
 }
@@ -376,6 +426,17 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 			goto err;
 		}
 
+		/* For privilge batch buffer and not wa_ctx, the bb_start_cmd_va
+		 * is only updated into ring_scan_buffer, not real ring address
+		 * allocated in later copy_workload_to_ring_buffer. pls be noted
+		 * shadow_ring_buffer_va is now pointed to real ring buffer va
+		 * in copy_workload_to_ring_buffer.
+		 */
+
+		if (bb->bb_offset)
+			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
+				+ bb->bb_offset;
+
 		/* relocate shadow batch buffer */
 		bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
 		if (gmadr_bytes == 8)
@@ -1044,10 +1105,12 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 
 	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
-	s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
+	s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
 			sizeof(struct intel_vgpu_workload), 0,
 			SLAB_HWCACHE_ALIGN,
-			NULL);
+			offsetof(struct intel_vgpu_workload, rb_tail),
+			sizeof_field(struct intel_vgpu_workload, rb_tail),
+			NULL);
 
 	if (!s->workloads) {
 		ret = -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index ff175a98b19e..a79a4f60637e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -110,6 +110,10 @@ struct intel_vgpu_workload {
 	/* shadow batch buffer */
 	struct list_head shadow_bb;
 	struct intel_shadow_wa_ctx wa_ctx;
+
+	/* oa registers */
+	u32 oactxctrl;
+	u32 flex_mmio[7];
 };
 
 struct intel_vgpu_shadow_bb {
@@ -120,6 +124,7 @@ struct intel_vgpu_shadow_bb {
 	u32 *bb_start_cmd_va;
 	unsigned int clflush;
 	bool accessing;
+	unsigned long bb_offset;
 };
 
 #define workload_q_head(vgpu, ring_id) \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 66ee9d888d16..6ff5d655c202 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -434,20 +434,28 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 			dma_fence_put(shared[i]);
 		kfree(shared);
 
+		/*
+		 * If both shared fences and an exclusive fence exist,
+		 * then by construction the shared fences must be later
+		 * than the exclusive fence. If we successfully wait for
+		 * all the shared fences, we know that the exclusive fence
+		 * must all be signaled. If all the shared fences are
+		 * signaled, we can prune the array and recover the
+		 * floating references on the fences/requests.
+		 */
 		prune_fences = count && timeout >= 0;
 	} else {
 		excl = reservation_object_get_excl_rcu(resv);
 	}
 
-	if (excl && timeout >= 0) {
+	if (excl && timeout >= 0)
 		timeout = i915_gem_object_wait_fence(excl, flags, timeout,
 						     rps_client);
-		prune_fences = timeout >= 0;
-	}
 
 	dma_fence_put(excl);
 
-	/* Oportunistically prune the fences iff we know they have *all* been
+	/*
+	 * Opportunistically prune the fences iff we know they have *all* been
 	 * signaled and that the reservation object has not been changed (i.e.
 	 * no new fences have been added).
 	 */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index b33d2158c234..e5e6f6bb2b05 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -304,8 +304,9 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	u32 val;
+	bool boost = false;
 	ssize_t ret;
+	u32 val;
 
 	ret = kstrtou32(buf, 0, &val);
 	if (ret)
@@ -317,8 +318,13 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 		return -EINVAL;
 
 	mutex_lock(&dev_priv->pcu_lock);
-	rps->boost_freq = val;
+	if (val != rps->boost_freq) {
+		rps->boost_freq = val;
+		boost = atomic_read(&rps->num_waiters);
+	}
 	mutex_unlock(&dev_priv->pcu_lock);
+	if (boost)
+		schedule_work(&rps->work);
 
 	return count;
 }
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f51645a08dca..6aff9d096e13 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2175,8 +2175,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 	intel_prepare_dp_ddi_buffers(encoder, crtc_state);
 
 	intel_ddi_init_dp_buf_reg(encoder);
-	if (!is_mst)
-		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 	intel_dp_start_link_train(intel_dp);
 	if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
 		intel_dp_stop_link_train(intel_dp);
@@ -2274,14 +2273,12 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
 	struct intel_dp *intel_dp = &dig_port->dp;
-	bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST);
 
 	/*
 	 * Power down sink before disabling the port, otherwise we end
 	 * up getting interrupts from the sink on detecting link loss.
 	 */
-	if (!is_mst)
-		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 
 	intel_disable_ddi_buf(encoder);
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 35c5299feab6..a29868cd30c7 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -620,19 +620,15 @@ static int
 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+	int backlight_controller = dev_priv->vbt.backlight.controller;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
 	/* We should never land here with regular DP ports */
 	WARN_ON(!intel_dp_is_edp(intel_dp));
 
-	/*
-	 * TODO: BXT has 2 PPS instances. The correct port->PPS instance
-	 * mapping needs to be retrieved from VBT, for now just hard-code to
-	 * use instance #0 always.
-	 */
 	if (!intel_dp->pps_reset)
-		return 0;
+		return backlight_controller;
 
 	intel_dp->pps_reset = false;
 
@@ -642,7 +638,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 	 */
 	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
 
-	return 0;
+	return backlight_controller;
 }
 
 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index 348a4f7ffb67..53747318f4a7 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -246,7 +246,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 	 */
 	tmp = I915_READ_CTL(engine);
 	if (tmp & RING_WAIT) {
-		i915_handle_error(dev_priv, 0,
+		i915_handle_error(dev_priv, BIT(engine->id),
 				  "Kicking stuck wait on %s",
 				  engine->name);
 		I915_WRITE_CTL(engine, tmp);
@@ -258,7 +258,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 		default:
 			return ENGINE_DEAD;
 		case 1:
-			i915_handle_error(dev_priv, 0,
+			i915_handle_error(dev_priv, ALL_ENGINES,
 					  "Kicking stuck semaphore on %s",
 					  engine->name);
 			I915_WRITE_CTL(engine, tmp);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 9a9961802f5c..e83af0f2be86 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -225,7 +225,11 @@ static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
 				  struct drm_crtc_state *old_crtc_state)
 {
 	drm_crtc_vblank_on(crtc);
+}
 
+static void ipu_crtc_atomic_flush(struct drm_crtc *crtc,
+				  struct drm_crtc_state *old_crtc_state)
+{
 	spin_lock_irq(&crtc->dev->event_lock);
 	if (crtc->state->event) {
 		WARN_ON(drm_crtc_vblank_get(crtc));
@@ -293,6 +297,7 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
 	.mode_set_nofb = ipu_crtc_mode_set_nofb,
 	.atomic_check = ipu_crtc_atomic_check,
 	.atomic_begin = ipu_crtc_atomic_begin,
+	.atomic_flush = ipu_crtc_atomic_flush,
 	.atomic_disable = ipu_crtc_atomic_disable,
 	.atomic_enable = ipu_crtc_atomic_enable,
 };
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 57ed56d8623f..d9113faaa62f 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -22,6 +22,7 @@
 #include <drm/drm_plane_helper.h>
 
 #include "video/imx-ipu-v3.h"
+#include "imx-drm.h"
 #include "ipuv3-plane.h"
 
 struct ipu_plane_state {
@@ -272,7 +273,7 @@ static void ipu_plane_destroy(struct drm_plane *plane)
 	kfree(ipu_plane);
 }
 
-void ipu_plane_state_reset(struct drm_plane *plane)
+static void ipu_plane_state_reset(struct drm_plane *plane)
 {
 	struct ipu_plane_state *ipu_state;
 
@@ -292,7 +293,8 @@ void ipu_plane_state_reset(struct drm_plane *plane)
 	plane->state = &ipu_state->base;
 }
 
-struct drm_plane_state *ipu_plane_duplicate_state(struct drm_plane *plane)
+static struct drm_plane_state *
+ipu_plane_duplicate_state(struct drm_plane *plane)
 {
 	struct ipu_plane_state *state;
 
@@ -306,8 +308,8 @@ struct drm_plane_state *ipu_plane_duplicate_state(struct drm_plane *plane)
 	return &state->base;
 }
 
-void ipu_plane_destroy_state(struct drm_plane *plane,
+static void ipu_plane_destroy_state(struct drm_plane *plane,
 				    struct drm_plane_state *state)
 {
 	struct ipu_plane_state *ipu_state = to_ipu_plane_state(state);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 380f340204e8..debbbf0fd4bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -134,7 +134,7 @@ nv50_get_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div = 1025;
 	u32 val;
 
@@ -149,7 +149,7 @@ nv50_set_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div = 1025;
 	u32 val = (bd->props.brightness * div) / 100;
 
@@ -170,7 +170,7 @@ nva3_get_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div, val;
 
 	div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
@@ -188,7 +188,7 @@ nva3_set_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div, val;
 
 	div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
@@ -228,7 +228,7 @@ nv50_backlight_init(struct drm_connector *connector)
 		return -ENODEV;
 	}
 
-	if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
+	if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)))
 		return 0;
 
 	if (drm->client.device.info.chipset <= 0xa0 ||
@@ -268,13 +268,13 @@ nouveau_backlight_init(struct drm_device *dev)
 	struct nvif_device *device = &drm->client.device;
 	struct drm_connector *connector;
 
+	INIT_LIST_HEAD(&drm->bl_connectors);
+
 	if (apple_gmux_present()) {
 		NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n");
 		return 0;
 	}
 
-	INIT_LIST_HEAD(&drm->bl_connectors);
-
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
 		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 93946dcee319..1c12e58f44c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -1354,7 +1354,7 @@ nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
 
 		tail = this->addr + this->size;
 		if (vmm->func->page_block && next && next->page != p)
-			tail = ALIGN_DOWN(addr, vmm->func->page_block);
+			tail = ALIGN_DOWN(tail, vmm->func->page_block);
 
 		if (addr <= tail && tail - addr >= size) {
 			rb_erase(&this->tree, &vmm->free);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2e2ca3c6b47d..df9469a8fdb1 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -90,25 +90,18 @@ void radeon_connector_hotplug(struct drm_connector *connector)
 	/* don't do anything if sink is not display port, i.e.,
 	 * passive dp->(dvi|hdmi) adaptor
 	 */
-	if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
-		int saved_dpms = connector->dpms;
-		/* Only turn off the display if it's physically disconnected */
-		if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
-			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-		} else if (radeon_dp_needs_link_train(radeon_connector)) {
-			/* Don't try to start link training before we
-			 * have the dpcd */
-			if (!radeon_dp_getdpcd(radeon_connector))
-				return;
-
-			/* set it to OFF so that drm_helper_connector_dpms()
-			 * won't return immediately since the current state
-			 * is ON at this point.
-			 */
-			connector->dpms = DRM_MODE_DPMS_OFF;
-			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-		}
-		connector->dpms = saved_dpms;
+	if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+	    radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
+	    radeon_dp_needs_link_train(radeon_connector)) {
+		/* Don't start link training before we have the DPCD */
+		if (!radeon_dp_getdpcd(radeon_connector))
+			return;
+
+		/* Turn the connector off and back on immediately, which
+		 * will trigger link training
+		 */
+		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 	}
 	}
 }
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index a9962ffba720..27d8e7dd2d06 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -34,8 +34,6 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
 	struct radeon_bo *robj = gem_to_radeon_bo(gobj);
 
 	if (robj) {
-		if (robj->gem_base.import_attach)
-			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
 		radeon_mn_unregister(robj);
 		radeon_bo_unref(&robj);
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 15404af9d740..31f5ad605e59 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -82,6 +82,8 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 	mutex_unlock(&bo->rdev->gem.mutex);
 	radeon_bo_clear_surface_reg(bo);
 	WARN_ON_ONCE(!list_empty(&bo->va));
+	if (bo->gem_base.import_attach)
+		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
 	drm_gem_object_release(&bo->gem_base);
 	kfree(bo);
 }
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 4570da0227b4..d9a71f361b14 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -111,7 +111,7 @@ static int sun4i_drv_bind(struct device *dev)
 	/* drm_vblank_init calls kcalloc, which can fail */
 	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
 	if (ret)
-		goto free_mem_region;
+		goto cleanup_mode_config;
 
 	drm->irq_enabled = true;
 
@@ -139,7 +139,6 @@ finish_poll:
 	sun4i_framebuffer_free(drm);
 cleanup_mode_config:
 	drm_mode_config_cleanup(drm);
-free_mem_region:
 	of_reserved_mem_device_release(dev);
 free_drm:
 	drm_dev_unref(drm);
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 500b6fb3e028..fa4bcd092eaf 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -538,7 +538,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
 					       &sun4i_hdmi_regmap_config);
 	if (IS_ERR(hdmi->regmap)) {
 		dev_err(dev, "Couldn't create HDMI encoder regmap\n");
-		return PTR_ERR(hdmi->regmap);
+		ret = PTR_ERR(hdmi->regmap);
+		goto err_disable_mod_clk;
 	}
 
 	ret = sun4i_tmds_create(hdmi);
@@ -551,7 +552,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
 		hdmi->ddc_parent_clk = devm_clk_get(dev, "ddc");
 		if (IS_ERR(hdmi->ddc_parent_clk)) {
 			dev_err(dev, "Couldn't get the HDMI DDC clock\n");
-			return PTR_ERR(hdmi->ddc_parent_clk);
+			ret = PTR_ERR(hdmi->ddc_parent_clk);
+			goto err_disable_mod_clk;
 		}
 	} else {
 		hdmi->ddc_parent_clk = hdmi->tmds_clk;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 2de586b7c98b..a818ca491605 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -103,6 +103,7 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
 
 	if (enabled) {
 		clk_prepare_enable(clk);
+		clk_rate_exclusive_get(clk);
 	} else {
 		clk_rate_exclusive_put(clk);
 		clk_disable_unprepare(clk);
@@ -262,7 +263,7 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
 					const struct drm_display_mode *mode)
 {
 	/* Configure the dot clock */
-	clk_set_rate_exclusive(tcon->dclk, mode->crtc_clock * 1000);
+	clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
 
 	/* Set the resolution */
 	regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,
@@ -423,7 +424,7 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
 	WARN_ON(!tcon->quirks->has_channel_1);
 
 	/* Configure the dot clock */
-	clk_set_rate_exclusive(tcon->sclk1, mode->crtc_clock * 1000);
+	clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000);
 
 	/* Adjust clock delay */
 	clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index b8403ed48285..fbffe1948b3b 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1903,8 +1903,12 @@ cleanup:
 	if (!IS_ERR(primary))
 		drm_plane_cleanup(primary);
 
-	if (group && tegra->domain) {
-		iommu_detach_group(tegra->domain, group);
+	if (group && dc->domain) {
+		if (group == tegra->group) {
+			iommu_detach_group(dc->domain, group);
+			tegra->group = NULL;
+		}
+
 		dc->domain = NULL;
 	}
 
@@ -1913,8 +1917,10 @@ cleanup:
1913 1917
1914static int tegra_dc_exit(struct host1x_client *client) 1918static int tegra_dc_exit(struct host1x_client *client)
1915{ 1919{
1920 struct drm_device *drm = dev_get_drvdata(client->parent);
1916 struct iommu_group *group = iommu_group_get(client->dev); 1921 struct iommu_group *group = iommu_group_get(client->dev);
1917 struct tegra_dc *dc = host1x_client_to_dc(client); 1922 struct tegra_dc *dc = host1x_client_to_dc(client);
1923 struct tegra_drm *tegra = drm->dev_private;
1918 int err; 1924 int err;
1919 1925
1920 devm_free_irq(dc->dev, dc->irq, dc); 1926 devm_free_irq(dc->dev, dc->irq, dc);
@@ -1926,7 +1932,11 @@ static int tegra_dc_exit(struct host1x_client *client)
1926 } 1932 }
1927 1933
1928 if (group && dc->domain) { 1934 if (group && dc->domain) {
1929 iommu_detach_group(dc->domain, group); 1935 if (group == tegra->group) {
1936 iommu_detach_group(dc->domain, group);
1937 tegra->group = NULL;
1938 }
1939
1930 dc->domain = NULL; 1940 dc->domain = NULL;
1931 } 1941 }
1932 1942
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index d50bddb2e447..7fcf4a242840 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -250,6 +250,7 @@ static void tegra_drm_unload(struct drm_device *drm)
 
 	drm_kms_helper_poll_fini(drm);
 	tegra_drm_fb_exit(drm);
+	drm_atomic_helper_shutdown(drm);
 	drm_mode_config_cleanup(drm);
 
 	err = host1x_device_exit(device);
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 4d2ed966f9e3..87c5d89bc9ba 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1072,7 +1072,6 @@ static int tegra_dsi_exit(struct host1x_client *client)
 	struct tegra_dsi *dsi = host1x_client_to_dsi(client);
 
 	tegra_output_exit(&dsi->output);
-	regulator_disable(dsi->vdd);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index 36a06a993698..94dac79ac3c9 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -297,6 +297,10 @@ int tegra_plane_format_get_alpha(unsigned int opaque, unsigned int *alpha)
 	case WIN_COLOR_DEPTH_B8G8R8X8:
 		*alpha = WIN_COLOR_DEPTH_B8G8R8A8;
 		return 0;
+
+	case WIN_COLOR_DEPTH_B5G6R5:
+		*alpha = opaque;
+		return 0;
 	}
 
 	return -EINVAL;
@@ -330,9 +334,6 @@ void tegra_plane_check_dependent(struct tegra_plane *tegra,
 	unsigned int zpos[2];
 	unsigned int i;
 
-	for (i = 0; i < 3; i++)
-		state->dependent[i] = false;
-
 	for (i = 0; i < 2; i++)
 		zpos[i] = 0;
 
@@ -346,6 +347,8 @@ void tegra_plane_check_dependent(struct tegra_plane *tegra,
 
 		index = tegra_plane_get_overlap_index(tegra, p);
 
+		state->dependent[index] = false;
+
 		/*
 		 * If any of the other planes is on top of this plane and uses
 		 * a format with an alpha component, mark this plane as being
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index b5b335c9b2bb..2ebdc6d5a76e 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -159,10 +159,15 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
 	unsigned long start = vma->vm_start;
 	unsigned long size = vma->vm_end - vma->vm_start;
-	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long offset;
 	unsigned long page, pos;
 
-	if (offset + size > info->fix.smem_len)
+	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+		return -EINVAL;
+
+	offset = vma->vm_pgoff << PAGE_SHIFT;
+
+	if (offset > info->fix.smem_len || size > info->fix.smem_len - offset)
 		return -EINVAL;
 
 	pos = (unsigned long)info->fix.smem_start + offset;
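The reworked udl_fb_mmap() check guards two separate overflows: vm_pgoff << PAGE_SHIFT wrapping around, and offset + size wrapping past the framebuffer length. A small self-contained sketch of the same idiom, using a hypothetical example_range_ok() helper and an assumed 4 KiB page size:

#include <stdbool.h>

#define EXAMPLE_PAGE_SHIFT 12	/* assumption: 4 KiB pages */

/* Sketch only: reject a page offset that would wrap when shifted, then
 * compare against the buffer length without ever computing offset + size. */
static bool example_range_ok(unsigned long pgoff, unsigned long size,
			     unsigned long smem_len)
{
	unsigned long offset;

	if (pgoff > (~0UL >> EXAMPLE_PAGE_SHIFT))
		return false;		/* pgoff << shift would overflow */

	offset = pgoff << EXAMPLE_PAGE_SHIFT;

	/* equivalent to offset + size > smem_len, but cannot wrap */
	if (offset > smem_len || size > smem_len - offset)
		return false;

	return true;
}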
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 184340d486c3..86d25f18aa99 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1337,6 +1337,19 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv)
  */
 void vmw_svga_disable(struct vmw_private *dev_priv)
 {
+	/*
+	 * Disabling SVGA will turn off device modesetting capabilities, so
+	 * notify KMS about that so that it doesn't cache atomic state that
+	 * isn't valid anymore, for example crtcs turned on.
+	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
+	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
+	 * end up with lock order reversal. Thus, a master may actually perform
+	 * a new modeset just after we call vmw_kms_lost_device() and race with
+	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
+	 * to be inconsistent with the device, causing modesetting problems.
+	 *
+	 */
+	vmw_kms_lost_device(dev_priv->dev);
 	ttm_write_lock(&dev_priv->reservation_sem, false);
 	spin_lock(&dev_priv->svga_lock);
 	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index d08753e8fd94..9116fe8baebc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -938,6 +938,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
+void vmw_kms_lost_device(struct drm_device *dev);
 
 int vmw_dumb_create(struct drm_file *file_priv,
 		    struct drm_device *dev,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ead61015cd79..3c824fd7cbf3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -31,7 +31,6 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_rect.h>
 
-
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
@@ -2517,9 +2516,12 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
  * Helper to be used if an error forces the caller to undo the actions of
  * vmw_kms_helper_resource_prepare.
  */
-void vmw_kms_helper_resource_revert(struct vmw_resource *res)
+void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
 {
-	vmw_kms_helper_buffer_revert(res->backup);
+	struct vmw_resource *res = ctx->res;
+
+	vmw_kms_helper_buffer_revert(ctx->buf);
+	vmw_dmabuf_unreference(&ctx->buf);
 	vmw_resource_unreserve(res, false, NULL, 0);
 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
@@ -2536,10 +2538,14 @@ void vmw_kms_helper_resource_revert(struct vmw_resource *res)
  * interrupted by a signal.
  */
 int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
-				    bool interruptible)
+				    bool interruptible,
+				    struct vmw_validation_ctx *ctx)
 {
 	int ret = 0;
 
+	ctx->buf = NULL;
+	ctx->res = res;
+
 	if (interruptible)
 		ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
 	else
@@ -2558,6 +2564,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
 						    res->dev_priv->has_mob);
 		if (ret)
 			goto out_unreserve;
+
+		ctx->buf = vmw_dmabuf_reference(res->backup);
 	}
 	ret = vmw_resource_validate(res);
 	if (ret)
@@ -2565,7 +2573,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
 	return 0;
 
 out_revert:
-	vmw_kms_helper_buffer_revert(res->backup);
+	vmw_kms_helper_buffer_revert(ctx->buf);
 out_unreserve:
 	vmw_resource_unreserve(res, false, NULL, 0);
 out_unlock:
@@ -2581,11 +2589,13 @@ out_unlock:
  * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
  * ref-counted fence pointer is returned here.
  */
-void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
 				    struct vmw_fence_obj **out_fence)
 {
-	if (res->backup || out_fence)
-		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
+	struct vmw_resource *res = ctx->res;
+
+	if (ctx->buf || out_fence)
+		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
 					     out_fence, NULL);
 
 	vmw_resource_unreserve(res, false, NULL, 0);
@@ -2851,3 +2861,14 @@ int vmw_kms_set_config(struct drm_mode_set *set,
 
 	return drm_atomic_helper_set_config(set, ctx);
 }
+
+
+/**
+ * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
+ *
+ * @dev: Pointer to the drm device
+ */
+void vmw_kms_lost_device(struct drm_device *dev)
+{
+	drm_atomic_helper_shutdown(dev);
+}
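The kms.c changes stop passing the bare vmw_resource into the revert/finish helpers and instead thread a small vmw_validation_ctx through prepare, revert and finish. The context takes its own reference on the backup buffer at prepare time, so the later calls operate on the buffer that was actually validated even if res->backup changes in between. A hedged sketch of just that capture-and-release idea, assuming the vmwgfx declarations above; example_prepare()/example_revert() are illustrative, not the driver's functions:

/* Sketch only: the context owns a buffer reference for the span of the
 * operation. */
static int example_prepare(struct vmw_resource *res,
			   struct vmw_validation_ctx *ctx)
{
	ctx->res = res;
	ctx->buf = NULL;

	if (res->backup)
		ctx->buf = vmw_dmabuf_reference(res->backup);	/* take a ref */

	return 0;
}

static void example_revert(struct vmw_validation_ctx *ctx)
{
	vmw_kms_helper_buffer_revert(ctx->buf);
	vmw_dmabuf_unreference(&ctx->buf);	/* drop the reference we took */
}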
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index cd9da2dd79af..3d2ca280eaa7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -240,6 +240,11 @@ struct vmw_display_unit {
 	int set_gui_y;
 };
 
+struct vmw_validation_ctx {
+	struct vmw_resource *res;
+	struct vmw_dma_buffer *buf;
+};
+
 #define vmw_crtc_to_du(x) \
 	container_of(x, struct vmw_display_unit, crtc)
 #define vmw_connector_to_du(x) \
@@ -296,9 +301,10 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
 				  struct drm_vmw_fence_rep __user *
 				  user_fence_rep);
 int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
-				    bool interruptible);
-void vmw_kms_helper_resource_revert(struct vmw_resource *res);
-void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+				    bool interruptible,
+				    struct vmw_validation_ctx *ctx);
+void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
+void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
 				    struct vmw_fence_obj **out_fence);
 int vmw_kms_readback(struct vmw_private *dev_priv,
 		     struct drm_file *file_priv,
@@ -439,5 +445,4 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
 
 int vmw_kms_set_config(struct drm_mode_set *set,
 		       struct drm_modeset_acquire_ctx *ctx);
-
 #endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 63a4cd794b73..3ec9eae831b8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -909,12 +909,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
 	struct vmw_framebuffer_surface *vfbs =
 		container_of(framebuffer, typeof(*vfbs), base);
 	struct vmw_kms_sou_surface_dirty sdirty;
+	struct vmw_validation_ctx ctx;
 	int ret;
 
 	if (!srf)
 		srf = &vfbs->surface->res;
 
-	ret = vmw_kms_helper_resource_prepare(srf, true);
+	ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
 	if (ret)
 		return ret;
 
@@ -933,7 +934,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
 	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
 				   dest_x, dest_y, num_clips, inc,
 				   &sdirty.base);
-	vmw_kms_helper_resource_finish(srf, out_fence);
+	vmw_kms_helper_resource_finish(&ctx, out_fence);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index b68d74888ab1..6b969e5dea2a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -980,12 +980,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
 	struct vmw_framebuffer_surface *vfbs =
 		container_of(framebuffer, typeof(*vfbs), base);
 	struct vmw_stdu_dirty sdirty;
+	struct vmw_validation_ctx ctx;
 	int ret;
 
 	if (!srf)
 		srf = &vfbs->surface->res;
 
-	ret = vmw_kms_helper_resource_prepare(srf, true);
+	ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
 	if (ret)
 		return ret;
 
@@ -1008,7 +1009,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
 				   dest_x, dest_y, num_clips, inc,
 				   &sdirty.base);
 out_finish:
-	vmw_kms_helper_resource_finish(srf, out_fence);
+	vmw_kms_helper_resource_finish(&ctx, out_fence);
 
 	return ret;
 }
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index 97b99500153d..83f9dd934a5d 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -250,10 +250,14 @@ void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan)
 {
 	int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
 	struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
-	struct ipu_prg_channel *chan = &prg->chan[prg_chan];
+	struct ipu_prg_channel *chan;
 	u32 val;
 
-	if (!chan->enabled || prg_chan < 0)
+	if (prg_chan < 0)
+		return;
+
+	chan = &prg->chan[prg_chan];
+	if (!chan->enabled)
 		return;
 
 	pm_runtime_get_sync(prg->dev);
@@ -280,13 +284,15 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
 {
 	int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
 	struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
-	struct ipu_prg_channel *chan = &prg->chan[prg_chan];
+	struct ipu_prg_channel *chan;
 	u32 val;
 	int ret;
 
 	if (prg_chan < 0)
 		return prg_chan;
 
+	chan = &prg->chan[prg_chan];
+
 	if (chan->enabled) {
 		ipu_pre_update(prg->pres[chan->used_pre], *eba);
 		return 0;
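Both ipu-prg hunks delay the &prg->chan[prg_chan] lookup until after prg_chan has been range-checked, so a negative channel number is never used as an array index. A minimal hedged sketch of that check-before-index pattern, with purely illustrative types and names:

#include <stdbool.h>

struct example_channel {
	bool enabled;
};

struct example_ctrl {
	struct example_channel chan[3];
};

/* Sketch only: validate the index before it is used to form a pointer; the
 * original code computed &prg->chan[prg_chan] first and tested prg_chan < 0
 * afterwards. */
static void example_disable(struct example_ctrl *ctrl, int idx)
{
	struct example_channel *chan;

	if (idx < 0)
		return;			/* reject a bad index before any lookup */

	chan = &ctrl->chan[idx];	/* safe: idx is known to be valid here */
	if (!chan->enabled)
		return;

	/* ... the actual disable sequence would go here ... */
}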