Diffstat (limited to 'drivers/gpu')
 drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c                    |  7
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c                    | 24
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h                    |  1
 drivers/gpu/drm/amd/amdgpu/ci_dpm.c                       |  6
 drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c                     | 15
 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c                     | 15
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c                     | 15
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c                     | 16
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c          | 32
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c        |  2
 drivers/gpu/drm/arm/hdlcd_crtc.c                          | 47
 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c          | 36
 drivers/gpu/drm/drm_plane.c                               |  5
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c              |  4
 drivers/gpu/drm/gma500/psb_intel_lvds.c                   | 18
 drivers/gpu/drm/i915/gvt/handlers.c                       |  2
 drivers/gpu/drm/i915/gvt/render.c                         |  3
 drivers/gpu/drm/i915/gvt/sched_policy.c                   |  8
 drivers/gpu/drm/nouveau/nouveau_display.c                 |  6
 drivers/gpu/drm/nouveau/nouveau_drm.c                     |  6
 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c          |  3
 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c |  4
 drivers/gpu/drm/qxl/qxl_display.c                         |  4
 drivers/gpu/drm/radeon/ci_dpm.c                           |  6
 drivers/gpu/drm/radeon/cik.c                              |  4
 drivers/gpu/drm/radeon/evergreen.c                        |  4
 drivers/gpu/drm/radeon/r600.c                             |  2
 drivers/gpu/drm/radeon/radeon_kms.c                       |  2
 drivers/gpu/drm/radeon/si.c                               |  4
 29 files changed, 170 insertions(+), 131 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 236d9950221b..c0d8c6ff6380 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -425,10 +425,15 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
 
 void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
 {
-	struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev;
+	struct amdgpu_fbdev *afbdev;
 	struct drm_fb_helper *fb_helper;
 	int ret;
 
+	if (!adev)
+		return;
+
+	afbdev = adev->mode_info.rfbdev;
+
 	if (!afbdev)
 		return;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 749a6cde7985..83c172a6e938 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -635,7 +635,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 		mutex_unlock(&id_mgr->lock);
 	}
 
-	if (gds_switch_needed) {
+	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
 		id->gds_base = job->gds_base;
 		id->gds_size = job->gds_size;
 		id->gws_base = job->gws_base;
@@ -673,6 +673,7 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
 	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
 
+	atomic64_set(&id->owner, 0);
 	id->gds_base = 0;
 	id->gds_size = 0;
 	id->gws_base = 0;
@@ -682,6 +683,26 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
 }
 
 /**
+ * amdgpu_vm_reset_all_id - reset VMID to zero
+ *
+ * @adev: amdgpu device structure
+ *
+ * Reset VMID to force flush on next use
+ */
+void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
+{
+	unsigned i, j;
+
+	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+		struct amdgpu_vm_id_manager *id_mgr =
+			&adev->vm_manager.id_mgr[i];
+
+		for (j = 1; j < id_mgr->num_ids; ++j)
+			amdgpu_vm_reset_id(adev, i, j);
+	}
+}
+
+/**
  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
  *
  * @vm: requested vm
@@ -2271,7 +2292,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		adev->vm_manager.seqno[i] = 0;
 
-
 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
 	atomic64_set(&adev->vm_manager.client_counter, 0);
 	spin_lock_init(&adev->vm_manager.prt_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index d97e28b4bdc4..e1d951ece433 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -204,6 +204,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
 			unsigned vmid);
+void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 				 struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 00e56a28b593..cb508a211b2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -906,6 +906,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
 	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
 	u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
 
+	/* disable mclk switching if the refresh is >120Hz, even if the
+	 * blanking period would allow it
+	 */
+	if (amdgpu_dpm_get_vrefresh(adev) > 120)
+		return true;
+
 	if (vblank_time < switch_limit)
 		return true;
 	else
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 2a3983036a30..9776ad3d2d71 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -950,10 +950,6 @@ static int gmc_v6_0_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->vm_manager.enabled) {
-		gmc_v6_0_vm_fini(adev);
-		adev->vm_manager.enabled = false;
-	}
 	gmc_v6_0_hw_fini(adev);
 
 	return 0;
@@ -968,16 +964,9 @@ static int gmc_v6_0_resume(void *handle)
 	if (r)
 		return r;
 
-	if (!adev->vm_manager.enabled) {
-		r = gmc_v6_0_vm_init(adev);
-		if (r) {
-			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-			return r;
-		}
-		adev->vm_manager.enabled = true;
-	}
+	amdgpu_vm_reset_all_ids(adev);
 
-	return r;
+	return 0;
 }
 
 static bool gmc_v6_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 6d347c1d2516..fca8e77182c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1117,10 +1117,6 @@ static int gmc_v7_0_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->vm_manager.enabled) {
-		gmc_v7_0_vm_fini(adev);
-		adev->vm_manager.enabled = false;
-	}
 	gmc_v7_0_hw_fini(adev);
 
 	return 0;
@@ -1135,16 +1131,9 @@ static int gmc_v7_0_resume(void *handle)
 	if (r)
 		return r;
 
-	if (!adev->vm_manager.enabled) {
-		r = gmc_v7_0_vm_init(adev);
-		if (r) {
-			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-			return r;
-		}
-		adev->vm_manager.enabled = true;
-	}
+	amdgpu_vm_reset_all_ids(adev);
 
-	return r;
+	return 0;
 }
 
 static bool gmc_v7_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 2784ff49cf56..e9c127037b39 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1209,10 +1209,6 @@ static int gmc_v8_0_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->vm_manager.enabled) {
-		gmc_v8_0_vm_fini(adev);
-		adev->vm_manager.enabled = false;
-	}
 	gmc_v8_0_hw_fini(adev);
 
 	return 0;
@@ -1227,16 +1223,9 @@ static int gmc_v8_0_resume(void *handle)
 	if (r)
 		return r;
 
-	if (!adev->vm_manager.enabled) {
-		r = gmc_v8_0_vm_init(adev);
-		if (r) {
-			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-			return r;
-		}
-		adev->vm_manager.enabled = true;
-	}
+	amdgpu_vm_reset_all_ids(adev);
 
-	return r;
+	return 0;
 }
 
 static bool gmc_v8_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index dc1e1c1d6b24..f936332a069d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -791,10 +791,6 @@ static int gmc_v9_0_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->vm_manager.enabled) {
-		gmc_v9_0_vm_fini(adev);
-		adev->vm_manager.enabled = false;
-	}
 	gmc_v9_0_hw_fini(adev);
 
 	return 0;
@@ -809,17 +805,9 @@ static int gmc_v9_0_resume(void *handle)
 	if (r)
 		return r;
 
-	if (!adev->vm_manager.enabled) {
-		r = gmc_v9_0_vm_init(adev);
-		if (r) {
-			dev_err(adev->dev,
-				"vm manager initialization failed (%d).\n", r);
-			return r;
-		}
-		adev->vm_manager.enabled = true;
-	}
+	amdgpu_vm_reset_all_ids(adev);
 
-	return r;
+	return 0;
 }
 
 static bool gmc_v9_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 975567f6813d..1f01020ce3a9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2655,6 +2655,28 @@ static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
 	return sizeof(struct smu7_power_state);
 }
 
+static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
+				 uint32_t vblank_time_us)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t switch_limit_us;
+
+	switch (hwmgr->chip_id) {
+	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
+		switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+		break;
+	default:
+		switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
+		break;
+	}
+
+	if (vblank_time_us < switch_limit_us)
+		return true;
+	else
+		return false;
+}
 
 static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 		struct pp_power_state *request_ps,
@@ -2669,6 +2691,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	bool disable_mclk_switching;
 	bool disable_mclk_switching_for_frame_lock;
 	struct cgs_display_info info = {0};
+	struct cgs_mode_info mode_info = {0};
 	const struct phm_clock_and_voltage_limits *max_limits;
 	uint32_t i;
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -2677,6 +2700,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	int32_t count;
 	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
 
+	info.mode_info = &mode_info;
 	data->battery_state = (PP_StateUILabel_Battery ==
 			request_ps->classification.ui_label);
 
@@ -2703,8 +2727,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 
 	cgs_get_active_displays_info(hwmgr->device, &info);
 
-	/*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
-
 	minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
 	minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
 
@@ -2769,8 +2791,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 			PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
 
 
-	disable_mclk_switching = (1 < info.display_count) ||
-				  disable_mclk_switching_for_frame_lock;
+	disable_mclk_switching = ((1 < info.display_count) ||
+				  disable_mclk_switching_for_frame_lock ||
+				  smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
+				  (mode_info.refresh_rate > 120));
 
 	sclk = smu7_ps->performance_levels[0].engine_clock;
 	mclk = smu7_ps->performance_levels[0].memory_clock;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index cd7bf6d3859a..ab17350e853d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -4187,7 +4187,7 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
 		enum pp_clock_type type, uint32_t mask)
 {
 	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
-	uint32_t i;
+	int i;
 
 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
 		return -EINVAL;
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 798a3cc480a2..1a3359c0f6cd 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -10,6 +10,7 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
@@ -226,16 +227,33 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
 static int hdlcd_plane_atomic_check(struct drm_plane *plane,
 				    struct drm_plane_state *state)
 {
-	u32 src_w, src_h;
+	struct drm_rect clip = { 0 };
+	struct drm_crtc_state *crtc_state;
+	u32 src_h = state->src_h >> 16;
 
-	src_w = state->src_w >> 16;
-	src_h = state->src_h >> 16;
+	/* only the HDLCD_REG_FB_LINE_COUNT register has a limit */
+	if (src_h >= HDLCD_MAX_YRES) {
+		DRM_DEBUG_KMS("Invalid source width: %d\n", src_h);
+		return -EINVAL;
+	}
+
+	if (!state->fb || !state->crtc)
+		return 0;
 
-	/* we can't do any scaling of the plane source */
-	if ((src_w != state->crtc_w) || (src_h != state->crtc_h))
+	crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+							state->crtc);
+	if (!crtc_state) {
+		DRM_DEBUG_KMS("Invalid crtc state\n");
 		return -EINVAL;
+	}
 
-	return 0;
+	clip.x2 = crtc_state->adjusted_mode.hdisplay;
+	clip.y2 = crtc_state->adjusted_mode.vdisplay;
+
+	return drm_plane_helper_check_state(state, &clip,
+					    DRM_PLANE_HELPER_NO_SCALING,
+					    DRM_PLANE_HELPER_NO_SCALING,
+					    false, true);
 }
 
 static void hdlcd_plane_atomic_update(struct drm_plane *plane,
@@ -244,21 +262,20 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
 	struct drm_framebuffer *fb = plane->state->fb;
 	struct hdlcd_drm_private *hdlcd;
 	struct drm_gem_cma_object *gem;
-	u32 src_w, src_h, dest_w, dest_h;
+	u32 src_x, src_y, dest_h;
 	dma_addr_t scanout_start;
 
 	if (!fb)
 		return;
 
-	src_w = plane->state->src_w >> 16;
-	src_h = plane->state->src_h >> 16;
-	dest_w = plane->state->crtc_w;
-	dest_h = plane->state->crtc_h;
+	src_x = plane->state->src.x1 >> 16;
+	src_y = plane->state->src.y1 >> 16;
+	dest_h = drm_rect_height(&plane->state->dst);
 	gem = drm_fb_cma_get_gem_obj(fb, 0);
+
 	scanout_start = gem->paddr + fb->offsets[0] +
-		plane->state->crtc_y * fb->pitches[0] +
-		plane->state->crtc_x *
-		fb->format->cpp[0];
+		src_y * fb->pitches[0] +
+		src_x * fb->format->cpp[0];
 
 	hdlcd = plane->dev->dev_private;
 	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]);
@@ -305,7 +322,6 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
 				       formats, ARRAY_SIZE(formats),
 				       DRM_PLANE_TYPE_PRIMARY, NULL);
 	if (ret) {
-		devm_kfree(drm->dev, plane);
 		return ERR_PTR(ret);
 	}
 
@@ -329,7 +345,6 @@ int hdlcd_setup_crtc(struct drm_device *drm)
 					&hdlcd_crtc_funcs, NULL);
 	if (ret) {
 		hdlcd_plane_destroy(primary);
-		devm_kfree(drm->dev, primary);
 		return ret;
 	}
 
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 65a3bd7a0c00..423dda2785d4 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -152,8 +152,7 @@ static const struct drm_connector_funcs atmel_hlcdc_panel_connector_funcs = {
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
-static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
-				       const struct device_node *np)
+static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
 {
 	struct atmel_hlcdc_dc *dc = dev->dev_private;
 	struct atmel_hlcdc_rgb_output *output;
@@ -161,6 +160,11 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
 	struct drm_bridge *bridge;
 	int ret;
 
+	ret = drm_of_find_panel_or_bridge(dev->dev->of_node, 0, endpoint,
+					  &panel, &bridge);
+	if (ret)
+		return ret;
+
 	output = devm_kzalloc(dev->dev, sizeof(*output), GFP_KERNEL);
 	if (!output)
 		return -EINVAL;
@@ -177,10 +181,6 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
 
 	output->encoder.possible_crtcs = 0x1;
 
-	ret = drm_of_find_panel_or_bridge(np, 0, 0, &panel, &bridge);
-	if (ret)
-		return ret;
-
 	if (panel) {
 		output->connector.dpms = DRM_MODE_DPMS_OFF;
 		output->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
@@ -220,22 +220,14 @@ err_encoder_cleanup:
 
 int atmel_hlcdc_create_outputs(struct drm_device *dev)
 {
-	struct device_node *remote;
-	int ret = -ENODEV;
-	int endpoint = 0;
-
-	while (true) {
-		/* Loop thru possible multiple connections to the output */
-		remote = of_graph_get_remote_node(dev->dev->of_node, 0,
-						  endpoint++);
-		if (!remote)
-			break;
-
-		ret = atmel_hlcdc_attach_endpoint(dev, remote);
-		of_node_put(remote);
-		if (ret)
-			return ret;
-	}
+	int endpoint, ret = 0;
+
+	for (endpoint = 0; !ret; endpoint++)
+		ret = atmel_hlcdc_attach_endpoint(dev, endpoint);
+
+	/* At least one device was successfully attached.*/
+	if (ret == -ENODEV && endpoint)
+		return 0;
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index fedd4d60d9cd..5dc8c4350602 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -948,8 +948,6 @@ retry:
 	}
 
 out:
-	if (ret && crtc->funcs->page_flip_target)
-		drm_crtc_vblank_put(crtc);
 	if (fb)
 		drm_framebuffer_put(fb);
 	if (crtc->primary->old_fb)
@@ -964,5 +962,8 @@ out:
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
 
+	if (ret && crtc->funcs->page_flip_target)
+		drm_crtc_vblank_put(crtc);
+
 	return ret;
 }
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index a13930e1d8c9..ee7069e93eda 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -44,6 +44,7 @@ static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
 
 		/* initially, until copy_from_user() and bo lookup succeeds: */
 		submit->nr_bos = 0;
+		submit->fence = NULL;
 
 		ww_acquire_init(&submit->ticket, &reservation_ww_class);
 	}
@@ -294,7 +295,8 @@ static void submit_cleanup(struct etnaviv_gem_submit *submit)
 	}
 
 	ww_acquire_fini(&submit->ticket);
-	dma_fence_put(submit->fence);
+	if (submit->fence)
+		dma_fence_put(submit->fence);
 	kfree(submit);
 }
 
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 0066fe7e622e..be3eefec5152 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -759,20 +759,23 @@ void psb_intel_lvds_init(struct drm_device *dev,
 		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
 			mode_dev->panel_fixed_mode =
 				drm_mode_duplicate(dev, scan);
+			DRM_DEBUG_KMS("Using mode from DDC\n");
 			goto out;	/* FIXME: check for quirks */
 		}
 	}
 
 	/* Failed to get EDID, what about VBT? do we need this? */
-	if (mode_dev->vbt_mode)
+	if (dev_priv->lfp_lvds_vbt_mode) {
 		mode_dev->panel_fixed_mode =
-			drm_mode_duplicate(dev, mode_dev->vbt_mode);
+			drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
 
-	if (!mode_dev->panel_fixed_mode)
-		if (dev_priv->lfp_lvds_vbt_mode)
-			mode_dev->panel_fixed_mode =
-				drm_mode_duplicate(dev,
-					dev_priv->lfp_lvds_vbt_mode);
+		if (mode_dev->panel_fixed_mode) {
+			mode_dev->panel_fixed_mode->type |=
+				DRM_MODE_TYPE_PREFERRED;
+			DRM_DEBUG_KMS("Using mode from VBT\n");
+			goto out;
+		}
+	}
 
 	/*
 	 * If we didn't get EDID, try checking if the panel is already turned
@@ -789,6 +792,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
 		if (mode_dev->panel_fixed_mode) {
 			mode_dev->panel_fixed_mode->type |=
 				DRM_MODE_TYPE_PREFERRED;
+			DRM_DEBUG_KMS("Using pre-programmed mode\n");
 			goto out;	/* FIXME: check for quirks */
 		}
 	}
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 0ad1a508e2af..c995e540ff96 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1244,7 +1244,7 @@ static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
 	mode = vgpu_vreg(vgpu, offset);
 
 	if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
-		WARN_ONCE(1, "VM(%d): iGVT-g doesn't supporte GuC\n",
+		WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
 			  vgpu->id);
 		return 0;
 	}
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index c6e7972ac21d..a5e11d89df2f 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -340,6 +340,9 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
 		} else
 			v = mmio->value;
 
+		if (mmio->in_context)
+			continue;
+
 		I915_WRITE(mmio->reg, v);
 		POSTING_READ(mmio->reg);
 
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 79ba4b3440aa..f25ff133865f 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -129,9 +129,13 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 	struct vgpu_sched_data *vgpu_data;
 	ktime_t cur_time;
 
-	/* no target to schedule */
-	if (!scheduler->next_vgpu)
+	/* no need to schedule if next_vgpu is the same with current_vgpu,
+	 * let scheduler chose next_vgpu again by setting it to NULL.
+	 */
+	if (scheduler->next_vgpu == scheduler->current_vgpu) {
+		scheduler->next_vgpu = NULL;
 		return;
+	}
 
 	/*
 	 * after the flag is set, workload dispatch thread will
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 6718c84fb862..8d1df5678eaa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -337,6 +337,8 @@ nouveau_display_hpd_work(struct work_struct *work)
 	pm_runtime_get_sync(drm->dev->dev);
 
 	drm_helper_hpd_irq_event(drm->dev);
+	/* enable polling for external displays */
+	drm_kms_helper_poll_enable(drm->dev);
 
 	pm_runtime_mark_last_busy(drm->dev->dev);
 	pm_runtime_put_sync(drm->dev->dev);
@@ -390,10 +392,6 @@ nouveau_display_init(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	/* enable polling for external displays */
-	if (!dev->mode_config.poll_enabled)
-		drm_kms_helper_poll_enable(dev);
-
 	/* enable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct nouveau_connector *conn = nouveau_connector(connector);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index c3dc75fee700..6844372366d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -502,6 +502,9 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 		pm_runtime_allow(dev->dev);
 		pm_runtime_mark_last_busy(dev->dev);
 		pm_runtime_put(dev->dev);
+	} else {
+		/* enable polling for external displays */
+		drm_kms_helper_poll_enable(dev);
 	}
 	return 0;
 
@@ -774,9 +777,6 @@ nouveau_pmops_runtime_resume(struct device *dev)
 
 	ret = nouveau_do_resume(drm_dev, true);
 
-	if (!drm_dev->mode_config.poll_enabled)
-		drm_kms_helper_poll_enable(drm_dev);
-
 	/* do magic */
 	nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 3a24788c3185..a7e55c422501 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -148,7 +148,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
 	case NVKM_MEM_TARGET_NCOH: target = 3; break;
 	default:
 		WARN_ON(1);
-		return;
+		goto unlock;
 	}
 
 	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
@@ -160,6 +160,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
 				 & 0x00100000),
 			       msecs_to_jiffies(2000)) == 0)
 		nvkm_error(subdev, "runlist %d update timeout\n", runl);
+unlock:
 	mutex_unlock(&subdev->mutex);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
index d1cf02d22db1..1b0c793c0192 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
@@ -116,6 +116,7 @@ ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
 	ret = nvkm_firmware_get(subdev->device, f, &sig);
 	if (ret)
 		goto free_data;
+
 	img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
 	if (!img->sig) {
 		ret = -ENOMEM;
@@ -126,8 +127,9 @@ ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
 	img->ucode_data = ls_ucode_img_build(bl, code, data,
 					     &img->ucode_desc);
 	if (IS_ERR(img->ucode_data)) {
+		kfree(img->sig);
 		ret = PTR_ERR(img->ucode_data);
-		goto free_data;
+		goto free_sig;
 	}
 	img->ucode_size = img->ucode_desc.image_size;
 
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index f5ef81595f5a..03fe182203ce 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -574,8 +574,6 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
 	if (ret)
 		return;
 
-	cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
-
 	if (fb != old_state->fb) {
 		obj = to_qxl_framebuffer(fb)->obj;
 		user_bo = gem_to_qxl_bo(obj);
@@ -613,6 +611,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
 		qxl_bo_kunmap(cursor_bo);
 		qxl_bo_kunmap(user_bo);
 
+		cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
 		cmd->u.set.visible = 1;
 		cmd->u.set.shape = qxl_bo_physical_address(qdev,
 							   cursor_bo, 0);
@@ -623,6 +622,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
 		if (ret)
 			goto out_free_release;
 
+		cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
 		cmd->type = QXL_CURSOR_MOVE;
 	}
 
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index f0cf99783c62..c97fbb2ab48b 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
 	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
 	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
 
+	/* disable mclk switching if the refresh is >120Hz, even if the
+	 * blanking period would allow it
+	 */
+	if (r600_dpm_get_vrefresh(rdev) > 120)
+		return true;
+
 	if (vblank_time < switch_limit)
 		return true;
 	else
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index e368ce22bcc4..258912132b62 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7401,7 +7401,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
@@ -7431,7 +7431,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_RX_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f130ec41ee4b..0bf103536404 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4927,7 +4927,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
@@ -4958,7 +4958,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_RX_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 0a085176e79b..e06e2d8feab3 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3988,7 +3988,7 @@ static void r600_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 6a68d440bc44..d0ad03674250 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -116,7 +116,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	if ((radeon_runtime_pm != 0) &&
 	    radeon_has_atpx() &&
 	    ((flags & RADEON_IS_IGP) == 0) &&
-	    !pci_is_thunderbolt_attached(rdev->pdev))
+	    !pci_is_thunderbolt_attached(dev->pdev))
 		flags |= RADEON_IS_PX;
 
 	/* radeon_device_init should report only fatal error
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ceee87f029d9..76d1888528e6 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6317,7 +6317,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
@@ -6348,7 +6348,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_RX_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}