path: root/drivers/gpu
author	Linus Torvalds <torvalds@linux-foundation.org>	2019-05-16 10:22:42 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-05-16 10:22:42 -0400
commit	cc7ce90153e74f8266eefee9fba466faa1a2d5df (patch)
tree	7ebac4bc27c2d400aca256c0b557c561540543e2 /drivers/gpu
parent	83f3ef3de625a5766de2382f9e077d4daafd5bac (diff)
parent	8da0e1525b7f0d69c6cb44094963906282b32673 (diff)
Merge tag 'drm-next-2019-05-16' of git://anongit.freedesktop.org/drm/drm
Pull drm fixes from Dave Airlie:
 "A bunch of fixes for the merge window closure, doesn't seem to be
  anything too major or serious in there. It does add TU117 turing
  modesetting to nouveau but it's just an enable for preexisting code.

  amdgpu:
   - gpu reset at load crash fix
   - ATPX hotplug fix for when dGPU is off
   - SR-IOV fixes

  radeon:
   - r5xx pll fixes

  i915:
   - GVT (MCHBAR, buffer alignment, misc warnings fixes)
   - Fixes for newly enabled semaphore code
   - Geminilake disable framebuffer compression
   - HSW edp fast modeset fix
   - IRQ vs RCU race fix

  nouveau:
   - Turing modesetting fixes
   - TU117 support

  msm:
   - SDM845 bringup fixes

  panfrost:
   - static checker fixes

  pl111:
   - spinlock init fix

  bridge:
   - refresh rate register fix for adv7511"

* tag 'drm-next-2019-05-16' of git://anongit.freedesktop.org/drm/drm: (36 commits)
  drm/msm: Upgrade gxpd checks to IS_ERR_OR_NULL
  drm/msm/dpu: Remove duplicate header
  drm/pl111: Initialize clock spinlock early
  drm/msm: correct attempted NULL pointer dereference in debugfs
  drm/msm: remove resv fields from msm_gem_object struct
  drm/nouveau: fix duplication of nv50_head_atom struct
  drm/nouveau/disp/dp: respect sink limits when selecting failsafe link configuration
  drm/nouveau/core: initial support for boards with TU117 chipset
  drm/nouveau/core: allow detected chipset to be overridden
  drm/nouveau/kms/gf119-gp10x: push HeadSetControlOutputResource() mthd when encoders change
  drm/nouveau/kms/nv50-: fix bug preventing non-vsync'd page flips
  drm/nouveau/kms/gv100-: fix spurious window immediate interlocks
  drm/bridge: adv7511: Fix low refresh rate selection
  drm/panfrost: Add missing _fini() calls in panfrost_device_fini()
  drm/panfrost: Only put sync_out if non-NULL
  drm/i915: Seal races between async GPU cancellation, retirement and signaling
  drm/i915: Fix fastset vs. pfit on/off on HSW EDP transcoder
  drm/i915/fbc: disable framebuffer compression on GeminiLake
  drm/amdgpu/psp: move psp version specific function pointers to early_init
  drm/radeon: prefer lower reference dividers
  ...
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c	| 3
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c	| 10
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c	| 19
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c	| 36
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c	| 3
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h	| 1
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c	| 16
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/vce_v4_0.c	| 17
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/vega10_ih.c	| 37
-rw-r--r--	drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c	| 3
-rw-r--r--	drivers/gpu/drm/bridge/adv7511/adv7511_drv.c	| 6
-rw-r--r--	drivers/gpu/drm/i915/gvt/debugfs.c	| 4
-rw-r--r--	drivers/gpu/drm/i915/gvt/dmabuf.c	| 19
-rw-r--r--	drivers/gpu/drm/i915/gvt/gtt.c	| 15
-rw-r--r--	drivers/gpu/drm/i915/gvt/gtt.h	| 16
-rw-r--r--	drivers/gpu/drm/i915/gvt/handlers.c	| 4
-rw-r--r--	drivers/gpu/drm/i915/gvt/mmio_context.c	| 1
-rw-r--r--	drivers/gpu/drm/i915/gvt/reg.h	| 3
-rw-r--r--	drivers/gpu/drm/i915/gvt/scheduler.c	| 2
-rw-r--r--	drivers/gpu/drm/i915/i915_request.c	| 60
-rw-r--r--	drivers/gpu/drm/i915/intel_breadcrumbs.c	| 78
-rw-r--r--	drivers/gpu/drm/i915/intel_context.c	| 1
-rw-r--r--	drivers/gpu/drm/i915/intel_context_types.h	| 3
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	| 9
-rw-r--r--	drivers/gpu/drm/i915/intel_fbc.c	| 4
-rw-r--r--	drivers/gpu/drm/i915/intel_guc_submission.c	| 1
-rw-r--r--	drivers/gpu/drm/i915/intel_pipe_crc.c	| 13
-rw-r--r--	drivers/gpu/drm/msm/adreno/a6xx_gmu.c	| 6
-rw-r--r--	drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c	| 1
-rw-r--r--	drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c	| 4
-rw-r--r--	drivers/gpu/drm/msm/msm_atomic.c	| 4
-rw-r--r--	drivers/gpu/drm/msm/msm_gem.c	| 3
-rw-r--r--	drivers/gpu/drm/msm/msm_gem.h	| 4
-rw-r--r--	drivers/gpu/drm/nouveau/dispnv50/disp.h	| 1
-rw-r--r--	drivers/gpu/drm/nouveau/dispnv50/head.c	| 3
-rw-r--r--	drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c	| 1
-rw-r--r--	drivers/gpu/drm/nouveau/dispnv50/wndw.c	| 4
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_drm.c	| 3
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/engine/device/base.c	| 60
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c	| 11
-rw-r--r--	drivers/gpu/drm/panfrost/panfrost_device.c	| 4
-rw-r--r--	drivers/gpu/drm/panfrost/panfrost_drv.c	| 3
-rw-r--r--	drivers/gpu/drm/pl111/pl111_display.c	| 5
-rw-r--r--	drivers/gpu/drm/radeon/radeon_display.c	| 4
44 files changed, 387 insertions, 118 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 4376b17ca594..56f8ca2a3bb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -464,8 +464,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 			}
 		}
 		if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
-			if ((adev->flags & AMD_IS_PX) &&
-			    amdgpu_atpx_dgpu_req_power_for_displays()) {
+			if (adev->flags & AMD_IS_PX) {
 				pm_runtime_get_sync(adev->ddev->dev);
 				/* Just fire off a uevent and let userspace tell us what to do */
 				drm_helper_hpd_irq_event(adev->ddev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 95144e49c7f9..34471dbaa872 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -342,6 +342,16 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	if (current_level == level)
 		return count;
 
+	/* profile_exit setting is valid only when current mode is in profile mode */
+	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+	    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+	    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+	    AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
+	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
+		pr_err("Currently not in any profile mode!\n");
+		return -EINVAL;
+	}
+
 	if (is_support_sw_smu(adev)) {
 		mutex_lock(&adev->pm.mutex);
 		if (adev->pm.dpm.thermal_active) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 905cce1814f3..05897b05766b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -38,18 +38,10 @@ static void psp_set_funcs(struct amdgpu_device *adev);
 static int psp_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct psp_context *psp = &adev->psp;
 
 	psp_set_funcs(adev);
 
-	return 0;
-}
-
-static int psp_sw_init(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct psp_context *psp = &adev->psp;
-	int ret;
-
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
@@ -67,6 +59,15 @@ static int psp_sw_init(void *handle)
 
 	psp->adev = adev;
 
+	return 0;
+}
+
+static int psp_sw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct psp_context *psp = &adev->psp;
+	int ret;
+
 	ret = psp_init_microcode(psp);
 	if (ret) {
 		DRM_ERROR("Failed to load psp firmware!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a07c85815b7a..4f10f5aba00b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2757,6 +2757,37 @@ error_free_sched_entity:
 }
 
 /**
+ * amdgpu_vm_check_clean_reserved - check if a VM is clean
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: the VM to check
+ *
+ * check all entries of the root PD, if any subsequent PDs are allocated,
+ * it means there are page table creating and filling, and is no a clean
+ * VM
+ *
+ * Returns:
+ *	0 if this VM is clean
+ */
+static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
+	struct amdgpu_vm *vm)
+{
+	enum amdgpu_vm_level root = adev->vm_manager.root_level;
+	unsigned int entries = amdgpu_vm_num_entries(adev, root);
+	unsigned int i = 0;
+
+	if (!(vm->root.entries))
+		return 0;
+
+	for (i = 0; i < entries; i++) {
+		if (vm->root.entries[i].base.bo)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
 *
 * @adev: amdgpu_device pointer
@@ -2786,10 +2817,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
 		return r;
 
 	/* Sanity checks */
-	if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
-		r = -EINVAL;
+	r = amdgpu_vm_check_clean_reserved(adev, vm);
+	if (r)
 		goto unreserve_bo;
-	}
 
 	if (pasid) {
 		unsigned long flags;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 8dbad496b29f..2471e7cf75ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -372,6 +372,9 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
 		if (amdgpu_sriov_runtime(adev))
 			schedule_work(&adev->virt.flr_work);
 		break;
+	case IDH_QUERY_ALIVE:
+		xgpu_ai_mailbox_send_ack(adev);
+		break;
 	/* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore
 	 * it byfar since that polling thread will handle it,
 	 * other msg like flr complete is not handled here.
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 39d151b79153..077e91a33d62 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -49,6 +49,7 @@ enum idh_event {
 	IDH_FLR_NOTIFICATION_CMPL,
 	IDH_SUCCESS,
 	IDH_FAIL,
+	IDH_QUERY_ALIVE,
 	IDH_EVENT_MAX
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index dc461df48da0..2191d3d0a219 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -787,10 +787,13 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 					    0xFFFFFFFF, 0x00000004);
 		/* mc resume*/
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
-				lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
-				upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
+				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
+				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
 			offset = 0;
 		} else {
 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
@@ -798,10 +801,11 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 				upper_32_bits(adev->uvd.inst[i].gpu_addr));
 			offset = size;
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+
 		}
 
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
-			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
 
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index f3f5938430d4..c0ec27991c22 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -244,13 +244,18 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
 	MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
 	MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
 
+	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+		uint32_t low = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_lo;
+		uint32_t hi = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_hi;
+		uint64_t tmr_mc_addr = (uint64_t)(hi) << 32 | low;
+
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
-			mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
-			adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
+			mmVCE_LMI_VCPU_CACHE_40BIT_BAR0), tmr_mc_addr >> 8);
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
 			mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
-			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 40) & 0xff);
+			(tmr_mc_addr >> 40) & 0xff);
+		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), 0);
 	} else {
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
 			mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
@@ -258,6 +263,9 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
 			mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
 			(adev->vce.gpu_addr >> 40) & 0xff);
+		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
+			offset & ~0x0f000000);
+
 	}
 	MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
 		mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
@@ -272,10 +280,7 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
 		mmVCE_LMI_VCPU_CACHE_64BIT_BAR2),
 		(adev->vce.gpu_addr >> 40) & 0xff);
 
-	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
 	size = VCE_V4_0_FW_SIZE;
-	MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
-		offset & ~0x0f000000);
 	MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
 
 	offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
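The PSP branch above assembles the 64-bit TMR address from the two 32-bit ucode fields, then splits it across the BAR registers: bits 8-39 go into the 40-bit BAR via tmr_mc_addr >> 8 (the address is 256-byte aligned), and the byte above bit 40 goes into the 64-bit BAR via (tmr_mc_addr >> 40) & 0xff. A standalone sketch of that join/split round-trip (plain C, values chosen only for illustration):

#include <assert.h>
#include <stdint.h>

/* Join the hi/lo register halves into one address, as the patch does. */
static uint64_t join_addr(uint32_t hi, uint32_t lo)
{
	return (uint64_t)hi << 32 | lo;
}

int main(void)
{
	uint64_t addr = join_addr(0x0000012Cu, 0xCDEF1200u);
	uint32_t bar40 = (uint32_t)(addr >> 8);         /* bits 8..39 */
	uint32_t bar64 = (uint32_t)(addr >> 40) & 0xff; /* high byte  */

	/* The two register payloads reconstruct the original address. */
	assert((((uint64_t)bar64 << 40) | ((uint64_t)bar40 << 8)) == addr);
	return 0;
}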
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 1b2f69a9a24e..8d89ab7f0ae8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -31,7 +31,7 @@
 #include "soc15_common.h"
 #include "vega10_ih.h"
 
-
+#define MAX_REARM_RETRY 10
 
 static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
 
@@ -382,6 +382,38 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
 }
 
 /**
+ * vega10_ih_irq_rearm - rearm IRQ if lost
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+static void vega10_ih_irq_rearm(struct amdgpu_device *adev,
+			       struct amdgpu_ih_ring *ih)
+{
+	uint32_t reg_rptr = 0;
+	uint32_t v = 0;
+	uint32_t i = 0;
+
+	if (ih == &adev->irq.ih)
+		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+	else if (ih == &adev->irq.ih1)
+		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+	else if (ih == &adev->irq.ih2)
+		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+	else
+		return;
+
+	/* Rearm IRQ / re-write doorbell if doorbell write is lost */
+	for (i = 0; i < MAX_REARM_RETRY; i++) {
+		v = RREG32_NO_KIQ(reg_rptr);
+		if ((v < ih->ring_size) && (v != ih->rptr))
+			WDOORBELL32(ih->doorbell_index, ih->rptr);
+		else
+			break;
+	}
+}
+
+/**
 * vega10_ih_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
@@ -395,6 +427,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
 		/* XXX check if swapping is necessary on BE */
 		*ih->rptr_cpu = ih->rptr;
 		WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+		if (amdgpu_sriov_vf(adev))
+			vega10_ih_irq_rearm(adev, ih);
 	} else if (ih == &adev->irq.ih) {
 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
 	} else if (ih == &adev->irq.ih1) {
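Under SR-IOV a doorbell write can occasionally be lost, which is why the rearm is gated on amdgpu_sriov_vf(): vega10_ih_irq_rearm() re-rings the doorbell while the hardware read pointer still looks stale, giving up after MAX_REARM_RETRY attempts. A userspace sketch of that bounded-retry pattern (read_rptr()/write_doorbell() are hypothetical stand-ins for RREG32_NO_KIQ()/WDOORBELL32(), not real amdgpu API):

#include <stdint.h>

#define MAX_REARM_RETRY 10

/* Hypothetical stand-ins for the register accessors used above. */
extern uint32_t read_rptr(void);
extern void write_doorbell(uint32_t rptr);

/* Re-ring the doorbell while the hardware read pointer is in range but
 * has not caught up with ours, i.e. while the previous doorbell write
 * may have been dropped. */
void rearm_doorbell(uint32_t rptr, uint32_t ring_size)
{
	uint32_t i;

	for (i = 0; i < MAX_REARM_RETRY; i++) {
		uint32_t v = read_rptr();

		if (v < ring_size && v != rptr)
			write_doorbell(rptr);
		else
			break;
	}
}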
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1854506e3e8f..995f9df66142 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5242,7 +5242,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 				    struct drm_crtc *pcrtc,
 				    bool wait_for_vblank)
 {
-	uint32_t i, r;
+	uint32_t i;
 	uint64_t timestamp_ns;
 	struct drm_plane *plane;
 	struct drm_plane_state *old_plane_state, *new_plane_state;
@@ -5253,6 +5253,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 	struct dm_crtc_state *dm_old_crtc_state =
 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
 	int planes_count = 0, vpos, hpos;
+	long r;
 	unsigned long flags;
 	struct amdgpu_bo *abo;
 	uint64_t tiling_flags;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index ec2ca71e1323..c532e9c9e491 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -748,11 +748,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
 		vsync_polarity = 1;
 	}
 
-	if (mode->vrefresh <= 24000)
+	if (drm_mode_vrefresh(mode) <= 24)
 		low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
-	else if (mode->vrefresh <= 25000)
+	else if (drm_mode_vrefresh(mode) <= 25)
 		low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
-	else if (mode->vrefresh <= 30000)
+	else if (drm_mode_vrefresh(mode) <= 30)
 		low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
 	else
 		low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
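Two problems are fixed here: mode->vrefresh is a rate in Hz, so the old 24000/25000/30000 thresholds (which read like millihertz) would put essentially every mode into the first bucket, and the field may also simply be left unset; drm_mode_vrefresh() derives the rate from the mode timings when needed. A sketch approximating that derivation and the bucketing (the struct mirrors a subset of drm_display_mode; this is an illustration, not the exact kernel helper):

#include <stdint.h>

/* Minimal subset of struct drm_display_mode, assumed for the sketch. */
struct display_mode {
	int clock;  /* pixel clock in kHz */
	int htotal;
	int vtotal;
};

/* Approximation of drm_mode_vrefresh(): refresh rate in Hz from the
 * pixel clock and the total raster size. */
static int mode_vrefresh(const struct display_mode *m)
{
	if (m->htotal <= 0 || m->vtotal <= 0)
		return 0;
	return (m->clock * 1000) / (m->htotal * m->vtotal);
}

enum low_refresh { LRR_NONE, LRR_24HZ, LRR_25HZ, LRR_30HZ };

/* Bucket selection matching the fixed driver code above. */
static enum low_refresh pick_low_refresh(const struct display_mode *m)
{
	int hz = mode_vrefresh(m);

	if (hz <= 24)
		return LRR_24HZ;
	else if (hz <= 25)
		return LRR_25HZ;
	else if (hz <= 30)
		return LRR_30HZ;
	return LRR_NONE;
}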
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 2ec89bcb59f1..8a9606f91e68 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -196,9 +196,9 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
 int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
 {
 	struct dentry *ent;
-	char name[10] = "";
+	char name[16] = "";
 
-	sprintf(name, "vgpu%d", vgpu->id);
+	snprintf(name, 16, "vgpu%d", vgpu->id);
 	vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
 	if (!vgpu->debugfs)
 		return -ENOMEM;
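The buffer bump goes with the switch to snprintf(): a 32-bit id can take up to ten digits plus a sign, so "vgpu%d" can expand to 15 characters plus the terminating NUL, which would overflow the old char[10]. A quick bound check of the worst case:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	char name[16];
	/* Worst case: "vgpu" + "-2147483648" = 15 chars + NUL = 16 bytes. */
	int n = snprintf(name, sizeof(name), "vgpu%d", INT_MIN);

	return (n > 0 && (size_t)n < sizeof(name)) ? 0 : 1;
}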
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 4e1e425189ba..41c8ebc60c63 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -45,6 +45,7 @@ static int vgpu_gem_get_pages(
 	int i, ret;
 	gen8_pte_t __iomem *gtt_entries;
 	struct intel_vgpu_fb_info *fb_info;
+	u32 page_num;
 
 	fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
 	if (WARN_ON(!fb_info))
@@ -54,14 +55,15 @@ static int vgpu_gem_get_pages(
 	if (unlikely(!st))
 		return -ENOMEM;
 
-	ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
+	page_num = obj->base.size >> PAGE_SHIFT;
+	ret = sg_alloc_table(st, page_num, GFP_KERNEL);
 	if (ret) {
 		kfree(st);
 		return ret;
 	}
 	gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
 		(fb_info->start >> PAGE_SHIFT);
-	for_each_sg(st->sgl, sg, fb_info->size, i) {
+	for_each_sg(st->sgl, sg, page_num, i) {
 		sg->offset = 0;
 		sg->length = PAGE_SIZE;
 		sg_dma_address(sg) =
@@ -158,7 +160,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 		return NULL;
 
 	drm_gem_private_object_init(dev, &obj->base,
-		info->size << PAGE_SHIFT);
+		roundup(info->size, PAGE_SIZE));
 	i915_gem_object_init(obj, &intel_vgpu_gem_ops);
 
 	obj->read_domains = I915_GEM_DOMAIN_GTT;
@@ -206,11 +208,12 @@ static int vgpu_get_plane_info(struct drm_device *dev,
 			       struct intel_vgpu_fb_info *info,
 			       int plane_id)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_vgpu_primary_plane_format p;
 	struct intel_vgpu_cursor_plane_format c;
 	int ret, tile_height = 1;
 
+	memset(info, 0, sizeof(*info));
+
 	if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
 		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
 		if (ret)
@@ -267,8 +270,7 @@ static int vgpu_get_plane_info(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	info->size = (info->stride * roundup(info->height, tile_height)
-		      + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	info->size = info->stride * roundup(info->height, tile_height);
 	if (info->size == 0) {
 		gvt_vgpu_err("fb size is zero\n");
 		return -EINVAL;
@@ -278,11 +280,6 @@ static int vgpu_get_plane_info(struct drm_device *dev,
 		gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
 		return -EFAULT;
 	}
-	if (((info->start >> PAGE_SHIFT) + info->size) >
-	    ggtt_total_entries(&dev_priv->ggtt)) {
-		gvt_vgpu_err("Invalid GTT offset or size\n");
-		return -EFAULT;
-	}
 
 	if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
 		gvt_vgpu_err("invalid gma addr\n");
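The net effect of these hunks is a unit fix: info->size now consistently holds bytes (stride times rounded-up height) instead of a page count, vgpu_create_gem() rounds that byte size up to a whole page, and vgpu_gem_get_pages() derives the scatterlist length from the object size in pages. A sketch of the two conversions involved (4 KiB pages assumed for the illustration):

#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)  /* 4 KiB, the common case */

/* Round a byte count up to a whole number of pages, as
 * roundup(info->size, PAGE_SIZE) does for the GEM object size. */
static inline size_t bytes_to_alloc(size_t bytes)
{
	return (bytes + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

/* Number of pages backing an object whose size is page aligned, as
 * obj->base.size >> PAGE_SHIFT computes for the sg table length. */
static inline size_t pages_of(size_t aligned_bytes)
{
	return aligned_bytes >> PAGE_SHIFT;
}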
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index c2f7d20f6346..08c74e65836b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -811,7 +811,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
 
 /* Allocate shadow page table without guest page. */
 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
-		struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type)
+		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
 {
 	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
 	struct intel_vgpu_ppgtt_spt *spt = NULL;
@@ -861,7 +861,7 @@ err_free_spt:
 
 /* Allocate shadow page table associated with specific gfn. */
 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
-		struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type,
+		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
 		unsigned long gfn, bool guest_pde_ips)
 {
 	struct intel_vgpu_ppgtt_spt *spt;
@@ -936,7 +936,7 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
 {
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *s;
-	intel_gvt_gtt_type_t cur_pt_type;
+	enum intel_gvt_gtt_type cur_pt_type;
 
 	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));
 
@@ -1076,6 +1076,9 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
 	} else {
 		int type = get_next_pt_type(we->type);
 
+		if (!gtt_type_is_pt(type))
+			goto err;
+
 		spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
 		if (IS_ERR(spt)) {
 			ret = PTR_ERR(spt);
@@ -1855,7 +1858,7 @@ static void vgpu_free_mm(struct intel_vgpu_mm *mm)
 * Zero on success, negative error code in pointer if failed.
 */
 struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
-		intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
+		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_vgpu_mm *mm;
@@ -2309,7 +2312,7 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
 }
 
 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
-		intel_gvt_gtt_type_t type)
+		enum intel_gvt_gtt_type type)
 {
 	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
@@ -2594,7 +2597,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
 * Zero on success, negative error code if failed.
 */
 struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
-		intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
+		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
 {
 	struct intel_vgpu_mm *mm;
 
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 32c573aea494..42d0394f0de2 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -95,8 +95,8 @@ struct intel_gvt_gtt {
 	unsigned long scratch_mfn;
 };
 
-typedef enum {
-	GTT_TYPE_INVALID = -1,
+enum intel_gvt_gtt_type {
+	GTT_TYPE_INVALID = 0,
 
 	GTT_TYPE_GGTT_PTE,
 
@@ -124,7 +124,7 @@ typedef enum {
 	GTT_TYPE_PPGTT_PML4_PT,
 
 	GTT_TYPE_MAX,
-} intel_gvt_gtt_type_t;
+};
 
 enum intel_gvt_mm_type {
 	INTEL_GVT_MM_GGTT,
@@ -148,7 +148,7 @@ struct intel_vgpu_mm {
 
 	union {
 		struct {
-			intel_gvt_gtt_type_t root_entry_type;
+			enum intel_gvt_gtt_type root_entry_type;
 			/*
 			 * The 4 PDPs in ring context. For 48bit addressing,
 			 * only PDP0 is valid and point to PML4. For 32it
@@ -169,7 +169,7 @@ struct intel_vgpu_mm {
 };
 
 struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
-		intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
+		enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);
 
 static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
 {
@@ -233,7 +233,7 @@ struct intel_vgpu_ppgtt_spt {
 	struct intel_vgpu *vgpu;
 
 	struct {
-		intel_gvt_gtt_type_t type;
+		enum intel_gvt_gtt_type type;
 		bool pde_ips; /* for 64KB PTEs */
 		void *vaddr;
 		struct page *page;
@@ -241,7 +241,7 @@ struct intel_vgpu_ppgtt_spt {
 	} shadow_page;
 
 	struct {
-		intel_gvt_gtt_type_t type;
+		enum intel_gvt_gtt_type type;
 		bool pde_ips; /* for 64KB PTEs */
 		unsigned long gfn;
 		unsigned long write_cnt;
@@ -267,7 +267,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
 		u64 pdps[]);
 
 struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
-		intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
+		enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);
 
 int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]);
 
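Two things change in this header: the typedef is dropped in favour of a plain named enum (kernel style discourages typedefs), and GTT_TYPE_INVALID moves from -1 to 0, so zero-initialized storage now reads back as invalid rather than as the first valid type. A minimal userspace sketch of that second point, with calloc() standing in for kzalloc():

#include <stdlib.h>

/* Mirrors the patched enum: the 0 value is the invalid sentinel. */
enum intel_gvt_gtt_type {
	GTT_TYPE_INVALID = 0,
	GTT_TYPE_GGTT_PTE,
	GTT_TYPE_MAX,
};

struct shadow_page {
	enum intel_gvt_gtt_type type;
};

int main(void)
{
	/* Zeroed allocation, as kzalloc() would return in the kernel. */
	struct shadow_page *sp = calloc(1, sizeof(*sp));

	if (!sp)
		return 1;
	/* With INVALID == -1 this unset field would have decoded as
	 * GTT_TYPE_GGTT_PTE (the old value 0); with INVALID == 0 the
	 * uninitialized state is detectable. */
	return sp->type == GTT_TYPE_INVALID ? 0 : 1;
}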
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 18f01eeb2510..90673fca792f 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1206,7 +1206,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 
 static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
 {
-	intel_gvt_gtt_type_t root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+	enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
 	struct intel_vgpu_mm *mm;
 	u64 *pdps;
 
@@ -3303,7 +3303,7 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
 /* Special MMIO blocks. */
 static struct gvt_mmio_block mmio_blocks[] = {
 	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
-	{D_ALL, MCHBAR_MIRROR_REG_BASE, 0x4000, NULL, NULL},
+	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
 	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
 		pvinfo_mmio_read, pvinfo_mmio_write},
 	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index e7e14c842be4..edf6d646eb25 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -132,6 +132,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 
 	{RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
 	{RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+	{RCS0, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
 
 	{RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
 	{RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 3de5b643b266..33aaa14bfdde 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -126,7 +126,4 @@
 #define RING_GFX_MODE(base)	_MMIO((base) + 0x29c)
 #define VF_GUARDBAND	_MMIO(0x83a4)
 
-/* define the effective range of MCHBAR register on Sandybridge+ */
-#define MCHBAR_MIRROR_REG_BASE	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4000)
-
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 8998fa5ab198..7c99bbc3e2b8 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1343,7 +1343,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
 	struct intel_vgpu_mm *mm;
 	struct intel_vgpu *vgpu = workload->vgpu;
-	intel_gvt_gtt_type_t root_entry_type;
+	enum intel_gvt_gtt_type root_entry_type;
 	u64 pdps[GVT_RING_CTX_NR_PDPS];
 
 	switch (desc->addressing_mode) {
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index b836721d3b13..f6c78c0fa74b 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -425,6 +425,26 @@ void __i915_request_submit(struct i915_request *request)
 	if (i915_gem_context_is_banned(request->gem_context))
 		i915_request_skip(request, -EIO);
 
+	/*
+	 * Are we using semaphores when the gpu is already saturated?
+	 *
+	 * Using semaphores incurs a cost in having the GPU poll a
+	 * memory location, busywaiting for it to change. The continual
+	 * memory reads can have a noticeable impact on the rest of the
+	 * system with the extra bus traffic, stalling the cpu as it too
+	 * tries to access memory across the bus (perf stat -e bus-cycles).
+	 *
+	 * If we installed a semaphore on this request and we only submit
+	 * the request after the signaler completed, that indicates the
+	 * system is overloaded and using semaphores at this time only
+	 * increases the amount of work we are doing. If so, we disable
+	 * further use of semaphores until we are idle again, whence we
+	 * optimistically try again.
+	 */
+	if (request->sched.semaphores &&
+	    i915_sw_fence_signaled(&request->semaphore))
+		request->hw_context->saturated |= request->sched.semaphores;
+
 	/* We may be recursing from the signal callback of another i915 fence */
 	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
 
@@ -432,6 +452,7 @@ void __i915_request_submit(struct i915_request *request)
 	set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
 
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
+	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
 	    !i915_request_enable_breadcrumb(request))
 		intel_engine_queue_breadcrumbs(engine);
 
@@ -799,6 +820,39 @@ err_unreserve:
 }
 
 static int
+i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
+{
+	if (list_is_first(&signal->ring_link, &signal->ring->request_list))
+		return 0;
+
+	signal = list_prev_entry(signal, ring_link);
+	if (i915_timeline_sync_is_later(rq->timeline, &signal->fence))
+		return 0;
+
+	return i915_sw_fence_await_dma_fence(&rq->submit,
+					     &signal->fence, 0,
+					     I915_FENCE_GFP);
+}
+
+static intel_engine_mask_t
+already_busywaiting(struct i915_request *rq)
+{
+	/*
+	 * Polling a semaphore causes bus traffic, delaying other users of
+	 * both the GPU and CPU. We want to limit the impact on others,
+	 * while taking advantage of early submission to reduce GPU
+	 * latency. Therefore we restrict ourselves to not using more
+	 * than one semaphore from each source, and not using a semaphore
+	 * if we have detected the engine is saturated (i.e. would not be
+	 * submitted early and cause bus traffic reading an already passed
+	 * semaphore).
+	 *
+	 * See the are-we-too-late? check in __i915_request_submit().
+	 */
+	return rq->sched.semaphores | rq->hw_context->saturated;
+}
+
+static int
 emit_semaphore_wait(struct i915_request *to,
 		    struct i915_request *from,
 		    gfp_t gfp)
@@ -811,11 +865,15 @@ emit_semaphore_wait(struct i915_request *to,
 	GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
 
 	/* Just emit the first semaphore we see as request space is limited. */
-	if (to->sched.semaphores & from->engine->mask)
+	if (already_busywaiting(to) & from->engine->mask)
 		return i915_sw_fence_await_dma_fence(&to->submit,
 						     &from->fence, 0,
 						     I915_FENCE_GFP);
 
+	err = i915_request_await_start(to, from);
+	if (err < 0)
+		return err;
+
 	err = i915_sw_fence_await_dma_fence(&to->semaphore,
 					    &from->fence, 0,
 					    I915_FENCE_GFP);
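The two new helpers work as a pair: already_busywaiting() refuses another busywait for any engine this request already waits on, or that its context has flagged as saturated, and the check added to __i915_request_submit() sets that flag when a semaphore proved pointless (its signaler had already completed by submit time). A sketch of the bookkeeping with plain bitmasks (the names are modelled on the patch, not the real i915 types):

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t engine_mask_t; /* one bit per engine, like intel_engine_mask_t */

struct context {
	engine_mask_t saturated;  /* engines whose semaphores came too late */
};

struct request {
	struct context *ctx;
	engine_mask_t semaphores; /* engines this request busywaits on */
};

/* Engines we must not add another busywait for: ones this request already
 * uses, plus ones the context has flagged as saturated. */
static engine_mask_t already_busywaiting(const struct request *rq)
{
	return rq->semaphores | rq->ctx->saturated;
}

/* At submit time: if the request carried semaphores but its signalers had
 * already completed, the busywait bought nothing; remember that per engine. */
static void note_submit(struct request *rq, bool signalers_done)
{
	if (rq->semaphores && signalers_done)
		rq->ctx->saturated |= rq->semaphores;
}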
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 3cbffd400b1b..832cb6b1e9bd 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -23,6 +23,7 @@
  */
 
 #include <linux/kthread.h>
+#include <trace/events/dma_fence.h>
 #include <uapi/linux/sched/types.h>
 
 #include "i915_drv.h"
@@ -80,9 +81,39 @@ static inline bool __request_completed(const struct i915_request *rq)
 	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
 }
 
+static bool
+__dma_fence_signal(struct dma_fence *fence)
+{
+	return !test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
+}
+
+static void
+__dma_fence_signal__timestamp(struct dma_fence *fence, ktime_t timestamp)
+{
+	fence->timestamp = timestamp;
+	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
+	trace_dma_fence_signaled(fence);
+}
+
+static void
+__dma_fence_signal__notify(struct dma_fence *fence)
+{
+	struct dma_fence_cb *cur, *tmp;
+
+	lockdep_assert_held(fence->lock);
+	lockdep_assert_irqs_disabled();
+
+	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+		INIT_LIST_HEAD(&cur->node);
+		cur->func(fence, cur);
+	}
+	INIT_LIST_HEAD(&fence->cb_list);
+}
+
 void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 {
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+	const ktime_t timestamp = ktime_get();
 	struct intel_context *ce, *cn;
 	struct list_head *pos, *next;
 	LIST_HEAD(signal);
@@ -104,6 +135,10 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 
 			GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
 					     &rq->fence.flags));
+			clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+
+			if (!__dma_fence_signal(&rq->fence))
+				continue;
 
 			/*
 			 * Queue for execution after dropping the signaling
@@ -111,14 +146,6 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 			 * more signalers to the same context or engine.
 			 */
 			i915_request_get(rq);
-
-			/*
-			 * We may race with direct invocation of
-			 * dma_fence_signal(), e.g. i915_request_retire(),
-			 * so we need to acquire our reference to the request
-			 * before we cancel the breadcrumb.
-			 */
-			clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
 			list_add_tail(&rq->signal_link, &signal);
 		}
 
@@ -141,7 +168,12 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 		struct i915_request *rq =
 			list_entry(pos, typeof(*rq), signal_link);
 
-		dma_fence_signal(&rq->fence);
+		__dma_fence_signal__timestamp(&rq->fence, timestamp);
+
+		spin_lock(&rq->lock);
+		__dma_fence_signal__notify(&rq->fence);
+		spin_unlock(&rq->lock);
+
 		i915_request_put(rq);
 	}
 }
@@ -243,19 +275,17 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
 
 bool i915_request_enable_breadcrumb(struct i915_request *rq)
 {
-	struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
-
-	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+	lockdep_assert_held(&rq->lock);
+	lockdep_assert_irqs_disabled();
 
-	if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
-		return true;
-
-	spin_lock(&b->irq_lock);
-	if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags) &&
-	    !__request_completed(rq)) {
+	if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
+		struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
 		struct intel_context *ce = rq->hw_context;
 		struct list_head *pos;
 
+		spin_lock(&b->irq_lock);
+		GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+
 		__intel_breadcrumbs_arm_irq(b);
 
 		/*
@@ -284,8 +314,8 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
 		list_move_tail(&ce->signal_link, &b->signalers);
 
 		set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+		spin_unlock(&b->irq_lock);
 	}
-	spin_unlock(&b->irq_lock);
 
 	return !__request_completed(rq);
 }
@@ -294,9 +324,15 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
 {
 	struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
 
-	if (!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
-		return;
+	lockdep_assert_held(&rq->lock);
+	lockdep_assert_irqs_disabled();
 
+	/*
+	 * We must wait for b->irq_lock so that we know the interrupt handler
+	 * has released its reference to the intel_context and has completed
+	 * the DMA_FENCE_FLAG_SIGNALED_BIT/I915_FENCE_FLAG_SIGNAL dance (if
+	 * required).
+	 */
 	spin_lock(&b->irq_lock);
 	if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
 		struct intel_context *ce = rq->hw_context;
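The race seal hinges on __dma_fence_signal() using test_and_set_bit(): it is an atomic read-modify-write, so whichever of the interrupt handler or a direct dma_fence_signal() caller sets DMA_FENCE_FLAG_SIGNALED_BIT first gets true back and performs the timestamp/notify side effects exactly once. A userspace sketch of that claim-once idiom with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

#define SIGNALED_BIT 0x1u

struct fence {
	atomic_uint flags;
};

/* Returns true for exactly one caller, no matter how many threads race:
 * the analogue of !test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, ...). */
static bool fence_claim_signal(struct fence *f)
{
	unsigned int old = atomic_fetch_or(&f->flags, SIGNALED_BIT);

	return !(old & SIGNALED_BIT);
}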
diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c
index 8931e0fee873..924cc556223a 100644
--- a/drivers/gpu/drm/i915/intel_context.c
+++ b/drivers/gpu/drm/i915/intel_context.c
@@ -230,6 +230,7 @@ intel_context_init(struct intel_context *ce,
 	ce->gem_context = ctx;
 	ce->engine = engine;
 	ce->ops = engine->cops;
+	ce->saturated = 0;
 
 	INIT_LIST_HEAD(&ce->signal_link);
 	INIT_LIST_HEAD(&ce->signals);
diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/intel_context_types.h
index 68b4ca1611e0..339c7437fe82 100644
--- a/drivers/gpu/drm/i915/intel_context_types.h
+++ b/drivers/gpu/drm/i915/intel_context_types.h
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 
 #include "i915_active_types.h"
+#include "intel_engine_types.h"
 
 struct i915_gem_context;
 struct i915_vma;
@@ -58,6 +59,8 @@ struct intel_context {
 	atomic_t pin_count;
 	struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
 
+	intel_engine_mask_t saturated; /* submitting semaphores too late? */
+
 	/**
 	 * active_tracker: Active tracker for the external rq activity
 	 * on this intel_context object.
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3bd40a4a6739..5098228f1302 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12082,6 +12082,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
 			  struct intel_crtc_state *pipe_config,
 			  bool adjust)
 {
+	struct intel_crtc *crtc = to_intel_crtc(current_config->base.crtc);
 	bool ret = true;
 	bool fixup_inherited = adjust &&
 		(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
@@ -12303,6 +12304,14 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
 	PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
 	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
 
+	/*
+	 * Changing the EDP transcoder input mux
+	 * (A_ONOFF vs. A_ON) requires a full modeset.
+	 */
+	if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A &&
+	    current_config->cpu_transcoder == TRANSCODER_EDP)
+		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
+
 	if (!adjust) {
 		PIPE_CONF_CHECK_I(pipe_src_w);
 		PIPE_CONF_CHECK_I(pipe_src_h);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index c805a0966395..5679f2fffb7c 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -1280,6 +1280,10 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
 	if (!HAS_FBC(dev_priv))
 		return 0;
 
+	/* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
+	if (IS_GEMINILAKE(dev_priv))
+		return 0;
+
 	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
 		return 1;
 
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 37f60cb8e9e1..46cd0e70aecb 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -23,7 +23,6 @@
  */
 
 #include <linux/circ_buf.h>
-#include <trace/events/dma_fence.h>
 
 #include "intel_guc_submission.h"
 #include "intel_lrc_reg.h"
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index e94b5b1bc1b7..e7c7be4911c1 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -311,10 +311,17 @@ retry:
 	pipe_config->base.mode_changed = pipe_config->has_psr;
 	pipe_config->crc_enabled = enable;
 
-	if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A) {
+	if (IS_HASWELL(dev_priv) &&
+	    pipe_config->base.active && crtc->pipe == PIPE_A &&
+	    pipe_config->cpu_transcoder == TRANSCODER_EDP) {
+		bool old_need_power_well = pipe_config->pch_pfit.enabled ||
+			pipe_config->pch_pfit.force_thru;
+		bool new_need_power_well = pipe_config->pch_pfit.enabled ||
+			enable;
+
 		pipe_config->pch_pfit.force_thru = enable;
-		if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
-		    pipe_config->pch_pfit.enabled != enable)
+
+		if (old_need_power_well != new_need_power_well)
 			pipe_config->base.connectors_changed = true;
 	}
 
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 9155dafae2a9..38e2cfa9cec7 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -747,7 +747,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 	 * will make sure that the refcounting is correct in case we need to
 	 * bring down the GX after a GMU failure
 	 */
-	if (!IS_ERR(gmu->gxpd))
+	if (!IS_ERR_OR_NULL(gmu->gxpd))
 		pm_runtime_get(gmu->gxpd);
 
 out:
@@ -863,7 +863,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
 	 * domain. Usually the GMU does this but only if the shutdown sequence
 	 * was successful
 	 */
-	if (!IS_ERR(gmu->gxpd))
+	if (!IS_ERR_OR_NULL(gmu->gxpd))
 		pm_runtime_put_sync(gmu->gxpd);
 
 	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
@@ -1234,7 +1234,7 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
 
 	pm_runtime_disable(gmu->dev);
 
-	if (!IS_ERR(gmu->gxpd)) {
+	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
 		pm_runtime_disable(gmu->gxpd);
 		dev_pm_domain_detach(gmu->gxpd, false);
 	}
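IS_ERR() only recognizes encoded error pointers; gmu->gxpd can also legitimately be NULL (dev_pm_domain_attach_by_name() can return NULL when the optional domain is absent), and the pm_runtime_*() calls would then dereference it. A userspace sketch of the ERR_PTR convention that IS_ERR_OR_NULL() extends to cover NULL:

#include <stdbool.h>
#include <stdint.h>

#define MAX_ERRNO 4095

/* Error pointers occupy the top 4095 values of the address space,
 * mirroring the kernel's IS_ERR_VALUE(). */
static inline bool is_err(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static inline bool is_err_or_null(const void *p)
{
	return !p || is_err(p);
}

/* Guarding an optional handle: act only on a real pointer. */
static void put_domain(void *gxpd, void (*put)(void *))
{
	if (!is_err_or_null(gxpd))
		put(gxpd);
}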
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 018df2c3b7ed..45a5bc6ede5d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -15,7 +15,6 @@
 #include "dpu_hwio.h"
 #include "dpu_hw_lm.h"
 #include "dpu_hw_mdss.h"
-#include "dpu_kms.h"
 
 #define LM_OP_MODE 0x00
 #define LM_OUT_SIZE 0x04
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index da1f727d7495..ce1a555e1f31 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -780,7 +780,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
 	struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
 	struct dpu_hw_fmt_layout layout;
 	struct drm_gem_object *obj;
-	struct msm_gem_object *msm_obj;
 	struct dma_fence *fence;
 	struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
 	int ret;
@@ -799,8 +798,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
 	 * implicit fence and fb prepare by hand here.
 	 */
 	obj = msm_framebuffer_bo(new_state->fb, 0);
-	msm_obj = to_msm_bo(obj);
-	fence = reservation_object_get_excl_rcu(msm_obj->resv);
+	fence = reservation_object_get_excl_rcu(obj->resv);
 	if (fence)
 		drm_atomic_set_fence_for_plane(new_state, fence);
 
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index f5b1256e32b6..131c23a267ee 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -49,15 +49,13 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,
 	struct msm_drm_private *priv = plane->dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	struct drm_gem_object *obj;
-	struct msm_gem_object *msm_obj;
 	struct dma_fence *fence;
 
 	if (!new_state->fb)
 		return 0;
 
 	obj = msm_framebuffer_bo(new_state->fb, 0);
-	msm_obj = to_msm_bo(obj);
-	fence = reservation_object_get_excl_rcu(msm_obj->resv);
+	fence = reservation_object_get_excl_rcu(obj->resv);
 
 	drm_atomic_set_fence_for_plane(new_state, fence);
 
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 31d5a744d84f..35f55dd25994 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -803,7 +803,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	seq_puts(m, " vmas:");
 
 	list_for_each_entry(vma, &msm_obj->vmas, list)
-		seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
+		seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
+			   vma->aspace != NULL ? vma->aspace->name : NULL,
 			vma->iova, vma->mapped ? "mapped" : "unmapped",
 			vma->inuse);
 
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index c5ac781dffee..812d1b1369a5 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -86,10 +86,6 @@ struct msm_gem_object {
 
 	struct llist_node freed;
 
-	/* normally (resv == &_resv) except for imported bo's */
-	struct reservation_object *resv;
-	struct reservation_object _resv;
-
 	/* For physically contiguous buffers. Used when we don't have
 	 * an IOMMU. Also used for stolen/splashscreen buffer.
 	 */
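With the duplicated fields gone, the prepare_fb paths shown earlier read the reservation object embedded in the base drm_gem_object instead. A one-line sketch of the new lookup, using the same names as the hunks above:

    /* obj is the drm_gem_object returned by msm_framebuffer_bo(); its
     * own resv now serves where the msm-private copy used to. */
    struct dma_fence *fence = reservation_object_get_excl_rcu(obj->resv);
    if (fence)
            drm_atomic_set_fence_for_plane(new_state, fence);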
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 2216c58620c2..7c41b0599d1a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -41,6 +41,7 @@ struct nv50_disp_interlock {
 		NV50_DISP_INTERLOCK__SIZE
 	} type;
 	u32 data;
+	u32 wimm;
 };
 
 void corec37d_ntfy_init(struct nouveau_bo *, u32);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 2e7a0c347ddb..06ee23823a68 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -306,7 +306,7 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
 		asyh->set.or = head->func->or != NULL;
 	}
 
-	if (asyh->state.mode_changed)
+	if (asyh->state.mode_changed || asyh->state.connectors_changed)
 		nv50_head_atomic_check_mode(head, asyh);
 
 	if (asyh->state.color_mgmt_changed ||
@@ -413,6 +413,7 @@ nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
 	asyh->ovly = armh->ovly;
 	asyh->dither = armh->dither;
 	asyh->procamp = armh->procamp;
+	asyh->or = armh->or;
 	asyh->dp = armh->dp;
 	asyh->clr.mask = 0;
 	asyh->set.mask = 0;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
index 9103b8494279..f7dbd965e4e7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
@@ -75,6 +75,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
 		return ret;
 	}
 
+	wndw->interlock.wimm = wndw->interlock.data;
 	wndw->immd = func;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index b95181027b31..283ff690350e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -127,7 +127,7 @@ void
 nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
 		    struct nv50_wndw_atom *asyw)
 {
-	if (interlock) {
+	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
 		asyw->image.mode = 0;
 		asyw->image.interval = 1;
 	}
@@ -149,7 +149,7 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
 	if (asyw->set.point) {
 		if (asyw->set.point = false, asyw->set.mask)
 			interlock[wndw->interlock.type] |= wndw->interlock.data;
-		interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.data;
+		interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.wimm;
 
 		wndw->immd->point(wndw, asyw);
 		wndw->immd->update(wndw, interlock);
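The new interlock.wimm field is only made non-zero in wimmc37b_init_() (see the hunk above), i.e. for windows that own a real window-immediate channel, so the OR at flush time is a no-op for every other window instead of a spurious immediate interlock. Sketched with the field names from the hunks:

    /* set once at init, only for windows with a WIMM channel: */
    wndw->interlock.wimm = wndw->interlock.data;
    /* at flush time this contributes nothing when .wimm == 0: */
    interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.wimm;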
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 22cd45845e07..7c2fcaba42d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -631,7 +631,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
 	/* We need to check that the chipset is supported before booting
 	 * fbdev off the hardware, as there's no way to put it back.
 	 */
-	ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
+	ret = nvkm_device_pci_new(pdev, nouveau_config, "error",
+				  true, false, 0, &device);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 7971096b6767..10d91e8bbb94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2540,6 +2540,41 @@ nv166_chipset = {
 	.sec2 = tu102_sec2_new,
 };
 
+static const struct nvkm_device_chip
+nv167_chipset = {
+	.name = "TU117",
+	.bar = tu102_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.devinit = tu102_devinit_new,
+	.fault = tu102_fault_new,
+	.fb = gv100_fb_new,
+	.fuse = gm107_fuse_new,
+	.gpio = gk104_gpio_new,
+	.gsp = gv100_gsp_new,
+	.i2c = gm200_i2c_new,
+	.ibus = gm200_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gp102_ltc_new,
+	.mc = tu102_mc_new,
+	.mmu = tu102_mmu_new,
+	.pci = gp100_pci_new,
+	.pmu = gp102_pmu_new,
+	.therm = gp100_therm_new,
+	.timer = gk20a_timer_new,
+	.top = gk104_top_new,
+	.ce[0] = tu102_ce_new,
+	.ce[1] = tu102_ce_new,
+	.ce[2] = tu102_ce_new,
+	.ce[3] = tu102_ce_new,
+	.ce[4] = tu102_ce_new,
+	.disp = tu102_disp_new,
+	.dma = gv100_dma_new,
+	.fifo = tu102_fifo_new,
+	.nvdec[0] = gp102_nvdec_new,
+	.sec2 = tu102_sec2_new,
+};
+
 static int
 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
 		       struct nvkm_notify *notify)
@@ -2824,8 +2859,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 	u64 mmio_base, mmio_size;
 	u32 boot0, strap;
 	void __iomem *map;
-	int ret = -EEXIST;
-	int i;
+	int ret = -EEXIST, i;
+	unsigned chipset;
 
 	mutex_lock(&nv_devices_mutex);
 	if (nvkm_device_find_locked(handle))
@@ -2870,6 +2905,26 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 		strap = ioread32_native(map + 0x101000);
 		iounmap(map);
 
+		/* chipset can be overridden for devel/testing purposes */
+		chipset = nvkm_longopt(device->cfgopt, "NvChipset", 0);
+		if (chipset) {
+			u32 override_boot0;
+
+			if (chipset >= 0x10) {
+				override_boot0 = ((chipset & 0x1ff) << 20);
+				override_boot0 |= 0x000000a1;
+			} else {
+				if (chipset != 0x04)
+					override_boot0 = 0x20104000;
+				else
+					override_boot0 = 0x20004000;
+			}
+
+			nvdev_warn(device, "CHIPSET OVERRIDE: %08x -> %08x\n",
+				   boot0, override_boot0);
+			boot0 = override_boot0;
+		}
+
 		/* determine chipset and derive architecture from it */
 		if ((boot0 & 0x1f000000) > 0) {
 			device->chipset = (boot0 & 0x1ff00000) >> 20;
@@ -2996,6 +3051,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 	case 0x162: device->chip = &nv162_chipset; break;
 	case 0x164: device->chip = &nv164_chipset; break;
 	case 0x166: device->chip = &nv166_chipset; break;
+	case 0x167: device->chip = &nv167_chipset; break;
 	default:
 		nvdev_error(device, "unknown chipset (%08x)\n", boot0);
 		goto done;
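The override path packs the requested id into the same bit layout the detection code then parses: for post-NV10 chipsets the id lands in bits 28:20 of boot0. Assuming the usual nouveau `config=` module option is what reaches device->cfgopt (as the nouveau_drm.c hunk above suggests), forcing TU117 detection would look roughly like:

    nouveau.config=NvChipset=0x167

    override_boot0 = ((0x167 & 0x1ff) << 20) | 0x000000a1 = 0x167000a1
    (0x167000a1 & 0x1ff00000) >> 20 = 0x167  ->  nv167_chipset ("TU117")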
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 5f301e632599..818d21bd28d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -365,8 +365,15 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
 	 * and it's better to have a failed modeset than that.
 	 */
 	for (cfg = nvkm_dp_rates; cfg->rate; cfg++) {
-		if (cfg->nr <= outp_nr && cfg->nr <= outp_bw)
-			failsafe = cfg;
+		if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) {
+			/* Try to respect sink limits too when selecting
+			 * lowest link configuration.
+			 */
+			if (!failsafe ||
+			    (cfg->nr <= sink_nr && cfg->bw <= sink_bw))
+				failsafe = cfg;
+		}
+
 		if (failsafe && cfg[1].rate < dataKBps)
 			break;
 	}
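The loop walks nvkm_dp_rates from fastest to slowest and stops once the next entry would fall below the required data rate. The first source-capable entry is always taken, so a failsafe always exists, but it is then only lowered to entries the sink can drive too. A condensed, self-contained sketch of that rule (struct and names are illustrative stand-ins, not nouveau's types):

    struct cfg { int rate, nr, bw; };   /* zero-rate entry terminates */

    static const struct cfg *pick_failsafe(const struct cfg *cfg,
                                           int outp_nr, int outp_bw,
                                           int sink_nr, int sink_bw,
                                           int dataKBps)
    {
            const struct cfg *failsafe = NULL;

            for (; cfg->rate; cfg++) {
                    if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) {
                            /* later (slower) hits only when the sink
                             * supports them too */
                            if (!failsafe ||
                                (cfg->nr <= sink_nr && cfg->bw <= sink_bw))
                                    failsafe = cfg;
                    }
                    if (failsafe && cfg[1].rate < dataKBps)
                            break;  /* next entry would be too slow */
            }
            return failsafe;
    }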
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 970f669c6d29..3b2bced1b015 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -165,6 +165,10 @@ err_out0:
 
 void panfrost_device_fini(struct panfrost_device *pfdev)
 {
+	panfrost_job_fini(pfdev);
+	panfrost_mmu_fini(pfdev);
+	panfrost_gpu_fini(pfdev);
+	panfrost_reset_fini(pfdev);
 	panfrost_regulator_fini(pfdev);
 	panfrost_clk_fini(pfdev);
 }
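The added calls tear the sub-blocks down in the reverse of the order they are presumably brought up, the usual last-in-first-out kernel pattern, so nothing is freed while a later-initialized block still uses it. A generic, self-contained sketch of the idiom (unit_a/unit_b are hypothetical, not panfrost functions):

    /* init: bring units up in order, unwinding on failure */
    static int example_init(void)
    {
            int ret;

            ret = unit_a_init();            /* e.g. clocks/regulators */
            if (ret)
                    return ret;
            ret = unit_b_init();            /* e.g. gpu/mmu/job blocks */
            if (ret)
                    goto err_a;
            return 0;
    err_a:
            unit_a_fini();
            return ret;
    }

    /* fini: strictly the reverse order of init */
    static void example_fini(void)
    {
            unit_b_fini();
            unit_a_fini();
    }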
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 94b0819ad50b..d11e2281dde6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -219,7 +219,8 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
 fail_job:
 	panfrost_job_put(job);
 fail_out_sync:
-	drm_syncobj_put(sync_out);
+	if (sync_out)
+		drm_syncobj_put(sync_out);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 0c5d391f0a8f..4501597f30ab 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -531,14 +531,15 @@ pl111_init_clock_divider(struct drm_device *drm)
 		dev_err(drm->dev, "CLCD: unable to get clcdclk.\n");
 		return PTR_ERR(parent);
 	}
+
+	spin_lock_init(&priv->tim2_lock);
+
 	/* If the clock divider is broken, use the parent directly */
 	if (priv->variant->broken_clockdivider) {
 		priv->clk = parent;
 		return 0;
 	}
 	parent_name = __clk_get_name(parent);
-
-	spin_lock_init(&priv->tim2_lock);
 	div->init = &init;
 
 	ret = devm_clk_hw_register(drm->dev, div);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index aa898c699101..433df7036f96 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -922,12 +922,12 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
 	ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
 
 	/* get matching reference and feedback divider */
-	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
+	*ref_div = min(max(den/post_div, 1u), ref_div_max);
 	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
 
 	/* limit fb divider to its maximum */
 	if (*fb_div > fb_div_max) {
-		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
+		*ref_div = (*ref_div * fb_div_max)/(*fb_div);
 		*fb_div = fb_div_max;
 	}
 }
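Both hunks replace round-to-nearest with plain truncating division, which biases the result toward the lower reference divider. A tiny userspace demonstration of the difference, using the unsigned form of the kernel's DIV_ROUND_CLOSEST (values made up for illustration):

    #include <stdio.h>

    /* unsigned-only version of the kernel macro */
    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
            unsigned den = 10, post_div = 4;

            printf("closest: %u\n", DIV_ROUND_CLOSEST(den, post_div)); /* 3 */
            printf("floor:   %u\n", den / post_div);                   /* 2 */
            return 0;
    }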