author     Linus Torvalds <torvalds@linux-foundation.org>   2016-02-05 22:38:15 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-02-05 22:38:15 -0500
commit     9b108828ed25aff1239304437ec5fa8b9977a306 (patch)
tree       bccd69b7edf4e653923f1c319f931aff2ae26d4a
parent     22f60701d5527cd8a07e0651bc12e226cf38da79 (diff)
parent     6739b3d7bc18a5373efd863b11831e8f515fffe1 (diff)
Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "Fixes all over the place:

   - amdkfd: two static checker fixes

   - mst: a bunch of static checker and spec/hw interaction fixes

   - amdgpu: fix Iceland hw properly, and some fiji bugs, along with
     some write-combining fixes.

   - exynos: some regression fixes

   - adv7511: fix some EDID reading issues"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (38 commits)
  drm/dp/mst: deallocate payload on port destruction
  drm/dp/mst: Reverse order of MST enable and clearing VC payload table.
  drm/dp/mst: move GUID storage from mgr, port to only mst branch
  drm/dp/mst: change MST detection scheme
  drm/dp/mst: Calculate MST PBN with 31.32 fixed point
  drm: Add drm_fixp_from_fraction and drm_fixp2int_ceil
  drm/mst: Add range check for max_payloads during init
  drm/mst: Don't ignore the MST PBN self-test result
  drm: fix missing reference counting decrease
  drm/amdgpu: disable uvd and vce clockgating on Fiji
  drm/amdgpu: remove exp hardware support from iceland
  drm/amdgpu: load MEC ucode manually on iceland
  drm/amdgpu: don't load MEC2 on topaz
  drm/amdgpu: drop topaz support from gmc8 module
  drm/amdgpu: pull topaz gmc bits into gmc_v7
  drm/amdgpu: The VI specific EXE bit should only apply to GMC v8.0 above
  drm/amdgpu: iceland use CI based MC IP
  drm/amdgpu: move gmc7 support out of CIK dependency
  drm/amdgpu/gfx7: enable cp inst/reg error interrupts
  drm/amdgpu/gfx8: enable cp inst/reg error interrupts
  ...
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_smc.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 2
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 279
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.c | 55
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 6
-rw-r--r--  drivers/gpu/drm/i2c/adv7511.c | 48
-rw-r--r--  drivers/gpu/drm/i2c/adv7511.h | 12
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 7
-rw-r--r--  include/drm/drm_cache.h | 9
-rw-r--r--  include/drm/drm_dp_mst_helper.h | 25
-rw-r--r--  include/drm/drm_fixed.h | 53
28 files changed, 461 insertions, 252 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 66f729eaf00b..20c9539abc36 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -25,7 +25,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
25 amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o 25 amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
26 26
27# add asic specific block 27# add asic specific block
28amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \ 28amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
29 ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \ 29 ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
30 amdgpu_amdkfd_gfx_v7.o 30 amdgpu_amdkfd_gfx_v7.o
31 31
@@ -34,6 +34,7 @@ amdgpu-y += \
34 34
35# add GMC block 35# add GMC block
36amdgpu-y += \ 36amdgpu-y += \
37 gmc_v7_0.o \
37 gmc_v8_0.o 38 gmc_v8_0.o
38 39
39# add IH block 40# add IH block
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 0e1376317683..362bedc9e507 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -154,7 +154,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
154 .get_fw_version = get_fw_version 154 .get_fw_version = get_fw_version
155}; 155};
156 156
157struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions() 157struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
158{ 158{
159 return (struct kfd2kgd_calls *)&kfd2kgd; 159 return (struct kfd2kgd_calls *)&kfd2kgd;
160} 160}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 79fa5c7de856..04b744d64b57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -115,7 +115,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
115 .get_fw_version = get_fw_version 115 .get_fw_version = get_fw_version
116}; 116};
117 117
118struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions() 118struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
119{ 119{
120 return (struct kfd2kgd_calls *)&kfd2kgd; 120 return (struct kfd2kgd_calls *)&kfd2kgd;
121} 121}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b5dbbb573491..9c1af8976bef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -256,11 +256,11 @@ static struct pci_device_id pciidlist[] = {
256 {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 256 {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
257#endif 257#endif
258 /* topaz */ 258 /* topaz */
259 {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, 259 {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
260 {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, 260 {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
261 {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, 261 {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
262 {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, 262 {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
263 {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, 263 {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
264 /* tonga */ 264 /* tonga */
265 {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, 265 {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
266 {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, 266 {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index a2a16acee34d..b8fbbd7699e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -33,6 +33,7 @@
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <drm/drmP.h> 34#include <drm/drmP.h>
35#include <drm/amdgpu_drm.h> 35#include <drm/amdgpu_drm.h>
36#include <drm/drm_cache.h>
36#include "amdgpu.h" 37#include "amdgpu.h"
37#include "amdgpu_trace.h" 38#include "amdgpu_trace.h"
38 39
@@ -261,6 +262,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
261 AMDGPU_GEM_DOMAIN_OA); 262 AMDGPU_GEM_DOMAIN_OA);
262 263
263 bo->flags = flags; 264 bo->flags = flags;
265
266 /* For architectures that don't support WC memory,
267 * mask out the WC flag from the BO
268 */
269 if (!drm_arch_can_wc_memory())
270 bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
271
264 amdgpu_fill_placement_to_bo(bo, placement); 272 amdgpu_fill_placement_to_bo(bo, placement);
265 /* Kernel allocation are uninterruptible */ 273 /* Kernel allocation are uninterruptible */
266 r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, 274 r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8a1752ff3d8e..55cf05e1c81c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -808,7 +808,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
808 flags |= AMDGPU_PTE_SNOOPED; 808 flags |= AMDGPU_PTE_SNOOPED;
809 } 809 }
810 810
811 if (adev->asic_type >= CHIP_TOPAZ) 811 if (adev->asic_type >= CHIP_TONGA)
812 flags |= AMDGPU_PTE_EXECUTABLE; 812 flags |= AMDGPU_PTE_EXECUTABLE;
813 813
814 flags |= AMDGPU_PTE_READABLE; 814 flags |= AMDGPU_PTE_READABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 72793f93e2fc..6c76139de1c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4738,6 +4738,22 @@ static int gfx_v7_0_early_init(void *handle)
4738 return 0; 4738 return 0;
4739} 4739}
4740 4740
4741static int gfx_v7_0_late_init(void *handle)
4742{
4743 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4744 int r;
4745
4746 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4747 if (r)
4748 return r;
4749
4750 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4751 if (r)
4752 return r;
4753
4754 return 0;
4755}
4756
4741static int gfx_v7_0_sw_init(void *handle) 4757static int gfx_v7_0_sw_init(void *handle)
4742{ 4758{
4743 struct amdgpu_ring *ring; 4759 struct amdgpu_ring *ring;
@@ -4890,6 +4906,8 @@ static int gfx_v7_0_hw_fini(void *handle)
4890{ 4906{
4891 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4907 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4892 4908
4909 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4910 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4893 gfx_v7_0_cp_enable(adev, false); 4911 gfx_v7_0_cp_enable(adev, false);
4894 gfx_v7_0_rlc_stop(adev); 4912 gfx_v7_0_rlc_stop(adev);
4895 gfx_v7_0_fini_pg(adev); 4913 gfx_v7_0_fini_pg(adev);
@@ -5527,7 +5545,7 @@ static int gfx_v7_0_set_powergating_state(void *handle,
5527 5545
5528const struct amd_ip_funcs gfx_v7_0_ip_funcs = { 5546const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
5529 .early_init = gfx_v7_0_early_init, 5547 .early_init = gfx_v7_0_early_init,
5530 .late_init = NULL, 5548 .late_init = gfx_v7_0_late_init,
5531 .sw_init = gfx_v7_0_sw_init, 5549 .sw_init = gfx_v7_0_sw_init,
5532 .sw_fini = gfx_v7_0_sw_fini, 5550 .sw_fini = gfx_v7_0_sw_fini,
5533 .hw_init = gfx_v7_0_hw_init, 5551 .hw_init = gfx_v7_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 95c0cdfbd1b3..8f8ec37ecd88 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -111,7 +111,6 @@ MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
111MODULE_FIRMWARE("amdgpu/topaz_pfp.bin"); 111MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
112MODULE_FIRMWARE("amdgpu/topaz_me.bin"); 112MODULE_FIRMWARE("amdgpu/topaz_me.bin");
113MODULE_FIRMWARE("amdgpu/topaz_mec.bin"); 113MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
114MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
115MODULE_FIRMWARE("amdgpu/topaz_rlc.bin"); 114MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
116 115
117MODULE_FIRMWARE("amdgpu/fiji_ce.bin"); 116MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
@@ -828,7 +827,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
828 adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); 827 adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
829 adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); 828 adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
830 829
831 if (adev->asic_type != CHIP_STONEY) { 830 if ((adev->asic_type != CHIP_STONEY) &&
831 (adev->asic_type != CHIP_TOPAZ)) {
832 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); 832 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
833 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); 833 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
834 if (!err) { 834 if (!err) {
@@ -3851,10 +3851,16 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
3851 if (r) 3851 if (r)
3852 return -EINVAL; 3852 return -EINVAL;
3853 3853
3854 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, 3854 if (adev->asic_type == CHIP_TOPAZ) {
3855 AMDGPU_UCODE_ID_CP_MEC1); 3855 r = gfx_v8_0_cp_compute_load_microcode(adev);
3856 if (r) 3856 if (r)
3857 return -EINVAL; 3857 return r;
3858 } else {
3859 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
3860 AMDGPU_UCODE_ID_CP_MEC1);
3861 if (r)
3862 return -EINVAL;
3863 }
3858 } 3864 }
3859 } 3865 }
3860 3866
@@ -3901,6 +3907,8 @@ static int gfx_v8_0_hw_fini(void *handle)
3901{ 3907{
3902 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3908 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3903 3909
3910 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3911 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3904 gfx_v8_0_cp_enable(adev, false); 3912 gfx_v8_0_cp_enable(adev, false);
3905 gfx_v8_0_rlc_stop(adev); 3913 gfx_v8_0_rlc_stop(adev);
3906 gfx_v8_0_cp_compute_fini(adev); 3914 gfx_v8_0_cp_compute_fini(adev);
@@ -4329,6 +4337,14 @@ static int gfx_v8_0_late_init(void *handle)
4329 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4337 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4330 int r; 4338 int r;
4331 4339
4340 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4341 if (r)
4342 return r;
4343
4344 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4345 if (r)
4346 return r;
4347
4332 /* requires IBs so do in late init after IB pool is initialized */ 4348 /* requires IBs so do in late init after IB pool is initialized */
4333 r = gfx_v8_0_do_edc_gpr_workarounds(adev); 4349 r = gfx_v8_0_do_edc_gpr_workarounds(adev);
4334 if (r) 4350 if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 3f956065d069..8aa2991ab379 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -42,9 +42,39 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
42 42
43MODULE_FIRMWARE("radeon/bonaire_mc.bin"); 43MODULE_FIRMWARE("radeon/bonaire_mc.bin");
44MODULE_FIRMWARE("radeon/hawaii_mc.bin"); 44MODULE_FIRMWARE("radeon/hawaii_mc.bin");
45MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
46
47static const u32 golden_settings_iceland_a11[] =
48{
49 mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
50 mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
51 mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
52 mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
53};
54
55static const u32 iceland_mgcg_cgcg_init[] =
56{
57 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
58};
59
60static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
61{
62 switch (adev->asic_type) {
63 case CHIP_TOPAZ:
64 amdgpu_program_register_sequence(adev,
65 iceland_mgcg_cgcg_init,
66 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
67 amdgpu_program_register_sequence(adev,
68 golden_settings_iceland_a11,
69 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
70 break;
71 default:
72 break;
73 }
74}
45 75
46/** 76/**
47 * gmc8_mc_wait_for_idle - wait for MC idle callback. 77 * gmc7_mc_wait_for_idle - wait for MC idle callback.
48 * 78 *
49 * @adev: amdgpu_device pointer 79 * @adev: amdgpu_device pointer
50 * 80 *
@@ -132,13 +162,20 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
132 case CHIP_HAWAII: 162 case CHIP_HAWAII:
133 chip_name = "hawaii"; 163 chip_name = "hawaii";
134 break; 164 break;
165 case CHIP_TOPAZ:
166 chip_name = "topaz";
167 break;
135 case CHIP_KAVERI: 168 case CHIP_KAVERI:
136 case CHIP_KABINI: 169 case CHIP_KABINI:
137 return 0; 170 return 0;
138 default: BUG(); 171 default: BUG();
139 } 172 }
140 173
141 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 174 if (adev->asic_type == CHIP_TOPAZ)
175 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
176 else
177 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
178
142 err = request_firmware(&adev->mc.fw, fw_name, adev->dev); 179 err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
143 if (err) 180 if (err)
144 goto out; 181 goto out;
@@ -984,6 +1021,8 @@ static int gmc_v7_0_hw_init(void *handle)
984 int r; 1021 int r;
985 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1022 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
986 1023
1024 gmc_v7_0_init_golden_registers(adev);
1025
987 gmc_v7_0_mc_program(adev); 1026 gmc_v7_0_mc_program(adev);
988 1027
989 if (!(adev->flags & AMD_IS_APU)) { 1028 if (!(adev->flags & AMD_IS_APU)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index c0c9a0101eb4..3efd45546241 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -42,9 +42,7 @@
42static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev); 42static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
43static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); 43static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
44 44
45MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
46MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); 45MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
47MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
48 46
49static const u32 golden_settings_tonga_a11[] = 47static const u32 golden_settings_tonga_a11[] =
50{ 48{
@@ -75,19 +73,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
75 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 73 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
76}; 74};
77 75
78static const u32 golden_settings_iceland_a11[] =
79{
80 mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
81 mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
82 mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
83 mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
84};
85
86static const u32 iceland_mgcg_cgcg_init[] =
87{
88 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
89};
90
91static const u32 cz_mgcg_cgcg_init[] = 76static const u32 cz_mgcg_cgcg_init[] =
92{ 77{
93 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 78 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
@@ -102,14 +87,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
102static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) 87static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
103{ 88{
104 switch (adev->asic_type) { 89 switch (adev->asic_type) {
105 case CHIP_TOPAZ:
106 amdgpu_program_register_sequence(adev,
107 iceland_mgcg_cgcg_init,
108 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
109 amdgpu_program_register_sequence(adev,
110 golden_settings_iceland_a11,
111 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
112 break;
113 case CHIP_FIJI: 90 case CHIP_FIJI:
114 amdgpu_program_register_sequence(adev, 91 amdgpu_program_register_sequence(adev,
115 fiji_mgcg_cgcg_init, 92 fiji_mgcg_cgcg_init,
@@ -229,15 +206,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
229 DRM_DEBUG("\n"); 206 DRM_DEBUG("\n");
230 207
231 switch (adev->asic_type) { 208 switch (adev->asic_type) {
232 case CHIP_TOPAZ:
233 chip_name = "topaz";
234 break;
235 case CHIP_TONGA: 209 case CHIP_TONGA:
236 chip_name = "tonga"; 210 chip_name = "tonga";
237 break; 211 break;
238 case CHIP_FIJI: 212 case CHIP_FIJI:
239 chip_name = "fiji";
240 break;
241 case CHIP_CARRIZO: 213 case CHIP_CARRIZO:
242 case CHIP_STONEY: 214 case CHIP_STONEY:
243 return 0; 215 return 0;
@@ -1007,7 +979,7 @@ static int gmc_v8_0_hw_init(void *handle)
1007 979
1008 gmc_v8_0_mc_program(adev); 980 gmc_v8_0_mc_program(adev);
1009 981
1010 if (!(adev->flags & AMD_IS_APU)) { 982 if (adev->asic_type == CHIP_TONGA) {
1011 r = gmc_v8_0_mc_load_microcode(adev); 983 r = gmc_v8_0_mc_load_microcode(adev);
1012 if (r) { 984 if (r) {
1013 DRM_ERROR("Failed to load MC firmware!\n"); 985 DRM_ERROR("Failed to load MC firmware!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index 966d4b2ed9da..090486c18249 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -432,7 +432,7 @@ static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
432 case AMDGPU_UCODE_ID_CP_ME: 432 case AMDGPU_UCODE_ID_CP_ME:
433 return UCODE_ID_CP_ME_MASK; 433 return UCODE_ID_CP_ME_MASK;
434 case AMDGPU_UCODE_ID_CP_MEC1: 434 case AMDGPU_UCODE_ID_CP_MEC1:
435 return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK; 435 return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
436 case AMDGPU_UCODE_ID_CP_MEC2: 436 case AMDGPU_UCODE_ID_CP_MEC2:
437 return UCODE_ID_CP_MEC_MASK; 437 return UCODE_ID_CP_MEC_MASK;
438 case AMDGPU_UCODE_ID_RLC_G: 438 case AMDGPU_UCODE_ID_RLC_G:
@@ -522,12 +522,6 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
522 return -EINVAL; 522 return -EINVAL;
523 } 523 }
524 524
525 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
526 &toc->entry[toc->num_entries++])) {
527 DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
528 return -EINVAL;
529 }
530
531 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0, 525 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
532 &toc->entry[toc->num_entries++])) { 526 &toc->entry[toc->num_entries++])) {
533 DRM_ERROR("Failed to get firmware entry for SDMA0\n"); 527 DRM_ERROR("Failed to get firmware entry for SDMA0\n");
@@ -550,8 +544,8 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
550 UCODE_ID_CP_ME_MASK | 544 UCODE_ID_CP_ME_MASK |
551 UCODE_ID_CP_PFP_MASK | 545 UCODE_ID_CP_PFP_MASK |
552 UCODE_ID_CP_MEC_MASK | 546 UCODE_ID_CP_MEC_MASK |
553 UCODE_ID_CP_MEC_JT1_MASK | 547 UCODE_ID_CP_MEC_JT1_MASK;
554 UCODE_ID_CP_MEC_JT2_MASK; 548
555 549
556 if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) { 550 if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
557 DRM_ERROR("Fail to request SMU load ucode\n"); 551 DRM_ERROR("Fail to request SMU load ucode\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 652e76644c31..89f5a1ff6f43 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -61,6 +61,7 @@
61#include "vi.h" 61#include "vi.h"
62#include "vi_dpm.h" 62#include "vi_dpm.h"
63#include "gmc_v8_0.h" 63#include "gmc_v8_0.h"
64#include "gmc_v7_0.h"
64#include "gfx_v8_0.h" 65#include "gfx_v8_0.h"
65#include "sdma_v2_4.h" 66#include "sdma_v2_4.h"
66#include "sdma_v3_0.h" 67#include "sdma_v3_0.h"
@@ -1109,10 +1110,10 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
1109 }, 1110 },
1110 { 1111 {
1111 .type = AMD_IP_BLOCK_TYPE_GMC, 1112 .type = AMD_IP_BLOCK_TYPE_GMC,
1112 .major = 8, 1113 .major = 7,
1113 .minor = 0, 1114 .minor = 4,
1114 .rev = 0, 1115 .rev = 0,
1115 .funcs = &gmc_v8_0_ip_funcs, 1116 .funcs = &gmc_v7_0_ip_funcs,
1116 }, 1117 },
1117 { 1118 {
1118 .type = AMD_IP_BLOCK_TYPE_IH, 1119 .type = AMD_IP_BLOCK_TYPE_IH,
@@ -1442,8 +1443,7 @@ static int vi_common_early_init(void *handle)
1442 break; 1443 break;
1443 case CHIP_FIJI: 1444 case CHIP_FIJI:
1444 adev->has_uvd = true; 1445 adev->has_uvd = true;
1445 adev->cg_flags = AMDGPU_CG_SUPPORT_UVD_MGCG | 1446 adev->cg_flags = 0;
1446 AMDGPU_CG_SUPPORT_VCE_MGCG;
1447 adev->pg_flags = 0; 1447 adev->pg_flags = 0;
1448 adev->external_rev_id = adev->rev_id + 0x3c; 1448 adev->external_rev_id = adev->rev_id + 0x3c;
1449 break; 1449 break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 9be007081b72..a902ae037398 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -194,7 +194,7 @@ static void kfd_process_wq_release(struct work_struct *work)
194 194
195 kfree(p); 195 kfree(p);
196 196
197 kfree((void *)work); 197 kfree(work);
198} 198}
199 199
200static void kfd_process_destroy_delayed(struct rcu_head *rcu) 200static void kfd_process_destroy_delayed(struct rcu_head *rcu)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 6ed90a2437e5..8ae13de272c4 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -803,6 +803,18 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
803 return mstb; 803 return mstb;
804} 804}
805 805
806static void drm_dp_free_mst_port(struct kref *kref);
807
808static void drm_dp_free_mst_branch_device(struct kref *kref)
809{
810 struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
811 if (mstb->port_parent) {
812 if (list_empty(&mstb->port_parent->next))
813 kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
814 }
815 kfree(mstb);
816}
817
806static void drm_dp_destroy_mst_branch_device(struct kref *kref) 818static void drm_dp_destroy_mst_branch_device(struct kref *kref)
807{ 819{
808 struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref); 820 struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
@@ -810,6 +822,15 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
810 bool wake_tx = false; 822 bool wake_tx = false;
811 823
812 /* 824 /*
825 * init kref again to be used by ports to remove mst branch when it is
826 * not needed anymore
827 */
828 kref_init(kref);
829
830 if (mstb->port_parent && list_empty(&mstb->port_parent->next))
831 kref_get(&mstb->port_parent->kref);
832
833 /*
813 * destroy all ports - don't need lock 834 * destroy all ports - don't need lock
814 * as there are no more references to the mst branch 835 * as there are no more references to the mst branch
815 * device at this point. 836 * device at this point.
@@ -835,7 +856,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
835 856
836 if (wake_tx) 857 if (wake_tx)
837 wake_up(&mstb->mgr->tx_waitq); 858 wake_up(&mstb->mgr->tx_waitq);
838 kfree(mstb); 859
860 kref_put(kref, drm_dp_free_mst_branch_device);
839} 861}
840 862
841static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb) 863static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
@@ -883,6 +905,7 @@ static void drm_dp_destroy_port(struct kref *kref)
883 * from an EDID retrieval */ 905 * from an EDID retrieval */
884 906
885 mutex_lock(&mgr->destroy_connector_lock); 907 mutex_lock(&mgr->destroy_connector_lock);
908 kref_get(&port->parent->kref);
886 list_add(&port->next, &mgr->destroy_connector_list); 909 list_add(&port->next, &mgr->destroy_connector_list);
887 mutex_unlock(&mgr->destroy_connector_lock); 910 mutex_unlock(&mgr->destroy_connector_lock);
888 schedule_work(&mgr->destroy_connector_work); 911 schedule_work(&mgr->destroy_connector_work);
@@ -1018,18 +1041,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
1018 return send_link; 1041 return send_link;
1019} 1042}
1020 1043
1021static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, 1044static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
1022 struct drm_dp_mst_port *port)
1023{ 1045{
1024 int ret; 1046 int ret;
1025 if (port->dpcd_rev >= 0x12) { 1047
1026 port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid); 1048 memcpy(mstb->guid, guid, 16);
1027 if (!port->guid_valid) { 1049
1028 ret = drm_dp_send_dpcd_write(mstb->mgr, 1050 if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
1029 port, 1051 if (mstb->port_parent) {
1030 DP_GUID, 1052 ret = drm_dp_send_dpcd_write(
1031 16, port->guid); 1053 mstb->mgr,
1032 port->guid_valid = true; 1054 mstb->port_parent,
1055 DP_GUID,
1056 16,
1057 mstb->guid);
1058 } else {
1059
1060 ret = drm_dp_dpcd_write(
1061 mstb->mgr->aux,
1062 DP_GUID,
1063 mstb->guid,
1064 16);
1033 } 1065 }
1034 } 1066 }
1035} 1067}
@@ -1086,7 +1118,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1086 port->dpcd_rev = port_msg->dpcd_revision; 1118 port->dpcd_rev = port_msg->dpcd_revision;
1087 port->num_sdp_streams = port_msg->num_sdp_streams; 1119 port->num_sdp_streams = port_msg->num_sdp_streams;
1088 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; 1120 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
1089 memcpy(port->guid, port_msg->peer_guid, 16);
1090 1121
1091 /* manage mstb port lists with mgr lock - take a reference 1122 /* manage mstb port lists with mgr lock - take a reference
1092 for this list */ 1123 for this list */
@@ -1099,11 +1130,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1099 1130
1100 if (old_ddps != port->ddps) { 1131 if (old_ddps != port->ddps) {
1101 if (port->ddps) { 1132 if (port->ddps) {
1102 drm_dp_check_port_guid(mstb, port);
1103 if (!port->input) 1133 if (!port->input)
1104 drm_dp_send_enum_path_resources(mstb->mgr, mstb, port); 1134 drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
1105 } else { 1135 } else {
1106 port->guid_valid = false;
1107 port->available_pbn = 0; 1136 port->available_pbn = 0;
1108 } 1137 }
1109 } 1138 }
@@ -1130,13 +1159,11 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1130 drm_dp_put_port(port); 1159 drm_dp_put_port(port);
1131 goto out; 1160 goto out;
1132 } 1161 }
1133 if (port->port_num >= DP_MST_LOGICAL_PORT_0) { 1162
1134 port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); 1163 drm_mode_connector_set_tile_property(port->connector);
1135 drm_mode_connector_set_tile_property(port->connector); 1164
1136 }
1137 (*mstb->mgr->cbs->register_connector)(port->connector); 1165 (*mstb->mgr->cbs->register_connector)(port->connector);
1138 } 1166 }
1139
1140out: 1167out:
1141 /* put reference to this port */ 1168 /* put reference to this port */
1142 drm_dp_put_port(port); 1169 drm_dp_put_port(port);
@@ -1161,11 +1188,9 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
1161 port->ddps = conn_stat->displayport_device_plug_status; 1188 port->ddps = conn_stat->displayport_device_plug_status;
1162 1189
1163 if (old_ddps != port->ddps) { 1190 if (old_ddps != port->ddps) {
1191 dowork = true;
1164 if (port->ddps) { 1192 if (port->ddps) {
1165 drm_dp_check_port_guid(mstb, port);
1166 dowork = true;
1167 } else { 1193 } else {
1168 port->guid_valid = false;
1169 port->available_pbn = 0; 1194 port->available_pbn = 0;
1170 } 1195 }
1171 } 1196 }
@@ -1222,13 +1247,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
1222 struct drm_dp_mst_branch *found_mstb; 1247 struct drm_dp_mst_branch *found_mstb;
1223 struct drm_dp_mst_port *port; 1248 struct drm_dp_mst_port *port;
1224 1249
1250 if (memcmp(mstb->guid, guid, 16) == 0)
1251 return mstb;
1252
1253
1225 list_for_each_entry(port, &mstb->ports, next) { 1254 list_for_each_entry(port, &mstb->ports, next) {
1226 if (!port->mstb) 1255 if (!port->mstb)
1227 continue; 1256 continue;
1228 1257
1229 if (port->guid_valid && memcmp(port->guid, guid, 16) == 0)
1230 return port->mstb;
1231
1232 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); 1258 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
1233 1259
1234 if (found_mstb) 1260 if (found_mstb)
@@ -1247,10 +1273,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
1247 /* find the port by iterating down */ 1273 /* find the port by iterating down */
1248 mutex_lock(&mgr->lock); 1274 mutex_lock(&mgr->lock);
1249 1275
1250 if (mgr->guid_valid && memcmp(mgr->guid, guid, 16) == 0) 1276 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
1251 mstb = mgr->mst_primary;
1252 else
1253 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
1254 1277
1255 if (mstb) 1278 if (mstb)
1256 kref_get(&mstb->kref); 1279 kref_get(&mstb->kref);
@@ -1271,8 +1294,13 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
1271 if (port->input) 1294 if (port->input)
1272 continue; 1295 continue;
1273 1296
1274 if (!port->ddps) 1297 if (!port->ddps) {
1298 if (port->cached_edid) {
1299 kfree(port->cached_edid);
1300 port->cached_edid = NULL;
1301 }
1275 continue; 1302 continue;
1303 }
1276 1304
1277 if (!port->available_pbn) 1305 if (!port->available_pbn)
1278 drm_dp_send_enum_path_resources(mgr, mstb, port); 1306 drm_dp_send_enum_path_resources(mgr, mstb, port);
@@ -1283,6 +1311,12 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
1283 drm_dp_check_and_send_link_address(mgr, mstb_child); 1311 drm_dp_check_and_send_link_address(mgr, mstb_child);
1284 drm_dp_put_mst_branch_device(mstb_child); 1312 drm_dp_put_mst_branch_device(mstb_child);
1285 } 1313 }
1314 } else if (port->pdt == DP_PEER_DEVICE_SST_SINK ||
1315 port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV) {
1316 if (!port->cached_edid) {
1317 port->cached_edid =
1318 drm_get_edid(port->connector, &port->aux.ddc);
1319 }
1286 } 1320 }
1287 } 1321 }
1288} 1322}
@@ -1302,6 +1336,8 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
1302 drm_dp_check_and_send_link_address(mgr, mstb); 1336 drm_dp_check_and_send_link_address(mgr, mstb);
1303 drm_dp_put_mst_branch_device(mstb); 1337 drm_dp_put_mst_branch_device(mstb);
1304 } 1338 }
1339
1340 (*mgr->cbs->hotplug)(mgr);
1305} 1341}
1306 1342
1307static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, 1343static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
@@ -1555,10 +1591,12 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1555 txmsg->reply.u.link_addr.ports[i].num_sdp_streams, 1591 txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
1556 txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks); 1592 txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
1557 } 1593 }
1594
1595 drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
1596
1558 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { 1597 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1559 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); 1598 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
1560 } 1599 }
1561 (*mgr->cbs->hotplug)(mgr);
1562 } 1600 }
1563 } else { 1601 } else {
1564 mstb->link_address_sent = false; 1602 mstb->link_address_sent = false;
@@ -1602,6 +1640,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
1602 return 0; 1640 return 0;
1603} 1641}
1604 1642
1643static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
1644{
1645 if (!mstb->port_parent)
1646 return NULL;
1647
1648 if (mstb->port_parent->mstb != mstb)
1649 return mstb->port_parent;
1650
1651 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
1652}
1653
1654static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
1655 struct drm_dp_mst_branch *mstb,
1656 int *port_num)
1657{
1658 struct drm_dp_mst_branch *rmstb = NULL;
1659 struct drm_dp_mst_port *found_port;
1660 mutex_lock(&mgr->lock);
1661 if (mgr->mst_primary) {
1662 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
1663
1664 if (found_port) {
1665 rmstb = found_port->parent;
1666 kref_get(&rmstb->kref);
1667 *port_num = found_port->port_num;
1668 }
1669 }
1670 mutex_unlock(&mgr->lock);
1671 return rmstb;
1672}
1673
1605static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, 1674static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1606 struct drm_dp_mst_port *port, 1675 struct drm_dp_mst_port *port,
1607 int id, 1676 int id,
@@ -1609,13 +1678,18 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1609{ 1678{
1610 struct drm_dp_sideband_msg_tx *txmsg; 1679 struct drm_dp_sideband_msg_tx *txmsg;
1611 struct drm_dp_mst_branch *mstb; 1680 struct drm_dp_mst_branch *mstb;
1612 int len, ret; 1681 int len, ret, port_num;
1613 u8 sinks[DRM_DP_MAX_SDP_STREAMS]; 1682 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
1614 int i; 1683 int i;
1615 1684
1685 port_num = port->port_num;
1616 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); 1686 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1617 if (!mstb) 1687 if (!mstb) {
1618 return -EINVAL; 1688 mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
1689
1690 if (!mstb)
1691 return -EINVAL;
1692 }
1619 1693
1620 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1694 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1621 if (!txmsg) { 1695 if (!txmsg) {
@@ -1627,7 +1701,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1627 sinks[i] = i; 1701 sinks[i] = i;
1628 1702
1629 txmsg->dst = mstb; 1703 txmsg->dst = mstb;
1630 len = build_allocate_payload(txmsg, port->port_num, 1704 len = build_allocate_payload(txmsg, port_num,
1631 id, 1705 id,
1632 pbn, port->num_sdp_streams, sinks); 1706 pbn, port->num_sdp_streams, sinks);
1633 1707
@@ -1983,31 +2057,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
1983 mgr->mst_primary = mstb; 2057 mgr->mst_primary = mstb;
1984 kref_get(&mgr->mst_primary->kref); 2058 kref_get(&mgr->mst_primary->kref);
1985 2059
1986 {
1987 struct drm_dp_payload reset_pay;
1988 reset_pay.start_slot = 0;
1989 reset_pay.num_slots = 0x3f;
1990 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
1991 }
1992
1993 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 2060 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
1994 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); 2061 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
1995 if (ret < 0) { 2062 if (ret < 0) {
1996 goto out_unlock; 2063 goto out_unlock;
1997 } 2064 }
1998 2065
1999 2066 {
2000 /* sort out guid */ 2067 struct drm_dp_payload reset_pay;
2001 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16); 2068 reset_pay.start_slot = 0;
2002 if (ret != 16) { 2069 reset_pay.num_slots = 0x3f;
2003 DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret); 2070 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
2004 goto out_unlock;
2005 }
2006
2007 mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
2008 if (!mgr->guid_valid) {
2009 ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
2010 mgr->guid_valid = true;
2011 } 2071 }
2012 2072
2013 queue_work(system_long_wq, &mgr->work); 2073 queue_work(system_long_wq, &mgr->work);
@@ -2231,9 +2291,8 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2231 } 2291 }
2232 2292
2233 drm_dp_update_port(mstb, &msg.u.conn_stat); 2293 drm_dp_update_port(mstb, &msg.u.conn_stat);
2234 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
2235 (*mgr->cbs->hotplug)(mgr);
2236 2294
2295 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
2237 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { 2296 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
2238 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); 2297 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2239 if (!mstb) 2298 if (!mstb)
@@ -2320,10 +2379,6 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
2320 2379
2321 case DP_PEER_DEVICE_SST_SINK: 2380 case DP_PEER_DEVICE_SST_SINK:
2322 status = connector_status_connected; 2381 status = connector_status_connected;
2323 /* for logical ports - cache the EDID */
2324 if (port->port_num >= 8 && !port->cached_edid) {
2325 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
2326 }
2327 break; 2382 break;
2328 case DP_PEER_DEVICE_DP_LEGACY_CONV: 2383 case DP_PEER_DEVICE_DP_LEGACY_CONV:
2329 if (port->ldps) 2384 if (port->ldps)
@@ -2378,10 +2433,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
2378 2433
2379 if (port->cached_edid) 2434 if (port->cached_edid)
2380 edid = drm_edid_duplicate(port->cached_edid); 2435 edid = drm_edid_duplicate(port->cached_edid);
2381 else { 2436
2382 edid = drm_get_edid(connector, &port->aux.ddc);
2383 drm_mode_connector_set_tile_property(connector);
2384 }
2385 port->has_audio = drm_detect_monitor_audio(edid); 2437 port->has_audio = drm_detect_monitor_audio(edid);
2386 drm_dp_put_port(port); 2438 drm_dp_put_port(port);
2387 return edid; 2439 return edid;
@@ -2446,6 +2498,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
2446 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn); 2498 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
2447 if (pbn == port->vcpi.pbn) { 2499 if (pbn == port->vcpi.pbn) {
2448 *slots = port->vcpi.num_slots; 2500 *slots = port->vcpi.num_slots;
2501 drm_dp_put_port(port);
2449 return true; 2502 return true;
2450 } 2503 }
2451 } 2504 }
@@ -2605,32 +2658,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
2605 */ 2658 */
2606int drm_dp_calc_pbn_mode(int clock, int bpp) 2659int drm_dp_calc_pbn_mode(int clock, int bpp)
2607{ 2660{
2608 fixed20_12 pix_bw; 2661 u64 kbps;
2609 fixed20_12 fbpp; 2662 s64 peak_kbps;
2610 fixed20_12 result; 2663 u32 numerator;
2611 fixed20_12 margin, tmp; 2664 u32 denominator;
2612 u32 res; 2665
2613 2666 kbps = clock * bpp;
2614 pix_bw.full = dfixed_const(clock); 2667
2615 fbpp.full = dfixed_const(bpp); 2668 /*
2616 tmp.full = dfixed_const(8); 2669 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
2617 fbpp.full = dfixed_div(fbpp, tmp); 2670 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
2618 2671 * common multiplier to render an integer PBN for all link rate/lane
2619 result.full = dfixed_mul(pix_bw, fbpp); 2672 * counts combinations
2620 margin.full = dfixed_const(54); 2673 * calculate
2621 tmp.full = dfixed_const(64); 2674 * peak_kbps *= (1006/1000)
2622 margin.full = dfixed_div(margin, tmp); 2675 * peak_kbps *= (64/54)
2623 result.full = dfixed_div(result, margin); 2676 * peak_kbps *= 8 convert to bytes
2624 2677 */
2625 margin.full = dfixed_const(1006); 2678
2626 tmp.full = dfixed_const(1000); 2679 numerator = 64 * 1006;
2627 margin.full = dfixed_div(margin, tmp); 2680 denominator = 54 * 8 * 1000 * 1000;
2628 result.full = dfixed_mul(result, margin); 2681
2629 2682 kbps *= numerator;
2630 result.full = dfixed_div(result, tmp); 2683 peak_kbps = drm_fixp_from_fraction(kbps, denominator);
2631 result.full = dfixed_ceil(result); 2684
2632 res = dfixed_trunc(result); 2685 return drm_fixp2int_ceil(peak_kbps);
2633 return res;
2634} 2686}
2635EXPORT_SYMBOL(drm_dp_calc_pbn_mode); 2687EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
2636 2688
@@ -2638,11 +2690,23 @@ static int test_calc_pbn_mode(void)
2638{ 2690{
2639 int ret; 2691 int ret;
2640 ret = drm_dp_calc_pbn_mode(154000, 30); 2692 ret = drm_dp_calc_pbn_mode(154000, 30);
2641 if (ret != 689) 2693 if (ret != 689) {
2694 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2695 154000, 30, 689, ret);
2642 return -EINVAL; 2696 return -EINVAL;
2697 }
2643 ret = drm_dp_calc_pbn_mode(234000, 30); 2698 ret = drm_dp_calc_pbn_mode(234000, 30);
2644 if (ret != 1047) 2699 if (ret != 1047) {
2700 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2701 234000, 30, 1047, ret);
2645 return -EINVAL; 2702 return -EINVAL;
2703 }
2704 ret = drm_dp_calc_pbn_mode(297000, 24);
2705 if (ret != 1063) {
2706 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2707 297000, 24, 1063, ret);
2708 return -EINVAL;
2709 }
2646 return 0; 2710 return 0;
2647} 2711}
2648 2712
@@ -2783,6 +2847,13 @@ static void drm_dp_tx_work(struct work_struct *work)
2783 mutex_unlock(&mgr->qlock); 2847 mutex_unlock(&mgr->qlock);
2784} 2848}
2785 2849
2850static void drm_dp_free_mst_port(struct kref *kref)
2851{
2852 struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
2853 kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
2854 kfree(port);
2855}
2856
2786static void drm_dp_destroy_connector_work(struct work_struct *work) 2857static void drm_dp_destroy_connector_work(struct work_struct *work)
2787{ 2858{
2788 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); 2859 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
@@ -2803,13 +2874,22 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
2803 list_del(&port->next); 2874 list_del(&port->next);
2804 mutex_unlock(&mgr->destroy_connector_lock); 2875 mutex_unlock(&mgr->destroy_connector_lock);
2805 2876
2877 kref_init(&port->kref);
2878 INIT_LIST_HEAD(&port->next);
2879
2806 mgr->cbs->destroy_connector(mgr, port->connector); 2880 mgr->cbs->destroy_connector(mgr, port->connector);
2807 2881
2808 drm_dp_port_teardown_pdt(port, port->pdt); 2882 drm_dp_port_teardown_pdt(port, port->pdt);
2809 2883
2810 if (!port->input && port->vcpi.vcpi > 0) 2884 if (!port->input && port->vcpi.vcpi > 0) {
2811 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); 2885 if (mgr->mst_state) {
2812 kfree(port); 2886 drm_dp_mst_reset_vcpi_slots(mgr, port);
2887 drm_dp_update_payload_part1(mgr);
2888 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2889 }
2890 }
2891
2892 kref_put(&port->kref, drm_dp_free_mst_port);
2813 send_hotplug = true; 2893 send_hotplug = true;
2814 } 2894 }
2815 if (send_hotplug) 2895 if (send_hotplug)
@@ -2847,6 +2927,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
2847 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes; 2927 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
2848 mgr->max_payloads = max_payloads; 2928 mgr->max_payloads = max_payloads;
2849 mgr->conn_base_id = conn_base_id; 2929 mgr->conn_base_id = conn_base_id;
2930 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
2931 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
2932 return -EINVAL;
2850 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL); 2933 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
2851 if (!mgr->payloads) 2934 if (!mgr->payloads)
2852 return -ENOMEM; 2935 return -ENOMEM;
@@ -2854,7 +2937,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
2854 if (!mgr->proposed_vcpis) 2937 if (!mgr->proposed_vcpis)
2855 return -ENOMEM; 2938 return -ENOMEM;
2856 set_bit(0, &mgr->payload_mask); 2939 set_bit(0, &mgr->payload_mask);
2857 test_calc_pbn_mode(); 2940 if (test_calc_pbn_mode() < 0)
2941 DRM_ERROR("MST PBN self-test failed\n");
2942
2858 return 0; 2943 return 0;
2859} 2944}
2860EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); 2945EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index b79c316c2ad2..673164b331c8 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -1392,7 +1392,7 @@ static const struct component_ops exynos_dp_ops = {
1392static int exynos_dp_probe(struct platform_device *pdev) 1392static int exynos_dp_probe(struct platform_device *pdev)
1393{ 1393{
1394 struct device *dev = &pdev->dev; 1394 struct device *dev = &pdev->dev;
1395 struct device_node *panel_node = NULL, *bridge_node, *endpoint = NULL; 1395 struct device_node *np = NULL, *endpoint = NULL;
1396 struct exynos_dp_device *dp; 1396 struct exynos_dp_device *dp;
1397 int ret; 1397 int ret;
1398 1398
@@ -1404,41 +1404,36 @@ static int exynos_dp_probe(struct platform_device *pdev)
1404 platform_set_drvdata(pdev, dp); 1404 platform_set_drvdata(pdev, dp);
1405 1405
1406 /* This is for the backward compatibility. */ 1406 /* This is for the backward compatibility. */
1407 panel_node = of_parse_phandle(dev->of_node, "panel", 0); 1407 np = of_parse_phandle(dev->of_node, "panel", 0);
1408 if (panel_node) { 1408 if (np) {
1409 dp->panel = of_drm_find_panel(panel_node); 1409 dp->panel = of_drm_find_panel(np);
1410 of_node_put(panel_node); 1410 of_node_put(np);
1411 if (!dp->panel) 1411 if (!dp->panel)
1412 return -EPROBE_DEFER; 1412 return -EPROBE_DEFER;
1413 } else {
1414 endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
1415 if (endpoint) {
1416 panel_node = of_graph_get_remote_port_parent(endpoint);
1417 if (panel_node) {
1418 dp->panel = of_drm_find_panel(panel_node);
1419 of_node_put(panel_node);
1420 if (!dp->panel)
1421 return -EPROBE_DEFER;
1422 } else {
1423 DRM_ERROR("no port node for panel device.\n");
1424 return -EINVAL;
1425 }
1426 }
1427 }
1428
1429 if (endpoint)
1430 goto out; 1413 goto out;
1414 }
1431 1415
1432 endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); 1416 endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
1433 if (endpoint) { 1417 if (endpoint) {
1434 bridge_node = of_graph_get_remote_port_parent(endpoint); 1418 np = of_graph_get_remote_port_parent(endpoint);
1435 if (bridge_node) { 1419 if (np) {
1436 dp->ptn_bridge = of_drm_find_bridge(bridge_node); 1420 /* The remote port can be either a panel or a bridge */
1437 of_node_put(bridge_node); 1421 dp->panel = of_drm_find_panel(np);
1438 if (!dp->ptn_bridge) 1422 if (!dp->panel) {
1439 return -EPROBE_DEFER; 1423 dp->ptn_bridge = of_drm_find_bridge(np);
1440 } else 1424 if (!dp->ptn_bridge) {
1441 return -EPROBE_DEFER; 1425 of_node_put(np);
1426 return -EPROBE_DEFER;
1427 }
1428 }
1429 of_node_put(np);
1430 } else {
1431 DRM_ERROR("no remote endpoint device node found.\n");
1432 return -EINVAL;
1433 }
1434 } else {
1435 DRM_ERROR("no port endpoint subnode found.\n");
1436 return -EINVAL;
1442 } 1437 }
1443 1438
1444out: 1439out:
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index d84a498ef099..e977a81af2e6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1906,8 +1906,7 @@ static int exynos_dsi_remove(struct platform_device *pdev)
1906 return 0; 1906 return 0;
1907} 1907}
1908 1908
1909#ifdef CONFIG_PM 1909static int __maybe_unused exynos_dsi_suspend(struct device *dev)
1910static int exynos_dsi_suspend(struct device *dev)
1911{ 1910{
1912 struct drm_encoder *encoder = dev_get_drvdata(dev); 1911 struct drm_encoder *encoder = dev_get_drvdata(dev);
1913 struct exynos_dsi *dsi = encoder_to_dsi(encoder); 1912 struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1938,7 +1937,7 @@ static int exynos_dsi_suspend(struct device *dev)
1938 return 0; 1937 return 0;
1939} 1938}
1940 1939
1941static int exynos_dsi_resume(struct device *dev) 1940static int __maybe_unused exynos_dsi_resume(struct device *dev)
1942{ 1941{
1943 struct drm_encoder *encoder = dev_get_drvdata(dev); 1942 struct drm_encoder *encoder = dev_get_drvdata(dev);
1944 struct exynos_dsi *dsi = encoder_to_dsi(encoder); 1943 struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1972,7 +1971,6 @@ err_clk:
1972 1971
1973 return ret; 1972 return ret;
1974} 1973}
1975#endif
1976 1974
1977static const struct dev_pm_ops exynos_dsi_pm_ops = { 1975static const struct dev_pm_ops exynos_dsi_pm_ops = {
1978 SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL) 1976 SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL)
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index b5fbc1cbf024..0a5a60005f7e 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1289,8 +1289,7 @@ static int mixer_remove(struct platform_device *pdev)
1289 return 0; 1289 return 0;
1290} 1290}
1291 1291
1292#ifdef CONFIG_PM_SLEEP 1292static int __maybe_unused exynos_mixer_suspend(struct device *dev)
1293static int exynos_mixer_suspend(struct device *dev)
1294{ 1293{
1295 struct mixer_context *ctx = dev_get_drvdata(dev); 1294 struct mixer_context *ctx = dev_get_drvdata(dev);
1296 struct mixer_resources *res = &ctx->mixer_res; 1295 struct mixer_resources *res = &ctx->mixer_res;
@@ -1306,7 +1305,7 @@ static int exynos_mixer_suspend(struct device *dev)
1306 return 0; 1305 return 0;
1307} 1306}
1308 1307
1309static int exynos_mixer_resume(struct device *dev) 1308static int __maybe_unused exynos_mixer_resume(struct device *dev)
1310{ 1309{
1311 struct mixer_context *ctx = dev_get_drvdata(dev); 1310 struct mixer_context *ctx = dev_get_drvdata(dev);
1312 struct mixer_resources *res = &ctx->mixer_res; 1311 struct mixer_resources *res = &ctx->mixer_res;
@@ -1342,7 +1341,6 @@ static int exynos_mixer_resume(struct device *dev)
1342 1341
1343 return 0; 1342 return 0;
1344} 1343}
1345#endif
1346 1344
1347static const struct dev_pm_ops exynos_mixer_pm_ops = { 1345static const struct dev_pm_ops exynos_mixer_pm_ops = {
1348 SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL) 1346 SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL)
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 533d1e3d4a99..a02112ba1c3d 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -136,6 +136,7 @@ static bool adv7511_register_volatile(struct device *dev, unsigned int reg)
136 case ADV7511_REG_BKSV(3): 136 case ADV7511_REG_BKSV(3):
137 case ADV7511_REG_BKSV(4): 137 case ADV7511_REG_BKSV(4):
138 case ADV7511_REG_DDC_STATUS: 138 case ADV7511_REG_DDC_STATUS:
139 case ADV7511_REG_EDID_READ_CTRL:
139 case ADV7511_REG_BSTATUS(0): 140 case ADV7511_REG_BSTATUS(0):
140 case ADV7511_REG_BSTATUS(1): 141 case ADV7511_REG_BSTATUS(1):
141 case ADV7511_REG_CHIP_ID_HIGH: 142 case ADV7511_REG_CHIP_ID_HIGH:
@@ -362,24 +363,31 @@ static void adv7511_power_on(struct adv7511 *adv7511)
362{ 363{
363 adv7511->current_edid_segment = -1; 364 adv7511->current_edid_segment = -1;
364 365
365 regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
366 ADV7511_INT0_EDID_READY);
367 regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
368 ADV7511_INT1_DDC_ERROR);
369 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, 366 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
370 ADV7511_POWER_POWER_DOWN, 0); 367 ADV7511_POWER_POWER_DOWN, 0);
368 if (adv7511->i2c_main->irq) {
369 /*
370 * Documentation says the INT_ENABLE registers are reset in
371 * POWER_DOWN mode. My 7511w preserved the bits, however.
372 * Still, let's be safe and stick to the documentation.
373 */
374 regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
375 ADV7511_INT0_EDID_READY);
376 regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
377 ADV7511_INT1_DDC_ERROR);
378 }
371 379
372 /* 380 /*
373 * Per spec it is allowed to pulse the HDP signal to indicate that the 381 * Per spec it is allowed to pulse the HPD signal to indicate that the
374 * EDID information has changed. Some monitors do this when they wakeup 382 * EDID information has changed. Some monitors do this when they wakeup
375 * from standby or are enabled. When the HDP goes low the adv7511 is 383 * from standby or are enabled. When the HPD goes low the adv7511 is
376 * reset and the outputs are disabled which might cause the monitor to 384 * reset and the outputs are disabled which might cause the monitor to
377 * go to standby again. To avoid this we ignore the HDP pin for the 385 * go to standby again. To avoid this we ignore the HPD pin for the
378 * first few seconds after enabling the output. 386 * first few seconds after enabling the output.
379 */ 387 */
380 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, 388 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
381 ADV7511_REG_POWER2_HDP_SRC_MASK, 389 ADV7511_REG_POWER2_HPD_SRC_MASK,
382 ADV7511_REG_POWER2_HDP_SRC_NONE); 390 ADV7511_REG_POWER2_HPD_SRC_NONE);
383 391
384 /* 392 /*
385 * Most of the registers are reset during power down or when HPD is low. 393 * Most of the registers are reset during power down or when HPD is low.
@@ -413,9 +421,9 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
413 if (ret < 0) 421 if (ret < 0)
414 return false; 422 return false;
415 423
416 if (irq0 & ADV7511_INT0_HDP) { 424 if (irq0 & ADV7511_INT0_HPD) {
417 regmap_write(adv7511->regmap, ADV7511_REG_INT(0), 425 regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
418 ADV7511_INT0_HDP); 426 ADV7511_INT0_HPD);
419 return true; 427 return true;
420 } 428 }
421 429
@@ -438,7 +446,7 @@ static int adv7511_irq_process(struct adv7511 *adv7511)
438 regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0); 446 regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
439 regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1); 447 regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
440 448
441 if (irq0 & ADV7511_INT0_HDP && adv7511->encoder) 449 if (irq0 & ADV7511_INT0_HPD && adv7511->encoder)
442 drm_helper_hpd_irq_event(adv7511->encoder->dev); 450 drm_helper_hpd_irq_event(adv7511->encoder->dev);
443 451
444 if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) { 452 if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
@@ -567,12 +575,14 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
567 575
568 /* Reading the EDID only works if the device is powered */ 576 /* Reading the EDID only works if the device is powered */
569 if (!adv7511->powered) { 577 if (!adv7511->powered) {
570 regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
571 ADV7511_INT0_EDID_READY);
572 regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
573 ADV7511_INT1_DDC_ERROR);
574 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, 578 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
575 ADV7511_POWER_POWER_DOWN, 0); 579 ADV7511_POWER_POWER_DOWN, 0);
580 if (adv7511->i2c_main->irq) {
581 regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
582 ADV7511_INT0_EDID_READY);
583 regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
584 ADV7511_INT1_DDC_ERROR);
585 }
576 adv7511->current_edid_segment = -1; 586 adv7511->current_edid_segment = -1;
577 } 587 }
578 588
@@ -638,10 +648,10 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
638 if (adv7511->status == connector_status_connected) 648 if (adv7511->status == connector_status_connected)
639 status = connector_status_disconnected; 649 status = connector_status_disconnected;
640 } else { 650 } else {
641 /* Renable HDP sensing */ 651 /* Renable HPD sensing */
642 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, 652 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
643 ADV7511_REG_POWER2_HDP_SRC_MASK, 653 ADV7511_REG_POWER2_HPD_SRC_MASK,
644 ADV7511_REG_POWER2_HDP_SRC_BOTH); 654 ADV7511_REG_POWER2_HPD_SRC_BOTH);
645 } 655 }
646 656
647 adv7511->status = status; 657 adv7511->status = status;
diff --git a/drivers/gpu/drm/i2c/adv7511.h b/drivers/gpu/drm/i2c/adv7511.h
index 6599ed538426..38515b30cedf 100644
--- a/drivers/gpu/drm/i2c/adv7511.h
+++ b/drivers/gpu/drm/i2c/adv7511.h
@@ -90,7 +90,7 @@
90#define ADV7511_CSC_ENABLE BIT(7) 90#define ADV7511_CSC_ENABLE BIT(7)
91#define ADV7511_CSC_UPDATE_MODE BIT(5) 91#define ADV7511_CSC_UPDATE_MODE BIT(5)
92 92
93#define ADV7511_INT0_HDP BIT(7) 93#define ADV7511_INT0_HPD BIT(7)
94#define ADV7511_INT0_VSYNC BIT(5) 94#define ADV7511_INT0_VSYNC BIT(5)
95#define ADV7511_INT0_AUDIO_FIFO_FULL BIT(4) 95#define ADV7511_INT0_AUDIO_FIFO_FULL BIT(4)
96#define ADV7511_INT0_EDID_READY BIT(2) 96#define ADV7511_INT0_EDID_READY BIT(2)
@@ -157,11 +157,11 @@
157#define ADV7511_PACKET_ENABLE_SPARE2 BIT(1) 157#define ADV7511_PACKET_ENABLE_SPARE2 BIT(1)
158#define ADV7511_PACKET_ENABLE_SPARE1 BIT(0) 158#define ADV7511_PACKET_ENABLE_SPARE1 BIT(0)
159 159
160#define ADV7511_REG_POWER2_HDP_SRC_MASK 0xc0 160#define ADV7511_REG_POWER2_HPD_SRC_MASK 0xc0
161#define ADV7511_REG_POWER2_HDP_SRC_BOTH 0x00 161#define ADV7511_REG_POWER2_HPD_SRC_BOTH 0x00
162#define ADV7511_REG_POWER2_HDP_SRC_HDP 0x40 162#define ADV7511_REG_POWER2_HPD_SRC_HPD 0x40
163#define ADV7511_REG_POWER2_HDP_SRC_CEC 0x80 163#define ADV7511_REG_POWER2_HPD_SRC_CEC 0x80
164#define ADV7511_REG_POWER2_HDP_SRC_NONE 0xc0 164#define ADV7511_REG_POWER2_HPD_SRC_NONE 0xc0
165#define ADV7511_REG_POWER2_TDMS_ENABLE BIT(4) 165#define ADV7511_REG_POWER2_TDMS_ENABLE BIT(4)
166#define ADV7511_REG_POWER2_GATE_INPUT_CLK BIT(0) 166#define ADV7511_REG_POWER2_GATE_INPUT_CLK BIT(0)
167 167
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index fcd77b27514d..051eab33e4c7 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -10,7 +10,6 @@ config DRM_I915
10 # the shmem_readpage() which depends upon tmpfs 10 # the shmem_readpage() which depends upon tmpfs
11 select SHMEM 11 select SHMEM
12 select TMPFS 12 select TMPFS
13 select STOP_MACHINE
14 select DRM_KMS_HELPER 13 select DRM_KMS_HELPER
15 select DRM_PANEL 14 select DRM_PANEL
16 select DRM_MIPI_DSI 15 select DRM_MIPI_DSI
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3ac616d7363b..f357058c74d9 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -501,7 +501,9 @@ void intel_detect_pch(struct drm_device *dev)
501 WARN_ON(!IS_SKYLAKE(dev) && 501 WARN_ON(!IS_SKYLAKE(dev) &&
502 !IS_KABYLAKE(dev)); 502 !IS_KABYLAKE(dev));
503 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || 503 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
504 (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE)) { 504 ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
505 pch->subsystem_vendor == 0x1af4 &&
506 pch->subsystem_device == 0x1100)) {
505 dev_priv->pch_type = intel_virt_detect_pch(dev); 507 dev_priv->pch_type = intel_virt_detect_pch(dev);
506 } else 508 } else
507 continue; 509 continue;
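The i915_drv.c hunk narrows virtual-PCH detection: the QEMU ISA-bridge device ID is only treated as virtual when the subsystem vendor/device pair also matches QEMU (0x1af4/0x1100, taken from the diff), so a real bridge that happens to reuse the device ID is not misdetected. A self-contained sketch of that predicate; the two *_DEVICE_ID_TYPE values below are placeholders for illustration, not the i915 definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder PCH device-ID types, for illustration only. */
#define PCH_P2X_DEVICE_ID_TYPE  0x7100
#define PCH_QEMU_DEVICE_ID_TYPE 0x2900

static bool is_virtual_pch(uint16_t id, uint16_t subsys_vendor,
			   uint16_t subsys_device)
{
	/* P2X is always virtual; the QEMU ID also needs QEMU subsystem IDs. */
	return id == PCH_P2X_DEVICE_ID_TYPE ||
	       (id == PCH_QEMU_DEVICE_ID_TYPE &&
		subsys_vendor == 0x1af4 && subsys_device == 0x1100);
}

int main(void)
{
	printf("%d\n", is_virtual_pch(PCH_QEMU_DEVICE_ID_TYPE, 0x8086, 0x0000)); /* 0 */
	printf("%d\n", is_virtual_pch(PCH_QEMU_DEVICE_ID_TYPE, 0x1af4, 0x1100)); /* 1 */
	return 0;
}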
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2f00828ccc6e..5feb65725c04 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2946,7 +2946,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
2946 struct i915_vma *vma; 2946 struct i915_vma *vma;
2947 u64 offset; 2947 u64 offset;
2948 2948
2949 intel_fill_fb_ggtt_view(&view, intel_plane->base.fb, 2949 intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
2950 intel_plane->base.state); 2950 intel_plane->base.state);
2951 2951
2952 vma = i915_gem_obj_to_ggtt_view(obj, &view); 2952 vma = i915_gem_obj_to_ggtt_view(obj, &view);
@@ -12075,11 +12075,21 @@ connected_sink_compute_bpp(struct intel_connector *connector,
12075 pipe_config->pipe_bpp = connector->base.display_info.bpc*3; 12075 pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
12076 } 12076 }
12077 12077
12078 /* Clamp bpp to 8 on screens without EDID 1.4 */ 12078 /* Clamp bpp to default limit on screens without EDID 1.4 */
12079 if (connector->base.display_info.bpc == 0 && bpp > 24) { 12079 if (connector->base.display_info.bpc == 0) {
12080 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", 12080 int type = connector->base.connector_type;
12081 bpp); 12081 int clamp_bpp = 24;
12082 pipe_config->pipe_bpp = 24; 12082
12083 /* Fall back to 18 bpp when DP sink capability is unknown. */
12084 if (type == DRM_MODE_CONNECTOR_DisplayPort ||
12085 type == DRM_MODE_CONNECTOR_eDP)
12086 clamp_bpp = 18;
12087
12088 if (bpp > clamp_bpp) {
12089 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
12090 bpp, clamp_bpp);
12091 pipe_config->pipe_bpp = clamp_bpp;
12092 }
12083 } 12093 }
12084} 12094}
12085 12095
@@ -13883,11 +13893,12 @@ intel_check_primary_plane(struct drm_plane *plane,
13883 int max_scale = DRM_PLANE_HELPER_NO_SCALING; 13893 int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13884 bool can_position = false; 13894 bool can_position = false;
13885 13895
13886 /* use scaler when colorkey is not required */ 13896 if (INTEL_INFO(plane->dev)->gen >= 9) {
13887 if (INTEL_INFO(plane->dev)->gen >= 9 && 13897 /* use scaler when colorkey is not required */
13888 state->ckey.flags == I915_SET_COLORKEY_NONE) { 13898 if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
13889 min_scale = 1; 13899 min_scale = 1;
13890 max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state); 13900 max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
13901 }
13891 can_position = true; 13902 can_position = true;
13892 } 13903 }
13893 13904
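The first intel_display.c hunk changes the fallback when a sink reports no bpc at all: DP and eDP connectors are now clamped to 18 bpp (6 bpc, the baseline a DP sink is expected to handle) instead of the generic 24 bpp limit. A standalone sketch of that rule, using illustrative connector-type names rather than the DRM enum.

#include <stdio.h>

enum connector_type { CONN_HDMI, CONN_DP, CONN_EDP };	/* illustrative */

/* Applied only when the sink reports bpc == 0 (no EDID 1.4 info). */
static int clamp_unknown_bpc(enum connector_type type, int bpp)
{
	int clamp_bpp = (type == CONN_DP || type == CONN_EDP) ? 18 : 24;

	return bpp > clamp_bpp ? clamp_bpp : bpp;
}

int main(void)
{
	printf("HDMI 30 bpp -> %d\n", clamp_unknown_bpc(CONN_HDMI, 30));	/* 24 */
	printf("DP   24 bpp -> %d\n", clamp_unknown_bpc(CONN_DP, 24));	/* 18 */
	return 0;
}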
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3aa614731d7e..f1fa756c5d5d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1707,6 +1707,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1707 if (flush_domains) { 1707 if (flush_domains) {
1708 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 1708 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1709 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 1709 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
1710 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
1710 flags |= PIPE_CONTROL_FLUSH_ENABLE; 1711 flags |= PIPE_CONTROL_FLUSH_ENABLE;
1711 } 1712 }
1712 1713
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 339701d7a9a5..40c6aff57256 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -331,6 +331,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
331 if (flush_domains) { 331 if (flush_domains) {
332 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 332 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
333 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 333 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
334 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
334 flags |= PIPE_CONTROL_FLUSH_ENABLE; 335 flags |= PIPE_CONTROL_FLUSH_ENABLE;
335 } 336 }
336 if (invalidate_domains) { 337 if (invalidate_domains) {
@@ -403,6 +404,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
403 if (flush_domains) { 404 if (flush_domains) {
404 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 405 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
405 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 406 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
407 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
406 flags |= PIPE_CONTROL_FLUSH_ENABLE; 408 flags |= PIPE_CONTROL_FLUSH_ENABLE;
407 } 409 }
408 if (invalidate_domains) { 410 if (invalidate_domains) {
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 84d45633d28c..fb6ad143873f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -33,6 +33,7 @@
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <drm/drmP.h> 34#include <drm/drmP.h>
35#include <drm/radeon_drm.h> 35#include <drm/radeon_drm.h>
36#include <drm/drm_cache.h>
36#include "radeon.h" 37#include "radeon.h"
37#include "radeon_trace.h" 38#include "radeon_trace.h"
38 39
@@ -245,6 +246,12 @@ int radeon_bo_create(struct radeon_device *rdev,
245 DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " 246 DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
246 "better performance thanks to write-combining\n"); 247 "better performance thanks to write-combining\n");
247 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); 248 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
249#else
250 /* For architectures that don't support WC memory,
251 * mask out the WC flag from the BO
252 */
253 if (!drm_arch_can_wc_memory())
254 bo->flags &= ~RADEON_GEM_GTT_WC;
248#endif 255#endif
249 256
250 radeon_ttm_placement_from_domain(bo, domain); 257 radeon_ttm_placement_from_domain(bo, domain);
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index 7bfb063029d8..461a0558bca4 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -35,4 +35,13 @@
35 35
36void drm_clflush_pages(struct page *pages[], unsigned long num_pages); 36void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
37 37
38static inline bool drm_arch_can_wc_memory(void)
39{
40#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
41 return false;
42#else
43 return true;
44#endif
45}
46
38#endif 47#endif
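drm_cache.h gains drm_arch_can_wc_memory(), which the radeon hunk above uses to quietly drop a write-combining request on configurations where WC mappings of system memory are not dependable (cache-coherent PowerPC in the kernel's check). A userspace-style sketch of the same fallback; the flag name and the __powerpc__ test are simplifications, not the kernel's CONFIG_PPC/CONFIG_NOT_COHERENT_CACHE logic.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GEM_GTT_WC (1u << 0)	/* illustrative stand-in for RADEON_GEM_GTT_WC */

static bool arch_can_wc_memory(void)
{
#if defined(__powerpc__)	/* crude proxy for the kernel's config test */
	return false;
#else
	return true;
#endif
}

int main(void)
{
	uint32_t bo_flags = GEM_GTT_WC;

	if (!arch_can_wc_memory())
		bo_flags &= ~GEM_GTT_WC;	/* fall back to cached GTT */

	printf("write-combining %s\n",
	       (bo_flags & GEM_GTT_WC) ? "kept" : "masked out");
	return 0;
}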
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 24ab1787b771..fdb47051d549 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -44,8 +44,6 @@ struct drm_dp_vcpi {
44/** 44/**
45 * struct drm_dp_mst_port - MST port 45 * struct drm_dp_mst_port - MST port
46 * @kref: reference count for this port. 46 * @kref: reference count for this port.
47 * @guid_valid: for DP 1.2 devices if we have validated the GUID.
48 * @guid: guid for DP 1.2 device on this port.
49 * @port_num: port number 47 * @port_num: port number
50 * @input: if this port is an input port. 48 * @input: if this port is an input port.
51 * @mcs: message capability status - DP 1.2 spec. 49 * @mcs: message capability status - DP 1.2 spec.
@@ -70,10 +68,6 @@ struct drm_dp_vcpi {
70struct drm_dp_mst_port { 68struct drm_dp_mst_port {
71 struct kref kref; 69 struct kref kref;
72 70
73 /* if dpcd 1.2 device is on this port - its GUID info */
74 bool guid_valid;
75 u8 guid[16];
76
77 u8 port_num; 71 u8 port_num;
78 bool input; 72 bool input;
79 bool mcs; 73 bool mcs;
@@ -110,10 +104,12 @@ struct drm_dp_mst_port {
110 * @tx_slots: transmission slots for this device. 104 * @tx_slots: transmission slots for this device.
111 * @last_seqno: last sequence number used to talk to this. 105 * @last_seqno: last sequence number used to talk to this.
112 * @link_address_sent: if a link address message has been sent to this device yet. 106 * @link_address_sent: if a link address message has been sent to this device yet.
107 * @guid: guid for DP 1.2 branch device. port under this branch can be
108 * identified by port #.
113 * 109 *
114 * This structure represents an MST branch device, there is one 110 * This structure represents an MST branch device, there is one
115 * primary branch device at the root, along with any others connected 111 * primary branch device at the root, along with any other branches connected
116 * to downstream ports 112 * to downstream port of parent branches.
117 */ 113 */
118struct drm_dp_mst_branch { 114struct drm_dp_mst_branch {
119 struct kref kref; 115 struct kref kref;
@@ -132,6 +128,9 @@ struct drm_dp_mst_branch {
132 struct drm_dp_sideband_msg_tx *tx_slots[2]; 128 struct drm_dp_sideband_msg_tx *tx_slots[2];
133 int last_seqno; 129 int last_seqno;
134 bool link_address_sent; 130 bool link_address_sent;
131
132 /* global unique identifier to identify branch devices */
133 u8 guid[16];
135}; 134};
136 135
137 136
@@ -406,11 +405,9 @@ struct drm_dp_payload {
406 * @conn_base_id: DRM connector ID this mgr is connected to. 405 * @conn_base_id: DRM connector ID this mgr is connected to.
407 * @down_rep_recv: msg receiver state for down replies. 406 * @down_rep_recv: msg receiver state for down replies.
408 * @up_req_recv: msg receiver state for up requests. 407 * @up_req_recv: msg receiver state for up requests.
409 * @lock: protects mst state, primary, guid, dpcd. 408 * @lock: protects mst state, primary, dpcd.
410 * @mst_state: if this manager is enabled for an MST capable port. 409 * @mst_state: if this manager is enabled for an MST capable port.
411 * @mst_primary: pointer to the primary branch device. 410 * @mst_primary: pointer to the primary branch device.
412 * @guid_valid: GUID valid for the primary branch device.
413 * @guid: GUID for primary port.
414 * @dpcd: cache of DPCD for primary port. 411 * @dpcd: cache of DPCD for primary port.
415 * @pbn_div: PBN to slots divisor. 412 * @pbn_div: PBN to slots divisor.
416 * 413 *
@@ -432,13 +429,11 @@ struct drm_dp_mst_topology_mgr {
432 struct drm_dp_sideband_msg_rx up_req_recv; 429 struct drm_dp_sideband_msg_rx up_req_recv;
433 430
434 /* pointer to info about the initial MST device */ 431 /* pointer to info about the initial MST device */
435 struct mutex lock; /* protects mst_state + primary + guid + dpcd */ 432 struct mutex lock; /* protects mst_state + primary + dpcd */
436 433
437 bool mst_state; 434 bool mst_state;
438 struct drm_dp_mst_branch *mst_primary; 435 struct drm_dp_mst_branch *mst_primary;
439 /* primary MST device GUID */ 436
440 bool guid_valid;
441 u8 guid[16];
442 u8 dpcd[DP_RECEIVER_CAP_SIZE]; 437 u8 dpcd[DP_RECEIVER_CAP_SIZE];
443 u8 sink_count; 438 u8 sink_count;
444 int pbn_div; 439 int pbn_div;
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index d639049a613d..553210c02ee0 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -73,18 +73,28 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
73#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT) 73#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
74#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1) 74#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
75#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK) 75#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
76#define DRM_FIXED_EPSILON 1LL
77#define DRM_FIXED_ALMOST_ONE (DRM_FIXED_ONE - DRM_FIXED_EPSILON)
76 78
77static inline s64 drm_int2fixp(int a) 79static inline s64 drm_int2fixp(int a)
78{ 80{
79 return ((s64)a) << DRM_FIXED_POINT; 81 return ((s64)a) << DRM_FIXED_POINT;
80} 82}
81 83
82static inline int drm_fixp2int(int64_t a) 84static inline int drm_fixp2int(s64 a)
83{ 85{
84 return ((s64)a) >> DRM_FIXED_POINT; 86 return ((s64)a) >> DRM_FIXED_POINT;
85} 87}
86 88
87static inline unsigned drm_fixp_msbset(int64_t a) 89static inline int drm_fixp2int_ceil(s64 a)
90{
91 if (a > 0)
92 return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
93 else
94 return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
95}
96
97static inline unsigned drm_fixp_msbset(s64 a)
88{ 98{
89 unsigned shift, sign = (a >> 63) & 1; 99 unsigned shift, sign = (a >> 63) & 1;
90 100
@@ -136,6 +146,45 @@ static inline s64 drm_fixp_div(s64 a, s64 b)
136 return result; 146 return result;
137} 147}
138 148
149static inline s64 drm_fixp_from_fraction(s64 a, s64 b)
150{
151 s64 res;
152 bool a_neg = a < 0;
153 bool b_neg = b < 0;
154 u64 a_abs = a_neg ? -a : a;
155 u64 b_abs = b_neg ? -b : b;
156 u64 rem;
157
158 /* determine integer part */
159 u64 res_abs = div64_u64_rem(a_abs, b_abs, &rem);
160
161 /* determine fractional part */
162 {
163 u32 i = DRM_FIXED_POINT;
164
165 do {
166 rem <<= 1;
167 res_abs <<= 1;
168 if (rem >= b_abs) {
169 res_abs |= 1;
170 rem -= b_abs;
171 }
172 } while (--i != 0);
173 }
174
175 /* round up LSB */
176 {
177 u64 summand = (rem << 1) >= b_abs;
178
179 res_abs += summand;
180 }
181
182 res = (s64) res_abs;
183 if (a_neg ^ b_neg)
184 res = -res;
185 return res;
186}
187
139static inline s64 drm_fixp_exp(s64 x) 188static inline s64 drm_fixp_exp(s64 x)
140{ 189{
141 s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000); 190 s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
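The drm_fixed.h additions give the MST code a signed 31.32 fixed-point path: drm_fixp_from_fraction() builds a/b one fractional bit at a time with a final rounding step, and drm_fixp2int_ceil() rounds a positive value up to the next integer. Below is a self-contained userspace rendition of the same arithmetic (same algorithm, non-kernel names), convenient for sanity-checking the kind of fractional PBN and slot values the MST fixes compute with these helpers.

#include <stdint.h>
#include <stdio.h>

#define FIXED_POINT      32
#define FIXED_ONE        (1ULL << FIXED_POINT)
#define FIXED_ALMOST_ONE (FIXED_ONE - 1)

/* a/b as signed 31.32 fixed point, mirroring drm_fixp_from_fraction(). */
static int64_t fixp_from_fraction(int64_t a, int64_t b)
{
	int negative = (a < 0) ^ (b < 0);
	uint64_t a_abs = a < 0 ? -(uint64_t)a : (uint64_t)a;
	uint64_t b_abs = b < 0 ? -(uint64_t)b : (uint64_t)b;
	uint64_t res = a_abs / b_abs;		/* integer part */
	uint64_t rem = a_abs % b_abs;

	for (int i = 0; i < FIXED_POINT; i++) {	/* one fractional bit per pass */
		rem <<= 1;
		res <<= 1;
		if (rem >= b_abs) {
			res |= 1;
			rem -= b_abs;
		}
	}
	res += (rem << 1) >= b_abs;		/* round the last bit */

	return negative ? -(int64_t)res : (int64_t)res;
}

/* Ceiling of a positive fixed-point value, as drm_fixp2int_ceil() does. */
static int fixp2int_ceil_pos(int64_t a)
{
	return (int)((a + (int64_t)FIXED_ALMOST_ONE) >> FIXED_POINT);
}

int main(void)
{
	int64_t two_thirds = fixp_from_fraction(2, 3);

	printf("2/3 ~= %.10f, ceil = %d\n",
	       (double)two_thirds / (double)FIXED_ONE,
	       fixp2int_ceil_pos(two_thirds));
	return 0;
}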