Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c  13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  69
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c  30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c  13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c  15
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h  2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c  1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c  18
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  53
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c  2
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c  18
-rw-r--r--  drivers/gpu/drm/drm_info.c  4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_buffer.c  24
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c  7
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.c  3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c  3
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c  4
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c  23
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c  5
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c  39
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c  6
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.c  15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c  17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c  14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c  3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c  3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c  3
-rw-r--r--  drivers/gpu/drm/radeon/si.c  1
-rw-r--r--  drivers/gpu/drm/radeon/sid.h  1
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.c  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c  10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h  2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  145
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c  56
45 files changed, 440 insertions(+), 288 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 2e3a0543760d..e3281d4e3e41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -765,7 +765,7 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
 	return ret;
 }
 
-static void amdgpu_connector_destroy(struct drm_connector *connector)
+static void amdgpu_connector_unregister(struct drm_connector *connector)
 {
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
@@ -773,6 +773,12 @@ static void amdgpu_connector_destroy(struct drm_connector *connector)
 		drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
 		amdgpu_connector->ddc_bus->has_aux = false;
 	}
+}
+
+static void amdgpu_connector_destroy(struct drm_connector *connector)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
 	amdgpu_connector_free_edid(connector);
 	kfree(amdgpu_connector->con_priv);
 	drm_connector_unregister(connector);
@@ -826,6 +832,7 @@ static const struct drm_connector_funcs amdgpu_connector_lvds_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.detect = amdgpu_connector_lvds_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
+	.early_unregister = amdgpu_connector_unregister,
 	.destroy = amdgpu_connector_destroy,
 	.set_property = amdgpu_connector_set_lcd_property,
 };
@@ -936,6 +943,7 @@ static const struct drm_connector_funcs amdgpu_connector_vga_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.detect = amdgpu_connector_vga_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
+	.early_unregister = amdgpu_connector_unregister,
 	.destroy = amdgpu_connector_destroy,
 	.set_property = amdgpu_connector_set_property,
 };
@@ -1203,6 +1211,7 @@ static const struct drm_connector_funcs amdgpu_connector_dvi_funcs = {
 	.detect = amdgpu_connector_dvi_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = amdgpu_connector_set_property,
+	.early_unregister = amdgpu_connector_unregister,
 	.destroy = amdgpu_connector_destroy,
 	.force = amdgpu_connector_dvi_force,
};
@@ -1493,6 +1502,7 @@ static const struct drm_connector_funcs amdgpu_connector_dp_funcs = {
 	.detect = amdgpu_connector_dp_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = amdgpu_connector_set_property,
+	.early_unregister = amdgpu_connector_unregister,
 	.destroy = amdgpu_connector_destroy,
 	.force = amdgpu_connector_dvi_force,
 };
@@ -1502,6 +1512,7 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
 	.detect = amdgpu_connector_dp_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = amdgpu_connector_set_lcd_property,
+	.early_unregister = amdgpu_connector_unregister,
 	.destroy = amdgpu_connector_destroy,
 	.force = amdgpu_connector_dvi_force,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index e203e5561107..a5e2fcbef0f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,6 +43,9 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 		ctx->rings[i].sequence = 1;
 		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
 	}
+
+	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+
 	/* create context entity for each ring */
 	for (i = 0; i < adev->num_rings; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7dbe85d67d26..b4f4a9239069 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1408,16 +1408,6 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_block_status[i].valid)
 			continue;
-		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_UVD ||
-		    adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_VCE)
-			continue;
-		/* enable clockgating to save power */
-		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-								    AMD_CG_STATE_GATE);
-		if (r) {
-			DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
-			return r;
-		}
 		if (adev->ip_blocks[i].funcs->late_init) {
 			r = adev->ip_blocks[i].funcs->late_init((void *)adev);
 			if (r) {
@@ -1426,6 +1416,18 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
 			}
 			adev->ip_block_status[i].late_initialized = true;
 		}
+		/* skip CG for VCE/UVD, it's handled specially */
+		if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD &&
+		    adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) {
+			/* enable clockgating to save power */
+			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+									    AMD_CG_STATE_GATE);
+			if (r) {
+				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].funcs->name, r);
+				return r;
+			}
+		}
 	}
 
 	return 0;
@@ -1435,6 +1437,30 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 {
 	int i, r;
 
+	/* need to disable SMC first */
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].hw)
+			continue;
+		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) {
+			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
+			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+									    AMD_CG_STATE_UNGATE);
+			if (r) {
+				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].funcs->name, r);
+				return r;
+			}
+			r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+			/* XXX handle errors */
+			if (r) {
+				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].funcs->name, r);
+			}
+			adev->ip_block_status[i].hw = false;
+			break;
+		}
+	}
+
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_block_status[i].hw)
 			continue;
@@ -2073,7 +2099,8 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
 		if (!adev->ip_block_status[i].valid)
 			continue;
 		if (adev->ip_blocks[i].funcs->check_soft_reset)
-			adev->ip_blocks[i].funcs->check_soft_reset(adev);
+			adev->ip_block_status[i].hang =
+				adev->ip_blocks[i].funcs->check_soft_reset(adev);
 		if (adev->ip_block_status[i].hang) {
 			DRM_INFO("IP block:%d is hang!\n", i);
 			asic_hang = true;
@@ -2102,12 +2129,20 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
 
 static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
 {
-	if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang ||
-	    adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang ||
-	    adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang ||
-	    adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) {
-		DRM_INFO("Some block need full reset!\n");
-		return true;
+	int i;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+		if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
+		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
+		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
+		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
+			if (adev->ip_block_status[i].hang) {
+				DRM_INFO("Some block need full reset!\n");
+				return true;
+			}
+		}
 	}
 	return false;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index fe36caf1b7d7..14f57d9915e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -113,24 +113,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
 	printk("\n");
 }
 
+
 u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_crtc *crtc;
 	struct amdgpu_crtc *amdgpu_crtc;
-	u32 line_time_us, vblank_lines;
+	u32 vblank_in_pixels;
 	u32 vblank_time_us = 0xffffffff;	/* if the displays are off, vblank time is max */
 
 	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 			amdgpu_crtc = to_amdgpu_crtc(crtc);
 			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
-				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
-					amdgpu_crtc->hw_mode.clock;
-				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
-					amdgpu_crtc->hw_mode.crtc_vdisplay +
-					(amdgpu_crtc->v_border * 2);
-				vblank_time_us = vblank_lines * line_time_us;
+				vblank_in_pixels =
+					amdgpu_crtc->hw_mode.crtc_htotal *
+					(amdgpu_crtc->hw_mode.crtc_vblank_end -
+					 amdgpu_crtc->hw_mode.crtc_vdisplay +
+					 (amdgpu_crtc->v_border * 2));
+
+				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
 				break;
 			}
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index e1fa8731d1e2..3cb5e903cd62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -345,8 +345,8 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
 	ent = debugfs_create_file(name,
 				  S_IFREG | S_IRUGO, root,
 				  ring, &amdgpu_debugfs_ring_fops);
-	if (IS_ERR(ent))
-		return PTR_ERR(ent);
+	if (!ent)
+		return -ENOMEM;
 
 	i_size_write(ent->d_inode, ring->ring_size + 12);
 	ring->ent = ent;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 887483b8b818..dcaf691f56b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -555,10 +555,13 @@ struct amdgpu_ttm_tt {
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+	unsigned int flags = 0;
 	unsigned pinned = 0;
 	int r;
 
+	if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+		flags |= FOLL_WRITE;
+
 	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
 		/* check that we only use anonymous memory
 		   to prevent problems with writeback */
@@ -581,7 +584,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 		list_add(&guptask.list, &gtt->guptasks);
 		spin_unlock(&gtt->guptasklock);
 
-		r = get_user_pages(userptr, num_pages, write, 0, p, NULL);
+		r = get_user_pages(userptr, num_pages, flags, p, NULL);
 
 		spin_lock(&gtt->guptasklock);
 		list_del(&guptask.list);
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index f80a0834e889..3c082e143730 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1514,14 +1514,16 @@ static int cz_dpm_set_powergating_state(void *handle,
 	return 0;
 }
 
-/* borrowed from KV, need future unify */
 static int cz_dpm_get_temperature(struct amdgpu_device *adev)
 {
 	int actual_temp = 0;
-	uint32_t temp = RREG32_SMC(0xC0300E0C);
+	uint32_t val = RREG32_SMC(ixTHM_TCON_CUR_TMP);
+	uint32_t temp = REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
 
-	if (temp)
+	if (REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
 		actual_temp = 1000 * ((temp / 8) - 49);
+	else
+		actual_temp = 1000 * (temp / 8);
 
 	return actual_temp;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 613ebb7ed50f..4108c686aa7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3188,16 +3188,11 @@ static int dce_v10_0_wait_for_idle(void *handle)
 	return 0;
 }
 
-static int dce_v10_0_check_soft_reset(void *handle)
+static bool dce_v10_0_check_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (dce_v10_0_is_display_hung(adev))
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = true;
-	else
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = false;
-
-	return 0;
+	return dce_v10_0_is_display_hung(adev);
 }
 
 static int dce_v10_0_soft_reset(void *handle)
@@ -3205,9 +3200,6 @@ static int dce_v10_0_soft_reset(void *handle)
 	u32 srbm_soft_reset = 0, tmp;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang)
-		return 0;
-
 	if (dce_v10_0_is_display_hung(adev))
 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 6c6ff57b1c95..ee6a48a09214 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4087,14 +4087,21 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
 static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
 {
 	int r;
+	u32 tmp;
 
 	gfx_v8_0_rlc_stop(adev);
 
 	/* disable CG */
-	WREG32(mmRLC_CGCG_CGLS_CTRL, 0);
+	tmp = RREG32(mmRLC_CGCG_CGLS_CTRL);
+	tmp &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
+		 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
+	WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
 	if (adev->asic_type == CHIP_POLARIS11 ||
-	    adev->asic_type == CHIP_POLARIS10)
-		WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0);
+	    adev->asic_type == CHIP_POLARIS10) {
+		tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
+		tmp &= ~0x3;
+		WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
+	}
 
 	/* disable PG */
 	WREG32(mmRLC_PG_CNTL, 0);
@@ -5137,7 +5144,7 @@ static int gfx_v8_0_wait_for_idle(void *handle)
 	return -ETIMEDOUT;
 }
 
-static int gfx_v8_0_check_soft_reset(void *handle)
+static bool gfx_v8_0_check_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
@@ -5189,16 +5196,14 @@ static int gfx_v8_0_check_soft_reset(void *handle)
 						SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
 
 	if (grbm_soft_reset || srbm_soft_reset) {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = true;
 		adev->gfx.grbm_soft_reset = grbm_soft_reset;
 		adev->gfx.srbm_soft_reset = srbm_soft_reset;
+		return true;
 	} else {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = false;
 		adev->gfx.grbm_soft_reset = 0;
 		adev->gfx.srbm_soft_reset = 0;
+		return false;
 	}
-
-	return 0;
 }
 
 static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
@@ -5226,7 +5231,8 @@ static int gfx_v8_0_pre_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+	if ((!adev->gfx.grbm_soft_reset) &&
+	    (!adev->gfx.srbm_soft_reset))
 		return 0;
 
 	grbm_soft_reset = adev->gfx.grbm_soft_reset;
@@ -5264,7 +5270,8 @@ static int gfx_v8_0_soft_reset(void *handle)
 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
 	u32 tmp;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+	if ((!adev->gfx.grbm_soft_reset) &&
+	    (!adev->gfx.srbm_soft_reset))
 		return 0;
 
 	grbm_soft_reset = adev->gfx.grbm_soft_reset;
@@ -5334,7 +5341,8 @@ static int gfx_v8_0_post_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+	if ((!adev->gfx.grbm_soft_reset) &&
+	    (!adev->gfx.srbm_soft_reset))
 		return 0;
 
 	grbm_soft_reset = adev->gfx.grbm_soft_reset;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1b319f5bc696..c22ef140a542 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1099,7 +1099,7 @@ static int gmc_v8_0_wait_for_idle(void *handle)
 
 }
 
-static int gmc_v8_0_check_soft_reset(void *handle)
+static bool gmc_v8_0_check_soft_reset(void *handle)
 {
 	u32 srbm_soft_reset = 0;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1116,20 +1116,19 @@ static int gmc_v8_0_check_soft_reset(void *handle)
 						SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
 	}
 	if (srbm_soft_reset) {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = true;
 		adev->mc.srbm_soft_reset = srbm_soft_reset;
+		return true;
 	} else {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = false;
 		adev->mc.srbm_soft_reset = 0;
+		return false;
 	}
-	return 0;
 }
 
 static int gmc_v8_0_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+	if (!adev->mc.srbm_soft_reset)
 		return 0;
 
 	gmc_v8_0_mc_stop(adev, &adev->mc.save);
@@ -1145,7 +1144,7 @@ static int gmc_v8_0_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+	if (!adev->mc.srbm_soft_reset)
 		return 0;
 	srbm_soft_reset = adev->mc.srbm_soft_reset;
 
@@ -1175,7 +1174,7 @@ static int gmc_v8_0_post_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+	if (!adev->mc.srbm_soft_reset)
 		return 0;
 
 	gmc_v8_0_mc_resume(adev, &adev->mc.save);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index f325fd86430b..a9d10941fb53 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1268,7 +1268,7 @@ static int sdma_v3_0_wait_for_idle(void *handle)
 	return -ETIMEDOUT;
 }
 
-static int sdma_v3_0_check_soft_reset(void *handle)
+static bool sdma_v3_0_check_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset = 0;
@@ -1281,14 +1281,12 @@ static int sdma_v3_0_check_soft_reset(void *handle)
 	}
 
 	if (srbm_soft_reset) {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = true;
 		adev->sdma.srbm_soft_reset = srbm_soft_reset;
+		return true;
 	} else {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = false;
 		adev->sdma.srbm_soft_reset = 0;
+		return false;
 	}
-
-	return 0;
 }
 
 static int sdma_v3_0_pre_soft_reset(void *handle)
@@ -1296,7 +1294,7 @@ static int sdma_v3_0_pre_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset = 0;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+	if (!adev->sdma.srbm_soft_reset)
 		return 0;
 
 	srbm_soft_reset = adev->sdma.srbm_soft_reset;
@@ -1315,7 +1313,7 @@ static int sdma_v3_0_post_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset = 0;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+	if (!adev->sdma.srbm_soft_reset)
 		return 0;
 
 	srbm_soft_reset = adev->sdma.srbm_soft_reset;
@@ -1335,7 +1333,7 @@ static int sdma_v3_0_soft_reset(void *handle)
 	u32 srbm_soft_reset = 0;
 	u32 tmp;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+	if (!adev->sdma.srbm_soft_reset)
 		return 0;
 
 	srbm_soft_reset = adev->sdma.srbm_soft_reset;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 8bd08925b370..3de7bca5854b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3499,6 +3499,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 		max_sclk = 75000;
 		max_mclk = 80000;
 	}
+	/* Limit clocks for some HD8600 parts */
+	if (adev->pdev->device == 0x6660 &&
+	    adev->pdev->revision == 0x83) {
+		max_sclk = 75000;
+		max_mclk = 80000;
+	}
 
 	if (rps->vce_active) {
 		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index d127d59f953a..b4ea229bb449 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -373,7 +373,7 @@ static int tonga_ih_wait_for_idle(void *handle)
 	return -ETIMEDOUT;
 }
 
-static int tonga_ih_check_soft_reset(void *handle)
+static bool tonga_ih_check_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset = 0;
@@ -384,21 +384,19 @@ static int tonga_ih_check_soft_reset(void *handle)
 						SOFT_RESET_IH, 1);
 
 	if (srbm_soft_reset) {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = true;
 		adev->irq.srbm_soft_reset = srbm_soft_reset;
+		return true;
 	} else {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = false;
 		adev->irq.srbm_soft_reset = 0;
+		return false;
 	}
-
-	return 0;
 }
 
 static int tonga_ih_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+	if (!adev->irq.srbm_soft_reset)
 		return 0;
 
 	return tonga_ih_hw_fini(adev);
@@ -408,7 +406,7 @@ static int tonga_ih_post_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+	if (!adev->irq.srbm_soft_reset)
 		return 0;
 
 	return tonga_ih_hw_init(adev);
@@ -419,7 +417,7 @@ static int tonga_ih_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+	if (!adev->irq.srbm_soft_reset)
 		return 0;
 	srbm_soft_reset = adev->irq.srbm_soft_reset;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index e0fd9f21ed95..ab3df6d75656 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -770,7 +770,7 @@ static int uvd_v6_0_wait_for_idle(void *handle)
 }
 
 #define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
-static int uvd_v6_0_check_soft_reset(void *handle)
+static bool uvd_v6_0_check_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset = 0;
@@ -782,19 +782,19 @@ static int uvd_v6_0_check_soft_reset(void *handle)
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
 
 	if (srbm_soft_reset) {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = true;
 		adev->uvd.srbm_soft_reset = srbm_soft_reset;
+		return true;
 	} else {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = false;
 		adev->uvd.srbm_soft_reset = 0;
+		return false;
 	}
-	return 0;
 }
+
 static int uvd_v6_0_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+	if (!adev->uvd.srbm_soft_reset)
 		return 0;
 
 	uvd_v6_0_stop(adev);
@@ -806,7 +806,7 @@ static int uvd_v6_0_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+	if (!adev->uvd.srbm_soft_reset)
 		return 0;
 	srbm_soft_reset = adev->uvd.srbm_soft_reset;
 
@@ -836,7 +836,7 @@ static int uvd_v6_0_post_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+	if (!adev->uvd.srbm_soft_reset)
 		return 0;
 
 	mdelay(5);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 3f6db4ec0102..8533269ec160 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -561,7 +561,7 @@ static int vce_v3_0_wait_for_idle(void *handle)
 #define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
 				     VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
 
-static int vce_v3_0_check_soft_reset(void *handle)
+static bool vce_v3_0_check_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset = 0;
@@ -591,16 +591,15 @@ static int vce_v3_0_check_soft_reset(void *handle)
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
 	}
 	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+	mutex_unlock(&adev->grbm_idx_mutex);
 
 	if (srbm_soft_reset) {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true;
 		adev->vce.srbm_soft_reset = srbm_soft_reset;
+		return true;
 	} else {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false;
 		adev->vce.srbm_soft_reset = 0;
+		return false;
 	}
-	mutex_unlock(&adev->grbm_idx_mutex);
-	return 0;
 }
 
 static int vce_v3_0_soft_reset(void *handle)
@@ -608,7 +607,7 @@ static int vce_v3_0_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+	if (!adev->vce.srbm_soft_reset)
 		return 0;
 	srbm_soft_reset = adev->vce.srbm_soft_reset;
 
@@ -638,7 +637,7 @@ static int vce_v3_0_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+	if (!adev->vce.srbm_soft_reset)
 		return 0;
 
 	mdelay(5);
@@ -651,7 +650,7 @@ static int vce_v3_0_post_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+	if (!adev->vce.srbm_soft_reset)
 		return 0;
 
 	mdelay(5);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index c934b78c9e2f..bec8125bceb0 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -165,7 +165,7 @@ struct amd_ip_funcs {
 	/* poll for idle */
 	int (*wait_for_idle)(void *handle);
 	/* check soft reset the IP block */
-	int (*check_soft_reset)(void *handle);
+	bool (*check_soft_reset)(void *handle);
 	/* pre soft reset the IP block */
 	int (*pre_soft_reset)(void *handle);
 	/* soft reset the IP block */
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 92b117843875..8cee4e0f9fde 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -49,6 +49,7 @@ static const pem_event_action * const uninitialize_event[] = {
 	uninitialize_display_phy_access_tasks,
 	disable_gfx_voltage_island_power_gating_tasks,
 	disable_gfx_clock_gating_tasks,
+	uninitialize_thermal_controller_tasks,
 	set_boot_state_tasks,
 	adjust_power_state_tasks,
 	disable_dynamic_state_management_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 7e4fcbbbe086..960424913496 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1785,6 +1785,21 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
 	return 0;
 }
 
+static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+{
+	int actual_temp = 0;
+	uint32_t val = cgs_read_ind_register(hwmgr->device,
+					     CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
+	uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
+
+	if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
+		actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	else
+		actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+	return actual_temp;
+}
+
 static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
 {
 	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1881,6 +1896,9 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
 	case AMDGPU_PP_SENSOR_VCE_POWER:
 		*value = cz_hwmgr->vce_power_gated ? 0 : 1;
 		return 0;
+	case AMDGPU_PP_SENSOR_GPU_TEMP:
+		*value = cz_thermal_get_temperature(hwmgr);
+		return 0;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 508245d49d33..609996c84ad5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1030,20 +1030,19 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
 	/* disable SCLK dpm */
-	if (!data->sclk_dpm_key_disabled)
-		PP_ASSERT_WITH_CODE(
-				(smum_send_msg_to_smc(hwmgr->smumgr,
-						PPSMC_MSG_DPM_Disable) == 0),
-				"Failed to disable SCLK DPM!",
-				return -EINVAL);
+	if (!data->sclk_dpm_key_disabled) {
+		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+				"Trying to disable SCLK DPM when DPM is disabled",
+				return 0);
+		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Disable);
+	}
 
 	/* disable MCLK dpm */
 	if (!data->mclk_dpm_key_disabled) {
-		PP_ASSERT_WITH_CODE(
-				(smum_send_msg_to_smc(hwmgr->smumgr,
-						PPSMC_MSG_MCLKDPM_Disable) == 0),
-				"Failed to disable MCLK DPM!",
-				return -EINVAL);
+		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+				"Trying to disable MCLK DPM when DPM is disabled",
+				return 0);
+		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_Disable);
 	}
 
 	return 0;
@@ -1069,10 +1068,13 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
 				return -EINVAL);
 	}
 
-	if (smu7_disable_sclk_mclk_dpm(hwmgr)) {
-		printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
-		return -EINVAL;
-	}
+	smu7_disable_sclk_mclk_dpm(hwmgr);
+
+	PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+			"Trying to disable voltage DPM when DPM is disabled",
+			return 0);
+
+	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Disable);
 
 	return 0;
 }
@@ -1226,7 +1228,7 @@ int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 			"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
 
-	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
+	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_NoDisplay);
 
 	tmp_result = smu7_enable_sclk_control(hwmgr);
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
@@ -1306,6 +1308,12 @@ int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE((tmp_result == 0),
 			"Failed to disable thermal auto throttle!", result = tmp_result);
 
+	if (1 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
+		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableAvfs)),
+				"Failed to disable AVFS!",
+				return -EINVAL);
+	}
+
 	tmp_result = smu7_stop_dpm(hwmgr);
 	PP_ASSERT_WITH_CODE((tmp_result == 0),
 			"Failed to stop DPM!", result = tmp_result);
@@ -1452,8 +1460,10 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
 	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
 
 
-	if (table_info != NULL)
-		sclk_table = table_info->vdd_dep_on_sclk;
+	if (table_info == NULL)
+		return -EINVAL;
+
+	sclk_table = table_info->vdd_dep_on_sclk;
 
 	for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
 		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
@@ -3802,13 +3812,15 @@ static inline bool smu7_are_power_levels_equal(const struct smu7_performance_lev
 
 int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
 {
-	const struct smu7_power_state *psa = cast_const_phw_smu7_power_state(pstate1);
-	const struct smu7_power_state *psb = cast_const_phw_smu7_power_state(pstate2);
+	const struct smu7_power_state *psa;
+	const struct smu7_power_state *psb;
 	int i;
 
 	if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
 		return -EINVAL;
 
+	psa = cast_const_phw_smu7_power_state(pstate1);
+	psb = cast_const_phw_smu7_power_state(pstate2);
 	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
 	if (psa->performance_level_count != psb->performance_level_count) {
 		*equal = false;
@@ -4324,6 +4336,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
 	.set_mclk_od = smu7_set_mclk_od,
 	.get_clock_by_type = smu7_get_clock_by_type,
 	.read_sensor = smu7_read_sensor,
+	.dynamic_state_management_disable = smu7_disable_dpm_tasks,
 };
 
 uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
index eda802bc63c8..8c889caba420 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
@@ -2458,7 +2458,7 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
 		PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
 			"Invalid VramInfo table.", return -EINVAL);
 
-	if (!data->is_memory_gddr5) {
+	if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
 		table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
 		table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
 		for (k = 0; k < table->num_entries; k++) {
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 2f58e9e2a59c..a51f8cbcfe26 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -332,17 +332,19 @@ static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
 {
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
 
-	if (dcrtc->dpms != dpms) {
-		dcrtc->dpms = dpms;
-		if (!IS_ERR(dcrtc->clk) && !dpms_blanked(dpms))
-			WARN_ON(clk_prepare_enable(dcrtc->clk));
-		armada_drm_crtc_update(dcrtc);
-		if (!IS_ERR(dcrtc->clk) && dpms_blanked(dpms))
-			clk_disable_unprepare(dcrtc->clk);
+	if (dpms_blanked(dcrtc->dpms) != dpms_blanked(dpms)) {
 		if (dpms_blanked(dpms))
 			armada_drm_vblank_off(dcrtc);
-		else
+		else if (!IS_ERR(dcrtc->clk))
+			WARN_ON(clk_prepare_enable(dcrtc->clk));
+		dcrtc->dpms = dpms;
+		armada_drm_crtc_update(dcrtc);
+		if (!dpms_blanked(dpms))
 			drm_crtc_vblank_on(&dcrtc->crtc);
+		else if (!IS_ERR(dcrtc->clk))
+			clk_disable_unprepare(dcrtc->clk);
+	} else if (dcrtc->dpms != dpms) {
+		dcrtc->dpms = dpms;
 	}
 }
 
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 1df2d33d0b40..ffb2ab389d1d 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -54,9 +54,6 @@ int drm_name_info(struct seq_file *m, void *data)
 
 	mutex_lock(&dev->master_mutex);
 	master = dev->master;
-	if (!master)
-		goto out_unlock;
-
 	seq_printf(m, "%s", dev->driver->name);
 	if (dev->dev)
 		seq_printf(m, " dev=%s", dev_name(dev->dev));
@@ -65,7 +62,6 @@ int drm_name_info(struct seq_file *m, void *data)
 	if (dev->unique)
 		seq_printf(m, " unique=%s", dev->unique);
 	seq_printf(m, "\n");
-out_unlock:
 	mutex_unlock(&dev->master_mutex);
 
 	return 0;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index cb86c7e5495c..d9230132dfbc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -329,20 +329,34 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 	/*
 	 * Append a LINK to the submitted command buffer to return to
 	 * the ring buffer. return_target is the ring target address.
-	 * We need three dwords: event, wait, link.
+	 * We need at most 7 dwords in the return target: 2 cache flush +
+	 * 2 semaphore stall + 1 event + 1 wait + 1 link.
 	 */
-	return_dwords = 3;
+	return_dwords = 7;
 	return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
 	CMD_LINK(cmdbuf, return_dwords, return_target);
 
 	/*
-	 * Append event, wait and link pointing back to the wait
-	 * command to the ring buffer.
+	 * Append a cache flush, stall, event, wait and link pointing back to
+	 * the wait command to the ring buffer.
 	 */
+	if (gpu->exec_state == ETNA_PIPE_2D) {
+		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+			       VIVS_GL_FLUSH_CACHE_PE2D);
+	} else {
+		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+			       VIVS_GL_FLUSH_CACHE_DEPTH |
+			       VIVS_GL_FLUSH_CACHE_COLOR);
+		CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+			       VIVS_TS_FLUSH_CACHE_FLUSH);
+	}
+	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
 	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
 		       VIVS_GL_EVENT_FROM_PE);
 	CMD_WAIT(buffer);
-	CMD_LINK(buffer, 2, return_target + 8);
+	CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
+			    buffer->user_size - 4);
 
 	if (drm_debug & DRM_UT_DRIVER)
 		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5ce3603e6eac..0370b842d9cc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -748,19 +748,22 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
 	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 	struct page **pvec;
 	uintptr_t ptr;
+	unsigned int flags = 0;
 
 	pvec = drm_malloc_ab(npages, sizeof(struct page *));
 	if (!pvec)
 		return ERR_PTR(-ENOMEM);
 
+	if (!etnaviv_obj->userptr.ro)
+		flags |= FOLL_WRITE;
+
 	pinned = 0;
 	ptr = etnaviv_obj->userptr.ptr;
 
 	down_read(&mm->mmap_sem);
 	while (pinned < npages) {
 		ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
-					    !etnaviv_obj->userptr.ro, 0,
-					    pvec + pinned, NULL);
+					    flags, pvec + pinned, NULL);
 		if (ret < 0)
 			break;
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index d3796ed8d8c5..169ac96e8f08 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -330,7 +330,8 @@ u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
 		return (u32)buf->vram_node.start;
 
 	mutex_lock(&mmu->lock);
-	ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node, buf->size);
+	ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node,
+				      buf->size + SZ_64K);
 	if (ret < 0) {
 		mutex_unlock(&mmu->lock);
 		return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index aa92decf4233..fbd13fabdf2d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -488,7 +488,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
 		goto err_free;
 	}
 
-	ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
+	ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
+			       g2d_userptr->vec);
 	if (ret != npages) {
 		DRM_ERROR("failed to get user pages from userptr.\n");
 		if (ret < 0)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 3371635cd4d7..b2d5e188b1b8 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -51,6 +51,7 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
51 DCU_MODE_DCU_MODE(DCU_MODE_OFF)); 51 DCU_MODE_DCU_MODE(DCU_MODE_OFF));
52 regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, 52 regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
53 DCU_UPDATE_MODE_READREG); 53 DCU_UPDATE_MODE_READREG);
54 clk_disable_unprepare(fsl_dev->pix_clk);
54} 55}
55 56
56static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc) 57static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
@@ -58,6 +59,7 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
58 struct drm_device *dev = crtc->dev; 59 struct drm_device *dev = crtc->dev;
59 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; 60 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
60 61
62 clk_prepare_enable(fsl_dev->pix_clk);
61 regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, 63 regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
62 DCU_MODE_DCU_MODE_MASK, 64 DCU_MODE_DCU_MODE_MASK,
63 DCU_MODE_DCU_MODE(DCU_MODE_NORMAL)); 65 DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
@@ -116,8 +118,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
116 DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) | 118 DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
117 DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) | 119 DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
118 DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL)); 120 DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
119 regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
120 DCU_UPDATE_MODE_READREG);
121 return; 121 return;
122} 122}
123 123
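Gating the pixel clock inside the CRTC hooks ties its state to the display
pipe: ungated immediately before the pipe is programmed on, gated right
after it is programmed off, so the suspend/resume and probe paths in the
next file no longer need to manage it at all. A minimal sketch of the
pairing; clk_prepare_enable()/clk_disable_unprepare() are the real clk API,
the device struct and hook names are placeholders:

#include <linux/clk.h>

struct my_dcu {
	struct clk *pix_clk;
	/* regmap etc. elided */
};

static void my_crtc_enable(struct my_dcu *priv)
{
	if (clk_prepare_enable(priv->pix_clk))	/* ungate before HW access */
		return;
	/* ... switch the controller to NORMAL mode, latch the update ... */
}

static void my_crtc_disable(struct my_dcu *priv)
{
	/* ... switch the controller OFF, latch the update ... */
	clk_disable_unprepare(priv->pix_clk);	/* gate once the pipe is idle */
}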
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 0884c45aefe8..e04efbed1a54 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -267,12 +267,8 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
267 return ret; 267 return ret;
268 } 268 }
269 269
270 ret = clk_prepare_enable(fsl_dev->pix_clk); 270 if (fsl_dev->tcon)
271 if (ret < 0) { 271 fsl_tcon_bypass_enable(fsl_dev->tcon);
272 dev_err(dev, "failed to enable pix clk\n");
273 goto disable_dcu_clk;
274 }
275
276 fsl_dcu_drm_init_planes(fsl_dev->drm); 272 fsl_dcu_drm_init_planes(fsl_dev->drm);
277 drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state); 273 drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
278 274
@@ -284,10 +280,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
284 enable_irq(fsl_dev->irq); 280 enable_irq(fsl_dev->irq);
285 281
286 return 0; 282 return 0;
287
288disable_dcu_clk:
289 clk_disable_unprepare(fsl_dev->clk);
290 return ret;
291} 283}
292#endif 284#endif
293 285
@@ -401,18 +393,12 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
401 goto disable_clk; 393 goto disable_clk;
402 } 394 }
403 395
404 ret = clk_prepare_enable(fsl_dev->pix_clk);
405 if (ret < 0) {
406 dev_err(dev, "failed to enable pix clk\n");
407 goto unregister_pix_clk;
408 }
409
410 fsl_dev->tcon = fsl_tcon_init(dev); 396 fsl_dev->tcon = fsl_tcon_init(dev);
411 397
412 drm = drm_dev_alloc(driver, dev); 398 drm = drm_dev_alloc(driver, dev);
413 if (IS_ERR(drm)) { 399 if (IS_ERR(drm)) {
414 ret = PTR_ERR(drm); 400 ret = PTR_ERR(drm);
415 goto disable_pix_clk; 401 goto unregister_pix_clk;
416 } 402 }
417 403
418 fsl_dev->dev = dev; 404 fsl_dev->dev = dev;
@@ -433,8 +419,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
433 419
434unref: 420unref:
435 drm_dev_unref(drm); 421 drm_dev_unref(drm);
436disable_pix_clk:
437 clk_disable_unprepare(fsl_dev->pix_clk);
438unregister_pix_clk: 422unregister_pix_clk:
439 clk_unregister(fsl_dev->pix_clk); 423 clk_unregister(fsl_dev->pix_clk);
440disable_clk: 424disable_clk:
@@ -447,7 +431,6 @@ static int fsl_dcu_drm_remove(struct platform_device *pdev)
447 struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev); 431 struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
448 432
449 clk_disable_unprepare(fsl_dev->clk); 433 clk_disable_unprepare(fsl_dev->clk);
450 clk_disable_unprepare(fsl_dev->pix_clk);
451 clk_unregister(fsl_dev->pix_clk); 434 clk_unregister(fsl_dev->pix_clk);
452 drm_put_dev(fsl_dev->drm); 435 drm_put_dev(fsl_dev->drm);
453 436
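With the pixel clock now owned by the CRTC hooks, probe no longer enables
it, so its unwind label disappears; the surviving labels still release
resources in exact reverse order of acquisition. The idiom, sketched with
hypothetical resources:

#include <linux/clk.h>
#include <linux/platform_device.h>

struct my_priv {
	struct clk *clk, *pix_clk;
};

static int my_register_drm(struct my_priv *priv);	/* hypothetical */

static int my_probe(struct platform_device *pdev)
{
	struct my_priv *priv = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;			/* nothing to unwind yet */

	ret = clk_prepare_enable(priv->pix_clk);
	if (ret)
		goto disable_clk;

	ret = my_register_drm(priv);
	if (ret)
		goto disable_pix_clk;

	return 0;

disable_pix_clk:				/* reverse acquisition order */
	clk_disable_unprepare(priv->pix_clk);
disable_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}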
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index a7e5486bd1e9..9e6f7d8112b3 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -211,11 +211,6 @@ void fsl_dcu_drm_init_planes(struct drm_device *dev)
211 for (j = 1; j <= fsl_dev->soc->layer_regs; j++) 211 for (j = 1; j <= fsl_dev->soc->layer_regs; j++)
212 regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0); 212 regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
213 } 213 }
214 regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
215 DCU_MODE_DCU_MODE_MASK,
216 DCU_MODE_DCU_MODE(DCU_MODE_OFF));
217 regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
218 DCU_UPDATE_MODE_READREG);
219} 214}
220 215
221struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev) 216struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 26edcc899712..e1dd75b18118 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -20,38 +20,6 @@
20#include "fsl_dcu_drm_drv.h" 20#include "fsl_dcu_drm_drv.h"
21#include "fsl_tcon.h" 21#include "fsl_tcon.h"
22 22
23static int
24fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder,
25 struct drm_crtc_state *crtc_state,
26 struct drm_connector_state *conn_state)
27{
28 return 0;
29}
30
31static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder)
32{
33 struct drm_device *dev = encoder->dev;
34 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
35
36 if (fsl_dev->tcon)
37 fsl_tcon_bypass_disable(fsl_dev->tcon);
38}
39
40static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder)
41{
42 struct drm_device *dev = encoder->dev;
43 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
44
45 if (fsl_dev->tcon)
46 fsl_tcon_bypass_enable(fsl_dev->tcon);
47}
48
49static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
50 .atomic_check = fsl_dcu_drm_encoder_atomic_check,
51 .disable = fsl_dcu_drm_encoder_disable,
52 .enable = fsl_dcu_drm_encoder_enable,
53};
54
55static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder) 23static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
56{ 24{
57 drm_encoder_cleanup(encoder); 25 drm_encoder_cleanup(encoder);
@@ -68,13 +36,16 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
68 int ret; 36 int ret;
69 37
70 encoder->possible_crtcs = 1; 38 encoder->possible_crtcs = 1;
39
40 /* Use bypass mode for parallel RGB/LVDS encoder */
41 if (fsl_dev->tcon)
42 fsl_tcon_bypass_enable(fsl_dev->tcon);
43
71 ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs, 44 ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
72 DRM_MODE_ENCODER_LVDS, NULL); 45 DRM_MODE_ENCODER_LVDS, NULL);
73 if (ret < 0) 46 if (ret < 0)
74 return ret; 47 return ret;
75 48
76 drm_encoder_helper_add(encoder, &encoder_helper_funcs);
77
78 return 0; 49 return 0;
79} 50}
80 51
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index e537930c64b5..c6f780f5abc9 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -508,6 +508,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
508 pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY); 508 pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
509 if (pvec != NULL) { 509 if (pvec != NULL) {
510 struct mm_struct *mm = obj->userptr.mm->mm; 510 struct mm_struct *mm = obj->userptr.mm->mm;
511 unsigned int flags = 0;
512
513 if (!obj->userptr.read_only)
514 flags |= FOLL_WRITE;
511 515
512 ret = -EFAULT; 516 ret = -EFAULT;
513 if (atomic_inc_not_zero(&mm->mm_users)) { 517 if (atomic_inc_not_zero(&mm->mm_users)) {
@@ -517,7 +521,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
517 (work->task, mm, 521 (work->task, mm,
518 obj->userptr.ptr + pinned * PAGE_SIZE, 522 obj->userptr.ptr + pinned * PAGE_SIZE,
519 npages - pinned, 523 npages - pinned,
520 !obj->userptr.read_only, 0, 524 flags,
521 pvec + pinned, NULL); 525 pvec + pinned, NULL);
522 if (ret < 0) 526 if (ret < 0)
523 break; 527 break;
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 6a4b020dd0b4..5a26eb4545aa 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -156,19 +156,20 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
156 struct drm_device *dev = rdev->ddev; 156 struct drm_device *dev = rdev->ddev;
157 struct drm_crtc *crtc; 157 struct drm_crtc *crtc;
158 struct radeon_crtc *radeon_crtc; 158 struct radeon_crtc *radeon_crtc;
159 u32 line_time_us, vblank_lines; 159 u32 vblank_in_pixels;
160 u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ 160 u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
161 161
162 if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { 162 if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
163 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 163 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
164 radeon_crtc = to_radeon_crtc(crtc); 164 radeon_crtc = to_radeon_crtc(crtc);
165 if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { 165 if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
166 line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) / 166 vblank_in_pixels =
167 radeon_crtc->hw_mode.clock; 167 radeon_crtc->hw_mode.crtc_htotal *
168 vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end - 168 (radeon_crtc->hw_mode.crtc_vblank_end -
169 radeon_crtc->hw_mode.crtc_vdisplay + 169 radeon_crtc->hw_mode.crtc_vdisplay +
170 (radeon_crtc->v_border * 2); 170 (radeon_crtc->v_border * 2));
171 vblank_time_us = vblank_lines * line_time_us; 171
172 vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
172 break; 173 break;
173 } 174 }
174 } 175 }
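The r600_dpm hunk is a pure integer-precision fix: the old code truncated
the per-line time to whole microseconds and then multiplied the truncation
error by the number of blanking lines; the new code keeps everything in
pixels and divides once at the end. A standalone demo with 1080p-like
numbers (2200 pixels htotal, 148500 kHz pixel clock, 45 blanking lines;
the figures are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t htotal = 2200, clock_khz = 148500, vblank_lines = 45;

	/* old: divide first, 14 us (truncated from 14.81), times 45 */
	uint32_t line_time_us = htotal * 1000 / clock_khz;
	uint32_t old_us = line_time_us * vblank_lines;		/* 630 us */

	/* new: one division at the end, no compounded truncation */
	uint32_t vblank_in_pixels = htotal * vblank_lines;
	uint32_t new_us = vblank_in_pixels * 1000 / clock_khz;	/* 666 us */

	printf("old = %u us, new = %u us (exact ~666.7 us)\n",
	       old_us, new_us);
	return 0;
}

The vblank time feeds dpm's decision about whether reclocking fits inside
the blanking window, so a five-percent undershoot skews that decision.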
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 50e96d2c593d..e18839d52e3e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -927,6 +927,16 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
927 return ret; 927 return ret;
928} 928}
929 929
930static void radeon_connector_unregister(struct drm_connector *connector)
931{
932 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
933
934 if (radeon_connector->ddc_bus->has_aux) {
935 drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux);
936 radeon_connector->ddc_bus->has_aux = false;
937 }
938}
939
930static void radeon_connector_destroy(struct drm_connector *connector) 940static void radeon_connector_destroy(struct drm_connector *connector)
931{ 941{
932 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 942 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -984,6 +994,7 @@ static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
984 .dpms = drm_helper_connector_dpms, 994 .dpms = drm_helper_connector_dpms,
985 .detect = radeon_lvds_detect, 995 .detect = radeon_lvds_detect,
986 .fill_modes = drm_helper_probe_single_connector_modes, 996 .fill_modes = drm_helper_probe_single_connector_modes,
997 .early_unregister = radeon_connector_unregister,
987 .destroy = radeon_connector_destroy, 998 .destroy = radeon_connector_destroy,
988 .set_property = radeon_lvds_set_property, 999 .set_property = radeon_lvds_set_property,
989}; 1000};
@@ -1111,6 +1122,7 @@ static const struct drm_connector_funcs radeon_vga_connector_funcs = {
1111 .dpms = drm_helper_connector_dpms, 1122 .dpms = drm_helper_connector_dpms,
1112 .detect = radeon_vga_detect, 1123 .detect = radeon_vga_detect,
1113 .fill_modes = drm_helper_probe_single_connector_modes, 1124 .fill_modes = drm_helper_probe_single_connector_modes,
1125 .early_unregister = radeon_connector_unregister,
1114 .destroy = radeon_connector_destroy, 1126 .destroy = radeon_connector_destroy,
1115 .set_property = radeon_connector_set_property, 1127 .set_property = radeon_connector_set_property,
1116}; 1128};
@@ -1188,6 +1200,7 @@ static const struct drm_connector_funcs radeon_tv_connector_funcs = {
1188 .dpms = drm_helper_connector_dpms, 1200 .dpms = drm_helper_connector_dpms,
1189 .detect = radeon_tv_detect, 1201 .detect = radeon_tv_detect,
1190 .fill_modes = drm_helper_probe_single_connector_modes, 1202 .fill_modes = drm_helper_probe_single_connector_modes,
1203 .early_unregister = radeon_connector_unregister,
1191 .destroy = radeon_connector_destroy, 1204 .destroy = radeon_connector_destroy,
1192 .set_property = radeon_connector_set_property, 1205 .set_property = radeon_connector_set_property,
1193}; 1206};
@@ -1519,6 +1532,7 @@ static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
1519 .detect = radeon_dvi_detect, 1532 .detect = radeon_dvi_detect,
1520 .fill_modes = drm_helper_probe_single_connector_modes, 1533 .fill_modes = drm_helper_probe_single_connector_modes,
1521 .set_property = radeon_connector_set_property, 1534 .set_property = radeon_connector_set_property,
1535 .early_unregister = radeon_connector_unregister,
1522 .destroy = radeon_connector_destroy, 1536 .destroy = radeon_connector_destroy,
1523 .force = radeon_dvi_force, 1537 .force = radeon_dvi_force,
1524}; 1538};
@@ -1832,6 +1846,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
1832 .detect = radeon_dp_detect, 1846 .detect = radeon_dp_detect,
1833 .fill_modes = drm_helper_probe_single_connector_modes, 1847 .fill_modes = drm_helper_probe_single_connector_modes,
1834 .set_property = radeon_connector_set_property, 1848 .set_property = radeon_connector_set_property,
1849 .early_unregister = radeon_connector_unregister,
1835 .destroy = radeon_connector_destroy, 1850 .destroy = radeon_connector_destroy,
1836 .force = radeon_dvi_force, 1851 .force = radeon_dvi_force,
1837}; 1852};
@@ -1841,6 +1856,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = {
1841 .detect = radeon_dp_detect, 1856 .detect = radeon_dp_detect,
1842 .fill_modes = drm_helper_probe_single_connector_modes, 1857 .fill_modes = drm_helper_probe_single_connector_modes,
1843 .set_property = radeon_lvds_set_property, 1858 .set_property = radeon_lvds_set_property,
1859 .early_unregister = radeon_connector_unregister,
1844 .destroy = radeon_connector_destroy, 1860 .destroy = radeon_connector_destroy,
1845 .force = radeon_dvi_force, 1861 .force = radeon_dvi_force,
1846}; 1862};
@@ -1850,6 +1866,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
1850 .detect = radeon_dp_detect, 1866 .detect = radeon_dp_detect,
1851 .fill_modes = drm_helper_probe_single_connector_modes, 1867 .fill_modes = drm_helper_probe_single_connector_modes,
1852 .set_property = radeon_lvds_set_property, 1868 .set_property = radeon_lvds_set_property,
1869 .early_unregister = radeon_connector_unregister,
1853 .destroy = radeon_connector_destroy, 1870 .destroy = radeon_connector_destroy,
1854 .force = radeon_dvi_force, 1871 .force = radeon_dvi_force,
1855}; 1872};
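All six connector-funcs tables gain the same hook because the DP aux
channel must be torn down while its parent connector is still registered:
.early_unregister runs from drm_connector_unregister(), before .destroy,
which is also what lets radeon_i2c_destroy() below demote its aux cleanup
to a WARN_ON. A sketch of the wiring; the my_* names are placeholders, the
drm entry points are real:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>

struct my_connector {
	struct drm_connector base;
	struct drm_dp_aux aux;
	bool has_aux;
};

#define to_my_connector(c) container_of(c, struct my_connector, base)

static void my_connector_early_unregister(struct drm_connector *connector)
{
	struct my_connector *mc = to_my_connector(connector);

	if (mc->has_aux) {
		drm_dp_aux_unregister(&mc->aux);
		mc->has_aux = false;	/* destroy path may WARN if still set */
	}
}

static void my_connector_destroy(struct drm_connector *connector)
{
	drm_connector_cleanup(connector);
	kfree(to_my_connector(connector));
}

static const struct drm_connector_funcs my_connector_funcs = {
	/* .dpms, .detect, .fill_modes, .set_property as before */
	.early_unregister = my_connector_early_unregister,
	.destroy = my_connector_destroy,
};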
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b8ab30a7dd6d..cdb8cb568c15 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1675,20 +1675,20 @@ int radeon_modeset_init(struct radeon_device *rdev)
1675 1675
1676void radeon_modeset_fini(struct radeon_device *rdev) 1676void radeon_modeset_fini(struct radeon_device *rdev)
1677{ 1677{
1678 radeon_fbdev_fini(rdev);
1679 kfree(rdev->mode_info.bios_hardcoded_edid);
1680
1681 /* free i2c buses */
1682 radeon_i2c_fini(rdev);
1683
1684 if (rdev->mode_info.mode_config_initialized) { 1678 if (rdev->mode_info.mode_config_initialized) {
1685 radeon_afmt_fini(rdev);
1686 drm_kms_helper_poll_fini(rdev->ddev); 1679 drm_kms_helper_poll_fini(rdev->ddev);
1687 radeon_hpd_fini(rdev); 1680 radeon_hpd_fini(rdev);
1688 drm_crtc_force_disable_all(rdev->ddev); 1681 drm_crtc_force_disable_all(rdev->ddev);
1682 radeon_fbdev_fini(rdev);
1683 radeon_afmt_fini(rdev);
1689 drm_mode_config_cleanup(rdev->ddev); 1684 drm_mode_config_cleanup(rdev->ddev);
1690 rdev->mode_info.mode_config_initialized = false; 1685 rdev->mode_info.mode_config_initialized = false;
1691 } 1686 }
1687
1688 kfree(rdev->mode_info.bios_hardcoded_edid);
1689
1690 /* free i2c buses */
1691 radeon_i2c_fini(rdev);
1692} 1692}
1693 1693
1694static bool is_hdtv_mode(const struct drm_display_mode *mode) 1694static bool is_hdtv_mode(const struct drm_display_mode *mode)
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 91c8f4339566..00ea0002b539 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -96,9 +96,10 @@
96 * 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI 96 * 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
97 * 2.46.0 - Add PFP_SYNC_ME support on evergreen 97 * 2.46.0 - Add PFP_SYNC_ME support on evergreen
98 * 2.47.0 - Add UVD_NO_OP register support 98 * 2.47.0 - Add UVD_NO_OP register support
99 * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
99 */ 100 */
100#define KMS_DRIVER_MAJOR 2 101#define KMS_DRIVER_MAJOR 2
101#define KMS_DRIVER_MINOR 47 102#define KMS_DRIVER_MINOR 48
102#define KMS_DRIVER_PATCHLEVEL 0 103#define KMS_DRIVER_PATCHLEVEL 0
103int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 104int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
104int radeon_driver_unload_kms(struct drm_device *dev); 105int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 021aa005623f..29f7817af821 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -982,9 +982,8 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
982{ 982{
983 if (!i2c) 983 if (!i2c)
984 return; 984 return;
985 WARN_ON(i2c->has_aux);
985 i2c_del_adapter(&i2c->adapter); 986 i2c_del_adapter(&i2c->adapter);
986 if (i2c->has_aux)
987 drm_dp_aux_unregister(&i2c->aux);
988 kfree(i2c); 987 kfree(i2c);
989} 988}
990 989
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 455268214b89..3de5e6e21662 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -566,7 +566,8 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
566 uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE; 566 uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
567 struct page **pages = ttm->pages + pinned; 567 struct page **pages = ttm->pages + pinned;
568 568
569 r = get_user_pages(userptr, num_pages, write, 0, pages, NULL); 569 r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
570 pages, NULL);
570 if (r < 0) 571 if (r < 0)
571 goto release_pages; 572 goto release_pages;
572 573
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 7ee9aafbdf74..e402be8821c4 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -4431,6 +4431,7 @@ static bool si_vm_reg_valid(u32 reg)
4431 case SPI_CONFIG_CNTL: 4431 case SPI_CONFIG_CNTL:
4432 case SPI_CONFIG_CNTL_1: 4432 case SPI_CONFIG_CNTL_1:
4433 case TA_CNTL_AUX: 4433 case TA_CNTL_AUX:
4434 case TA_CS_BC_BASE_ADDR:
4434 return true; 4435 return true;
4435 default: 4436 default:
4436 DRM_ERROR("Invalid register 0x%x in CS\n", reg); 4437 DRM_ERROR("Invalid register 0x%x in CS\n", reg);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index eb220eecba78..65a911ddd509 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1145,6 +1145,7 @@
1145#define SPI_LB_CU_MASK 0x9354 1145#define SPI_LB_CU_MASK 0x9354
1146 1146
1147#define TA_CNTL_AUX 0x9508 1147#define TA_CNTL_AUX 0x9508
1148#define TA_CS_BC_BASE_ADDR 0x950C
1148 1149
1149#define CC_RB_BACKEND_DISABLE 0x98F4 1150#define CC_RB_BACKEND_DISABLE 0x98F4
1150#define BACKEND_DISABLE(x) ((x) << 16) 1151#define BACKEND_DISABLE(x) ((x) << 16)
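Together the two SI hunks are a minimal userspace-feature enable: sid.h
names the 0x950C register, si_vm_reg_valid() whitelists it, and the 2.48.0
bump in radeon_drv.c above is what userspace checks before emitting it.
Distilled, the whitelist is a default-deny switch:

#include <linux/types.h>

#define TA_CNTL_AUX		0x9508	/* value from sid.h above */
#define TA_CS_BC_BASE_ADDR	0x950C	/* newly allowed */

/* Shape of si_vm_reg_valid(): explicit allows, reject anything else. */
static bool vm_reg_valid(u32 reg)
{
	switch (reg) {
	case TA_CNTL_AUX:
	case TA_CS_BC_BASE_ADDR:
		return true;
	default:
		return false;	/* the real code also logs DRM_ERROR */
	}
}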
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 7e2a12c4fed2..1a3ad769f8c8 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -241,8 +241,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
241 down_read(&current->mm->mmap_sem); 241 down_read(&current->mm->mmap_sem);
242 ret = get_user_pages((unsigned long)xfer->mem_addr, 242 ret = get_user_pages((unsigned long)xfer->mem_addr,
243 vsg->num_pages, 243 vsg->num_pages,
244 (vsg->direction == DMA_FROM_DEVICE), 244 (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0,
245 0, vsg->pages, NULL); 245 vsg->pages, NULL);
246 246
247 up_read(&current->mm->mmap_sem); 247 up_read(&current->mm->mmap_sem);
248 if (ret != vsg->num_pages) { 248 if (ret != vsg->num_pages) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e8ae3dc476d1..18061a4bc2f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -241,15 +241,15 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
241 void *ptr); 241 void *ptr);
242 242
243MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); 243MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
244module_param_named(enable_fbdev, enable_fbdev, int, 0600); 244module_param_named(enable_fbdev, enable_fbdev, int, S_IRUSR | S_IWUSR);
245MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages"); 245MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
246module_param_named(force_dma_api, vmw_force_iommu, int, 0600); 246module_param_named(force_dma_api, vmw_force_iommu, int, S_IRUSR | S_IWUSR);
247MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); 247MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
248module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); 248module_param_named(restrict_iommu, vmw_restrict_iommu, int, S_IRUSR | S_IWUSR);
249MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); 249MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
250module_param_named(force_coherent, vmw_force_coherent, int, 0600); 250module_param_named(force_coherent, vmw_force_coherent, int, S_IRUSR | S_IWUSR);
251MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); 251MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
252module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); 252module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR);
253MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes"); 253MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
254module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600); 254module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
255 255
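The vmwgfx parameter hunk changes no behaviour: 0600 and S_IRUSR | S_IWUSR
are the same mode (0400 | 0200); the macros just spell out the
owner-read/write intent. For reference, the idiom in isolation:

#include <linux/moduleparam.h>
#include <linux/stat.h>

static int my_knob;	/* hypothetical backing variable */

/* S_IRUSR (0400) | S_IWUSR (0200) == 0600: the owner may read and write
 * the value through /sys/module/<module>/parameters/my_knob. */
module_param_named(my_knob, my_knob, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(my_knob, "Example knob (illustrative only)");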
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 070d750af16d..1e59a486bba8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -43,7 +43,7 @@
43 43
44#define VMWGFX_DRIVER_DATE "20160210" 44#define VMWGFX_DRIVER_DATE "20160210"
45#define VMWGFX_DRIVER_MAJOR 2 45#define VMWGFX_DRIVER_MAJOR 2
46#define VMWGFX_DRIVER_MINOR 10 46#define VMWGFX_DRIVER_MINOR 11
47#define VMWGFX_DRIVER_PATCHLEVEL 0 47#define VMWGFX_DRIVER_PATCHLEVEL 0
48#define VMWGFX_FILE_PAGE_OFFSET 0x00100000 48#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
49#define VMWGFX_FIFO_STATIC_SIZE (1024*1024) 49#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dc5beff2b4aa..c7b53d987f06 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -35,17 +35,37 @@
35#define VMW_RES_HT_ORDER 12 35#define VMW_RES_HT_ORDER 12
36 36
37/** 37/**
38 * enum vmw_resource_relocation_type - Relocation type for resources
39 *
40 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
41 * command stream is replaced with the actual id after validation.
42 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
43 * with a NOP.
44 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
45 * after validation is -1, the command is replaced with a NOP. Otherwise no
46 * action.
47 */
48enum vmw_resource_relocation_type {
49 vmw_res_rel_normal,
50 vmw_res_rel_nop,
51 vmw_res_rel_cond_nop,
52 vmw_res_rel_max
53};
54
55/**
38 * struct vmw_resource_relocation - Relocation info for resources 56 * struct vmw_resource_relocation - Relocation info for resources
39 * 57 *
40 * @head: List head for the software context's relocation list. 58 * @head: List head for the software context's relocation list.
41 * @res: Non-ref-counted pointer to the resource. 59 * @res: Non-ref-counted pointer to the resource.
42 * @offset: Offset of 4 byte entries into the command buffer where the 60 * @offset: Offset of single byte entries into the command buffer where the
43 * id that needs fixup is located. 61 * id that needs fixup is located.
62 * @rel_type: Type of relocation.
44 */ 63 */
45struct vmw_resource_relocation { 64struct vmw_resource_relocation {
46 struct list_head head; 65 struct list_head head;
47 const struct vmw_resource *res; 66 const struct vmw_resource *res;
48 unsigned long offset; 67 u32 offset:29;
68 enum vmw_resource_relocation_type rel_type:3;
49}; 69};
50 70
51/** 71/**
@@ -109,7 +129,18 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
109 struct vmw_dma_buffer *vbo, 129 struct vmw_dma_buffer *vbo,
110 bool validate_as_mob, 130 bool validate_as_mob,
111 uint32_t *p_val_node); 131 uint32_t *p_val_node);
112 132/**
133 * vmw_ptr_diff - Compute the offset from a to b in bytes
134 *
135 * @a: A starting pointer.
136 * @b: A pointer offset in the same address space.
137 *
138 * Returns: The offset in bytes between the two pointers.
139 */
140static size_t vmw_ptr_diff(void *a, void *b)
141{
142 return (unsigned long) b - (unsigned long) a;
143}
113 144
114/** 145/**
115 * vmw_resources_unreserve - unreserve resources previously reserved for 146 * vmw_resources_unreserve - unreserve resources previously reserved for
@@ -409,11 +440,14 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
409 * @list: Pointer to head of relocation list. 440 * @list: Pointer to head of relocation list.
410 * @res: The resource. 441 * @res: The resource.
411 * @offset: Offset into the command buffer currently being parsed where the 442 * @offset: Offset into the command buffer currently being parsed where the
412 * id that needs fixup is located. Granularity is 4 bytes. 443 * id that needs fixup is located. Granularity is one byte.
444 * @rel_type: Relocation type.
413 */ 445 */
414static int vmw_resource_relocation_add(struct list_head *list, 446static int vmw_resource_relocation_add(struct list_head *list,
415 const struct vmw_resource *res, 447 const struct vmw_resource *res,
416 unsigned long offset) 448 unsigned long offset,
449 enum vmw_resource_relocation_type
450 rel_type)
417{ 451{
418 struct vmw_resource_relocation *rel; 452 struct vmw_resource_relocation *rel;
419 453
@@ -425,6 +459,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
425 459
426 rel->res = res; 460 rel->res = res;
427 rel->offset = offset; 461 rel->offset = offset;
462 rel->rel_type = rel_type;
428 list_add_tail(&rel->head, list); 463 list_add_tail(&rel->head, list);
429 464
430 return 0; 465 return 0;
@@ -459,11 +494,24 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
459{ 494{
460 struct vmw_resource_relocation *rel; 495 struct vmw_resource_relocation *rel;
461 496
497 /* Validate the struct vmw_resource_relocation member size */
498 BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
499 BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
500
462 list_for_each_entry(rel, list, head) { 501 list_for_each_entry(rel, list, head) {
463 if (likely(rel->res != NULL)) 502 u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
464 cb[rel->offset] = rel->res->id; 503 switch (rel->rel_type) {
465 else 504 case vmw_res_rel_normal:
466 cb[rel->offset] = SVGA_3D_CMD_NOP; 505 *addr = rel->res->id;
506 break;
507 case vmw_res_rel_nop:
508 *addr = SVGA_3D_CMD_NOP;
509 break;
510 default:
511 if (rel->res->id == -1)
512 *addr = SVGA_3D_CMD_NOP;
513 break;
514 }
467 } 515 }
468} 516}
469 517
@@ -655,7 +703,9 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
655 *p_val = NULL; 703 *p_val = NULL;
656 ret = vmw_resource_relocation_add(&sw_context->res_relocations, 704 ret = vmw_resource_relocation_add(&sw_context->res_relocations,
657 res, 705 res,
658 id_loc - sw_context->buf_start); 706 vmw_ptr_diff(sw_context->buf_start,
707 id_loc),
708 vmw_res_rel_normal);
659 if (unlikely(ret != 0)) 709 if (unlikely(ret != 0))
660 return ret; 710 return ret;
661 711
@@ -721,7 +771,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
721 771
722 return vmw_resource_relocation_add 772 return vmw_resource_relocation_add
723 (&sw_context->res_relocations, res, 773 (&sw_context->res_relocations, res,
724 id_loc - sw_context->buf_start); 774 vmw_ptr_diff(sw_context->buf_start, id_loc),
775 vmw_res_rel_normal);
725 } 776 }
726 777
727 ret = vmw_user_resource_lookup_handle(dev_priv, 778 ret = vmw_user_resource_lookup_handle(dev_priv,
@@ -2143,10 +2194,10 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
2143 return ret; 2194 return ret;
2144 2195
2145 return vmw_resource_relocation_add(&sw_context->res_relocations, 2196 return vmw_resource_relocation_add(&sw_context->res_relocations,
2146 NULL, &cmd->header.id - 2197 NULL,
2147 sw_context->buf_start); 2198 vmw_ptr_diff(sw_context->buf_start,
2148 2199 &cmd->header.id),
2149 return 0; 2200 vmw_res_rel_nop);
2150} 2201}
2151 2202
2152/** 2203/**
@@ -2188,10 +2239,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
2188 return ret; 2239 return ret;
2189 2240
2190 return vmw_resource_relocation_add(&sw_context->res_relocations, 2241 return vmw_resource_relocation_add(&sw_context->res_relocations,
2191 NULL, &cmd->header.id - 2242 NULL,
2192 sw_context->buf_start); 2243 vmw_ptr_diff(sw_context->buf_start,
2193 2244 &cmd->header.id),
2194 return 0; 2245 vmw_res_rel_nop);
2195} 2246}
2196 2247
2197/** 2248/**
@@ -2848,8 +2899,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2848 * @header: Pointer to the command header in the command stream. 2899 * @header: Pointer to the command header in the command stream.
2849 * 2900 *
2850 * Check that the view exists, and if it was not created using this 2901 * Check that the view exists, and if it was not created using this
2851 * command batch, make sure it's validated (present in the device) so that 2902 * command batch, conditionally make this command a NOP.
2852 * the remove command will not confuse the device.
2853 */ 2903 */
2854static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, 2904static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2855 struct vmw_sw_context *sw_context, 2905 struct vmw_sw_context *sw_context,
@@ -2877,10 +2927,16 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2877 return ret; 2927 return ret;
2878 2928
2879 /* 2929 /*
2880 * Add view to the validate list iff it was not created using this 2930 * If the view wasn't created during this command batch, it might
2881 * command batch. 2931 * have been removed due to a context swapout, so add a
2932 * relocation to conditionally make this command a NOP to avoid
2933 * device errors.
2882 */ 2934 */
2883 return vmw_view_res_val_add(sw_context, view); 2935 return vmw_resource_relocation_add(&sw_context->res_relocations,
2936 view,
2937 vmw_ptr_diff(sw_context->buf_start,
2938 &cmd->header.id),
2939 vmw_res_rel_cond_nop);
2884} 2940}
2885 2941
2886/** 2942/**
@@ -3029,6 +3085,35 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
3029 cmd->body.shaderResourceViewId); 3085 cmd->body.shaderResourceViewId);
3030} 3086}
3031 3087
3088/**
3089 * vmw_cmd_dx_transfer_from_buffer -
3090 * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
3091 *
3092 * @dev_priv: Pointer to a device private struct.
3093 * @sw_context: The software context being used for this batch.
3094 * @header: Pointer to the command header in the command stream.
3095 */
3096static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
3097 struct vmw_sw_context *sw_context,
3098 SVGA3dCmdHeader *header)
3099{
3100 struct {
3101 SVGA3dCmdHeader header;
3102 SVGA3dCmdDXTransferFromBuffer body;
3103 } *cmd = container_of(header, typeof(*cmd), header);
3104 int ret;
3105
3106 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3107 user_surface_converter,
3108 &cmd->body.srcSid, NULL);
3109 if (ret != 0)
3110 return ret;
3111
3112 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3113 user_surface_converter,
3114 &cmd->body.destSid, NULL);
3115}
3116
3032static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, 3117static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3033 struct vmw_sw_context *sw_context, 3118 struct vmw_sw_context *sw_context,
3034 void *buf, uint32_t *size) 3119 void *buf, uint32_t *size)
@@ -3379,6 +3464,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3379 &vmw_cmd_buffer_copy_check, true, false, true), 3464 &vmw_cmd_buffer_copy_check, true, false, true),
3380 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION, 3465 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3381 &vmw_cmd_pred_copy_check, true, false, true), 3466 &vmw_cmd_pred_copy_check, true, false, true),
3467 VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3468 &vmw_cmd_dx_transfer_from_buffer,
3469 true, false, true),
3382}; 3470};
3383 3471
3384static int vmw_cmd_check(struct vmw_private *dev_priv, 3472static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -3848,14 +3936,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3848 int ret; 3936 int ret;
3849 3937
3850 *header = NULL; 3938 *header = NULL;
3851 if (!dev_priv->cman || kernel_commands)
3852 return kernel_commands;
3853
3854 if (command_size > SVGA_CB_MAX_SIZE) { 3939 if (command_size > SVGA_CB_MAX_SIZE) {
3855 DRM_ERROR("Command buffer is too large.\n"); 3940 DRM_ERROR("Command buffer is too large.\n");
3856 return ERR_PTR(-EINVAL); 3941 return ERR_PTR(-EINVAL);
3857 } 3942 }
3858 3943
3944 if (!dev_priv->cman || kernel_commands)
3945 return kernel_commands;
3946
3859 /* If possible, add a little space for fencing. */ 3947 /* If possible, add a little space for fencing. */
3860 cmdbuf_size = command_size + 512; 3948 cmdbuf_size = command_size + 512;
3861 cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE); 3949 cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
@@ -4232,9 +4320,6 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4232 ttm_bo_unref(&query_val.bo); 4320 ttm_bo_unref(&query_val.bo);
4233 ttm_bo_unref(&pinned_val.bo); 4321 ttm_bo_unref(&pinned_val.bo);
4234 vmw_dmabuf_unreference(&dev_priv->pinned_bo); 4322 vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4235 DRM_INFO("Dummy query bo pin count: %d\n",
4236 dev_priv->dummy_query_bo->pin_count);
4237
4238out_unlock: 4323out_unlock:
4239 return; 4324 return;
4240 4325
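The thread running through the execbuf changes: each relocation now packs a
29-bit byte offset (hence vmw_ptr_diff() and the "granularity is one byte"
doc update) and a 3-bit type into one 32-bit word, with the two
BUILD_BUG_ONs guarding exactly those widths. A userspace-flavoured sketch
of the packing and the apply step; the names are mine, not the driver's,
and offsets are assumed 4-byte aligned, as patched command ids are:

#include <stdint.h>

enum rel_type { REL_NORMAL, REL_NOP, REL_COND_NOP, REL_MAX };

struct reloc {
	uint32_t offset : 29;	/* byte offset into the command buffer */
	uint32_t type   : 3;	/* must hold every enum rel_type value */
};

/* Analogue of BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3)); the other
 * BUILD_BUG_ON checks that SVGA_CB_MAX_SIZE fits the 29-bit offset. */
_Static_assert(REL_MAX < (1 << 3), "type field too narrow");

static void apply_reloc(uint8_t *cb, const struct reloc *r,
			uint32_t res_id, uint32_t nop_cmd)
{
	uint32_t *addr = (uint32_t *)(cb + r->offset);

	switch (r->type) {
	case REL_NORMAL:	/* patch in the post-validation id */
		*addr = res_id;
		break;
	case REL_NOP:		/* unconditionally neuter the command */
		*addr = nop_cmd;
		break;
	default:		/* REL_COND_NOP: neuter only if invalid */
		if (res_id == (uint32_t)-1)
			*addr = nop_cmd;
		break;
	}
}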
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6a328d507a28..52ca1c9d070e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -574,10 +574,8 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
574 bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); 574 bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
575 long lret; 575 long lret;
576 576
577 if (nonblock) 577 lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
578 return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY; 578 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
579
580 lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
581 if (!lret) 579 if (!lret)
582 return -EBUSY; 580 return -EBUSY;
583 else if (lret < 0) 581 else if (lret < 0)
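The synccpu simplification leans on the documented return contract of
reservation_object_wait_timeout_rcu(): negative on error (e.g.
-ERESTARTSYS), 0 when the timeout expires with fences still pending,
positive otherwise. A zero timeout therefore acts as a non-blocking poll,
which is what made the separate test_signaled branch redundant. The
contract, sketched:

#include <linux/reservation.h>
#include <linux/sched.h>

static int wait_for_bo_idle(struct reservation_object *resv, bool nonblock)
{
	long lret;

	/* wait_all = true, interruptible = true; a 0 timeout just polls */
	lret = reservation_object_wait_timeout_rcu(resv, true, true,
						   nonblock ? 0 :
						   MAX_SCHEDULE_TIMEOUT);
	if (!lret)
		return -EBUSY;	/* timed out, or busy in the poll case */
	if (lret < 0)
		return lret;	/* interrupted or failed */

	return 0;		/* all fences signaled */
}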
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index c2a721a8cef9..b445ce9b9757 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -324,7 +324,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
324 if (res->id != -1) { 324 if (res->id != -1) {
325 325
326 cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); 326 cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
327 if (unlikely(cmd == NULL)) { 327 if (unlikely(!cmd)) {
328 DRM_ERROR("Failed reserving FIFO space for surface " 328 DRM_ERROR("Failed reserving FIFO space for surface "
329 "destruction.\n"); 329 "destruction.\n");
330 return; 330 return;
@@ -397,7 +397,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
397 397
398 submit_size = vmw_surface_define_size(srf); 398 submit_size = vmw_surface_define_size(srf);
399 cmd = vmw_fifo_reserve(dev_priv, submit_size); 399 cmd = vmw_fifo_reserve(dev_priv, submit_size);
400 if (unlikely(cmd == NULL)) { 400 if (unlikely(!cmd)) {
401 DRM_ERROR("Failed reserving FIFO space for surface " 401 DRM_ERROR("Failed reserving FIFO space for surface "
402 "creation.\n"); 402 "creation.\n");
403 ret = -ENOMEM; 403 ret = -ENOMEM;
@@ -446,11 +446,10 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
446 uint8_t *cmd; 446 uint8_t *cmd;
447 struct vmw_private *dev_priv = res->dev_priv; 447 struct vmw_private *dev_priv = res->dev_priv;
448 448
449 BUG_ON(val_buf->bo == NULL); 449 BUG_ON(!val_buf->bo);
450
451 submit_size = vmw_surface_dma_size(srf); 450 submit_size = vmw_surface_dma_size(srf);
452 cmd = vmw_fifo_reserve(dev_priv, submit_size); 451 cmd = vmw_fifo_reserve(dev_priv, submit_size);
453 if (unlikely(cmd == NULL)) { 452 if (unlikely(!cmd)) {
454 DRM_ERROR("Failed reserving FIFO space for surface " 453 DRM_ERROR("Failed reserving FIFO space for surface "
455 "DMA.\n"); 454 "DMA.\n");
456 return -ENOMEM; 455 return -ENOMEM;
@@ -538,7 +537,7 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
538 537
539 submit_size = vmw_surface_destroy_size(); 538 submit_size = vmw_surface_destroy_size();
540 cmd = vmw_fifo_reserve(dev_priv, submit_size); 539 cmd = vmw_fifo_reserve(dev_priv, submit_size);
541 if (unlikely(cmd == NULL)) { 540 if (unlikely(!cmd)) {
542 DRM_ERROR("Failed reserving FIFO space for surface " 541 DRM_ERROR("Failed reserving FIFO space for surface "
543 "eviction.\n"); 542 "eviction.\n");
544 return -ENOMEM; 543 return -ENOMEM;
@@ -578,7 +577,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
578 int ret; 577 int ret;
579 struct vmw_resource *res = &srf->res; 578 struct vmw_resource *res = &srf->res;
580 579
581 BUG_ON(res_free == NULL); 580 BUG_ON(!res_free);
582 if (!dev_priv->has_mob) 581 if (!dev_priv->has_mob)
583 vmw_fifo_resource_inc(dev_priv); 582 vmw_fifo_resource_inc(dev_priv);
584 ret = vmw_resource_init(dev_priv, res, true, res_free, 583 ret = vmw_resource_init(dev_priv, res, true, res_free,
@@ -700,7 +699,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
700 struct drm_vmw_surface_create_req *req = &arg->req; 699 struct drm_vmw_surface_create_req *req = &arg->req;
701 struct drm_vmw_surface_arg *rep = &arg->rep; 700 struct drm_vmw_surface_arg *rep = &arg->rep;
702 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 701 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
703 struct drm_vmw_size __user *user_sizes;
704 int ret; 702 int ret;
705 int i, j; 703 int i, j;
706 uint32_t cur_bo_offset; 704 uint32_t cur_bo_offset;
@@ -748,7 +746,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
748 } 746 }
749 747
750 user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); 748 user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
751 if (unlikely(user_srf == NULL)) { 749 if (unlikely(!user_srf)) {
752 ret = -ENOMEM; 750 ret = -ENOMEM;
753 goto out_no_user_srf; 751 goto out_no_user_srf;
754 } 752 }
@@ -763,29 +761,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
763 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); 761 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
764 srf->num_sizes = num_sizes; 762 srf->num_sizes = num_sizes;
765 user_srf->size = size; 763 user_srf->size = size;
766 764 srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long)
767 srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL); 765 req->size_addr,
768 if (unlikely(srf->sizes == NULL)) { 766 sizeof(*srf->sizes) * srf->num_sizes);
769 ret = -ENOMEM; 767 if (IS_ERR(srf->sizes)) {
768 ret = PTR_ERR(srf->sizes);
770 goto out_no_sizes; 769 goto out_no_sizes;
771 } 770 }
772 srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets), 771 srf->offsets = kmalloc_array(srf->num_sizes,
773 GFP_KERNEL); 772 sizeof(*srf->offsets),
774 if (unlikely(srf->offsets == NULL)) { 773 GFP_KERNEL);
774 if (unlikely(!srf->offsets)) {
775 ret = -ENOMEM; 775 ret = -ENOMEM;
776 goto out_no_offsets; 776 goto out_no_offsets;
777 } 777 }
778 778
779 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
780 req->size_addr;
781
782 ret = copy_from_user(srf->sizes, user_sizes,
783 srf->num_sizes * sizeof(*srf->sizes));
784 if (unlikely(ret != 0)) {
785 ret = -EFAULT;
786 goto out_no_copy;
787 }
788
789 srf->base_size = *srf->sizes; 779 srf->base_size = *srf->sizes;
790 srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; 780 srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
791 srf->multisample_count = 0; 781 srf->multisample_count = 0;
@@ -923,7 +913,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
923 913
924 ret = -EINVAL; 914 ret = -EINVAL;
925 base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle); 915 base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
926 if (unlikely(base == NULL)) { 916 if (unlikely(!base)) {
927 DRM_ERROR("Could not find surface to reference.\n"); 917 DRM_ERROR("Could not find surface to reference.\n");
928 goto out_no_lookup; 918 goto out_no_lookup;
929 } 919 }
@@ -1069,7 +1059,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
1069 1059
1070 cmd = vmw_fifo_reserve(dev_priv, submit_len); 1060 cmd = vmw_fifo_reserve(dev_priv, submit_len);
1071 cmd2 = (typeof(cmd2))cmd; 1061 cmd2 = (typeof(cmd2))cmd;
1072 if (unlikely(cmd == NULL)) { 1062 if (unlikely(!cmd)) {
1073 DRM_ERROR("Failed reserving FIFO space for surface " 1063 DRM_ERROR("Failed reserving FIFO space for surface "
1074 "creation.\n"); 1064 "creation.\n");
1075 ret = -ENOMEM; 1065 ret = -ENOMEM;
@@ -1135,7 +1125,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
1135 submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0); 1125 submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
1136 1126
1137 cmd1 = vmw_fifo_reserve(dev_priv, submit_size); 1127 cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
1138 if (unlikely(cmd1 == NULL)) { 1128 if (unlikely(!cmd1)) {
1139 DRM_ERROR("Failed reserving FIFO space for surface " 1129 DRM_ERROR("Failed reserving FIFO space for surface "
1140 "binding.\n"); 1130 "binding.\n");
1141 return -ENOMEM; 1131 return -ENOMEM;
@@ -1185,7 +1175,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
1185 1175
1186 submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2)); 1176 submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
1187 cmd = vmw_fifo_reserve(dev_priv, submit_size); 1177 cmd = vmw_fifo_reserve(dev_priv, submit_size);
1188 if (unlikely(cmd == NULL)) { 1178 if (unlikely(!cmd)) {
1189 DRM_ERROR("Failed reserving FIFO space for surface " 1179 DRM_ERROR("Failed reserving FIFO space for surface "
1190 "unbinding.\n"); 1180 "unbinding.\n");
1191 return -ENOMEM; 1181 return -ENOMEM;
@@ -1244,7 +1234,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
1244 vmw_binding_res_list_scrub(&res->binding_head); 1234 vmw_binding_res_list_scrub(&res->binding_head);
1245 1235
1246 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 1236 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
1247 if (unlikely(cmd == NULL)) { 1237 if (unlikely(!cmd)) {
1248 DRM_ERROR("Failed reserving FIFO space for surface " 1238 DRM_ERROR("Failed reserving FIFO space for surface "
1249 "destruction.\n"); 1239 "destruction.\n");
1250 mutex_unlock(&dev_priv->binding_mutex); 1240 mutex_unlock(&dev_priv->binding_mutex);
@@ -1410,7 +1400,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
1410 1400
1411 user_srf = container_of(base, struct vmw_user_surface, prime.base); 1401 user_srf = container_of(base, struct vmw_user_surface, prime.base);
1412 srf = &user_srf->srf; 1402 srf = &user_srf->srf;
1413 if (srf->res.backup == NULL) { 1403 if (!srf->res.backup) {
1414 DRM_ERROR("Shared GB surface is missing a backup buffer.\n"); 1404 DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
1415 goto out_bad_resource; 1405 goto out_bad_resource;
1416 } 1406 }
@@ -1524,7 +1514,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
1524 } 1514 }
1525 1515
1526 user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); 1516 user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
1527 if (unlikely(user_srf == NULL)) { 1517 if (unlikely(!user_srf)) {
1528 ret = -ENOMEM; 1518 ret = -ENOMEM;
1529 goto out_no_user_srf; 1519 goto out_no_user_srf;
1530 } 1520 }
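The surface-define conversion closing this series is the standard
memdup_user() cleanup: one call replaces the kmalloc() + copy_from_user() +
error-unwind triple, and it returns an ERR_PTR (never NULL) on failure, so
the caller propagates PTR_ERR() instead of juggling -ENOMEM and -EFAULT
separately. A minimal sketch with placeholder types:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct item { u32 w, h, d; };	/* stand-in for struct drm_vmw_size */

/* Duplicate a userspace array into kernel memory in one call.
 * Assumes the caller has already bounded count, as the ioctl above does. */
static struct item *dup_items_from_user(u64 user_addr, u32 count)
{
	return memdup_user((void __user *)(unsigned long)user_addr,
			   count * sizeof(struct item));
}

static int example_caller(u64 user_addr, u32 count)
{
	struct item *items = dup_items_from_user(user_addr, count);

	if (IS_ERR(items))
		return PTR_ERR(items);	/* -EFAULT or -ENOMEM */

	/* ... validate and use items ... */
	kfree(items);
	return 0;
}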