-rw-r--r--  drivers/gpu/drm/drm_edid.c  |  3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c  |  3
-rw-r--r--  drivers/gpu/drm/radeon/cik.c  |  18
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c  |  26
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c  |  12
-rw-r--r--  drivers/gpu/drm/radeon/ni.c  |  16
-rw-r--r--  drivers/gpu/drm/radeon/r100.c  |  8
-rw-r--r--  drivers/gpu/drm/radeon/r300.c  |  7
-rw-r--r--  drivers/gpu/drm/radeon/r600.c  |  11
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h  |  22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c  |  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h  |  12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c  |  31
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c  |  8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c  |  44
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c  |  6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c  |  11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c  |  22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c  |  16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h  |  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c  |  9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c  |  3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c  |  3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c  |  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c  |  28
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c  |  7
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c  |  11
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c  |  3
-rw-r--r--  drivers/gpu/drm/radeon/si.c  |  17
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c  |  8
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c  |  13
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v2_2.c  |  2
-rw-r--r--  include/drm/drm_crtc.h  |  3
-rw-r--r--  include/uapi/drm/radeon_drm.h  |  2
34 files changed, 285 insertions(+), 106 deletions(-)
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7be21781d3bd..dfa9769b26b5 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3471,18 +3471,21 @@ static bool drm_assign_hdmi_deep_color_info(struct edid *edid,
 
 	if (hdmi[6] & DRM_EDID_HDMI_DC_30) {
 		dc_bpc = 10;
+		info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_30;
 		DRM_DEBUG("%s: HDMI sink does deep color 30.\n",
 			  connector->name);
 	}
 
 	if (hdmi[6] & DRM_EDID_HDMI_DC_36) {
 		dc_bpc = 12;
+		info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_36;
 		DRM_DEBUG("%s: HDMI sink does deep color 36.\n",
 			  connector->name);
 	}
 
 	if (hdmi[6] & DRM_EDID_HDMI_DC_48) {
 		dc_bpc = 16;
+		info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_48;
 		DRM_DEBUG("%s: HDMI sink does deep color 48.\n",
 			  connector->name);
 	}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 76c30f2da3fb..26c12a3fe430 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -962,6 +962,9 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_
 	struct radeon_connector_atom_dig *dig_connector =
 		radeon_connector->con_priv;
 	int dp_clock;
+
+	/* Assign mode clock for hdmi deep color max clock limit check */
+	radeon_connector->pixelclock_for_modeset = mode->clock;
 	radeon_crtc->bpc = radeon_get_monitor_bpc(connector);
 
 	switch (encoder_mode) {
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 69a00d64716e..dcd4518a9b08 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -80,6 +80,7 @@ extern int sumo_rlc_init(struct radeon_device *rdev);
 extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 extern void si_rlc_reset(struct radeon_device *rdev);
 extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
+static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
 extern int cik_sdma_resume(struct radeon_device *rdev);
 extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
 extern void cik_sdma_fini(struct radeon_device *rdev);
@@ -3257,7 +3258,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
 	u32 mc_shared_chmap, mc_arb_ramcfg;
 	u32 hdp_host_path_cntl;
 	u32 tmp;
-	int i, j;
+	int i, j, k;
 
 	switch (rdev->family) {
 	case CHIP_BONAIRE:
@@ -3446,6 +3447,15 @@ static void cik_gpu_init(struct radeon_device *rdev)
 		       rdev->config.cik.max_sh_per_se,
 		       rdev->config.cik.max_backends_per_se);
 
+	for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
+		for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
+			for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) {
+				rdev->config.cik.active_cus +=
+					hweight32(cik_get_cu_active_bitmap(rdev, i, j));
+			}
+		}
+	}
+
 	/* set HW defaults for 3D engine */
 	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
 
@@ -3698,7 +3708,7 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
 	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
 
 	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
-	radeon_ring_write(ring, addr & 0xffffffff);
+	radeon_ring_write(ring, lower_32_bits(addr));
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
 
 	return true;
@@ -3818,7 +3828,7 @@ void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 		radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
 		radeon_ring_write(ring, WRITE_DATA_DST_SEL(1));
 		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
-		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
 		radeon_ring_write(ring, next_rptr);
 	}
 
@@ -5446,7 +5456,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
 	       (u32)(rdev->dummy_page.addr >> 12));
 	WREG32(VM_CONTEXT1_CNTL2, 4);
 	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
-	       PAGE_TABLE_BLOCK_SIZE(RADEON_VM_BLOCK_SIZE - 9) |
+	       PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
 	       RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
 	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
 	       DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 1347162ca1a4..8e9d0f1d858e 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -141,7 +141,7 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
 		next_rptr += 4;
 		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
 		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
-		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
 		radeon_ring_write(ring, 1); /* number of DWs to follow */
 		radeon_ring_write(ring, next_rptr);
 	}
@@ -151,7 +151,7 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
 	radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
-	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
 	radeon_ring_write(ring, ib->length_dw);
 
 }
@@ -203,8 +203,8 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
 
 	/* write the fence */
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
-	radeon_ring_write(ring, addr & 0xffffffff);
-	radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+	radeon_ring_write(ring, lower_32_bits(addr));
+	radeon_ring_write(ring, upper_32_bits(addr));
 	radeon_ring_write(ring, fence->seq);
 	/* generate an interrupt */
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
@@ -233,7 +233,7 @@ bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
 
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
 	radeon_ring_write(ring, addr & 0xfffffff8);
-	radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+	radeon_ring_write(ring, upper_32_bits(addr));
 
 	return true;
 }
@@ -551,10 +551,10 @@ int cik_copy_dma(struct radeon_device *rdev,
 		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
 		radeon_ring_write(ring, cur_size_in_bytes);
 		radeon_ring_write(ring, 0); /* src/dst endian swap */
-		radeon_ring_write(ring, src_offset & 0xffffffff);
-		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
-		radeon_ring_write(ring, dst_offset & 0xffffffff);
-		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
+		radeon_ring_write(ring, lower_32_bits(src_offset));
+		radeon_ring_write(ring, upper_32_bits(src_offset));
+		radeon_ring_write(ring, lower_32_bits(dst_offset));
+		radeon_ring_write(ring, upper_32_bits(dst_offset));
 		src_offset += cur_size_in_bytes;
 		dst_offset += cur_size_in_bytes;
 	}
@@ -605,7 +605,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
 	}
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
 	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
-	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
+	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
 	radeon_ring_write(ring, 1); /* number of DWs to follow */
 	radeon_ring_write(ring, 0xDEADBEEF);
 	radeon_ring_unlock_commit(rdev, ring);
@@ -660,7 +660,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 
 	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
 	ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
-	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
+	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr);
 	ib.ptr[3] = 1;
 	ib.ptr[4] = 0xDEADBEEF;
 	ib.length_dw = 5;
@@ -752,9 +752,9 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
 			ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
 			ib->ptr[ib->length_dw++] = bytes;
 			ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
-			ib->ptr[ib->length_dw++] = src & 0xffffffff;
+			ib->ptr[ib->length_dw++] = lower_32_bits(src);
 			ib->ptr[ib->length_dw++] = upper_32_bits(src);
-			ib->ptr[ib->length_dw++] = pe & 0xffffffff;
+			ib->ptr[ib->length_dw++] = lower_32_bits(pe);
 			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 
 			pe += bytes;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 653eff814504..e2f605224e8c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3337,6 +3337,18 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		disabled_rb_mask &= ~(1 << i);
 	}
 
+	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
+		u32 simd_disable_bitmap;
+
+		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
+		simd_disable_bitmap |= 0xffffffff << rdev->config.evergreen.max_simds;
+		tmp <<= 16;
+		tmp |= simd_disable_bitmap;
+	}
+	rdev->config.evergreen.active_simds = hweight32(~tmp);
+
 	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
 	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
 
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 1d3209ffbbdc..5a33ca681867 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1057,6 +1057,18 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 		disabled_rb_mask &= ~(1 << i);
 	}
 
+	for (i = 0; i < rdev->config.cayman.max_shader_engines; i++) {
+		u32 simd_disable_bitmap;
+
+		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
+		simd_disable_bitmap |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
+		tmp <<= 16;
+		tmp |= simd_disable_bitmap;
+	}
+	rdev->config.cayman.active_simds = hweight32(~tmp);
+
 	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
 	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
 
@@ -1268,7 +1280,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
 	       (u32)(rdev->dummy_page.addr >> 12));
 	WREG32(VM_CONTEXT1_CNTL2, 4);
 	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
-	       PAGE_TABLE_BLOCK_SIZE(RADEON_VM_BLOCK_SIZE - 9) |
+	       PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
 	       RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
 	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
 	       DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
@@ -1346,7 +1358,7 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
 	/* EVENT_WRITE_EOP - flush caches, send int */
 	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
 	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
-	radeon_ring_write(ring, addr & 0xffffffff);
+	radeon_ring_write(ring, lower_32_bits(addr));
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
 	radeon_ring_write(ring, fence->seq);
 	radeon_ring_write(ring, 0);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ad99813cfa8f..1544efcf1c3a 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -682,15 +682,11 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
 	WREG32(RADEON_AIC_HI_ADDR, 0);
 }
 
-int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
+			    uint64_t addr)
 {
 	u32 *gtt = rdev->gart.ptr;
-
-	if (i < 0 || i > rdev->gart.num_gpu_pages) {
-		return -EINVAL;
-	}
 	gtt[i] = cpu_to_le32(lower_32_bits(addr));
-	return 0;
 }
 
 void r100_pci_gart_fini(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 206caf9700b7..3c21d77a483d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -72,13 +72,11 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
 #define R300_PTE_WRITEABLE (1 << 2)
 #define R300_PTE_READABLE  (1 << 3)
 
-int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+			      uint64_t addr)
 {
 	void __iomem *ptr = rdev->gart.ptr;
 
-	if (i < 0 || i > rdev->gart.num_gpu_pages) {
-		return -EINVAL;
-	}
 	addr = (lower_32_bits(addr) >> 8) |
 	       ((upper_32_bits(addr) & 0xff) << 24) |
 	       R300_PTE_WRITEABLE | R300_PTE_READABLE;
@@ -86,7 +84,6 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 	 * on powerpc without HW swappers, it'll get swapped on way
 	 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
 	writel(addr, ((void __iomem *)ptr) + (i * 4));
-	return 0;
 }
 
 int rv370_pcie_gart_init(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c75881223d18..c66952d4b00c 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1958,6 +1958,9 @@ static void r600_gpu_init(struct radeon_device *rdev)
 	if (tmp < rdev->config.r600.max_simds) {
 		rdev->config.r600.max_simds = tmp;
 	}
+	tmp = rdev->config.r600.max_simds -
+		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
+	rdev->config.r600.active_simds = tmp;
 
 	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
 	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
@@ -2724,7 +2727,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
 	/* EVENT_WRITE_EOP - flush caches, send int */
 	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
 	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
-	radeon_ring_write(ring, addr & 0xffffffff);
+	radeon_ring_write(ring, lower_32_bits(addr));
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
 	radeon_ring_write(ring, fence->seq);
 	radeon_ring_write(ring, 0);
@@ -2763,7 +2766,7 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
 		sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
 
 	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
-	radeon_ring_write(ring, addr & 0xffffffff);
+	radeon_ring_write(ring, lower_32_bits(addr));
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
 
 	return true;
@@ -2824,9 +2827,9 @@ int r600_copy_cpdma(struct radeon_device *rdev,
 		if (size_in_bytes == 0)
 			tmp |= PACKET3_CP_DMA_CP_SYNC;
 		radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
-		radeon_ring_write(ring, src_offset & 0xffffffff);
+		radeon_ring_write(ring, lower_32_bits(src_offset));
 		radeon_ring_write(ring, tmp);
-		radeon_ring_write(ring, dst_offset & 0xffffffff);
+		radeon_ring_write(ring, lower_32_bits(dst_offset));
 		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
 		radeon_ring_write(ring, cur_size_in_bytes);
 		src_offset += cur_size_in_bytes;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 7501ba318c67..4b0bbf88d5c0 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -100,6 +100,8 @@ extern int radeon_dpm;
 extern int radeon_aspm;
 extern int radeon_runtime_pm;
 extern int radeon_hard_reset;
+extern int radeon_vm_size;
+extern int radeon_vm_block_size;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -837,13 +839,8 @@ struct radeon_mec {
 /* maximum number of VMIDs */
 #define RADEON_NUM_VM	16
 
-/* defines number of bits in page table versus page directory,
- * a page is 4KB so we have 12 bits offset, 9 bits in the page
- * table and the remaining 19 bits are in the page directory */
-#define RADEON_VM_BLOCK_SIZE	9
-
 /* number of entries in page table */
-#define RADEON_VM_PTE_COUNT	(1 << RADEON_VM_BLOCK_SIZE)
+#define RADEON_VM_PTE_COUNT	(1 << radeon_vm_block_size)
 
 /* PTBs (Page Table Blocks) need to be aligned to 32K */
 #define RADEON_VM_PTB_ALIGN_SIZE	32768
@@ -997,8 +994,8 @@ struct radeon_cs_reloc {
 	struct radeon_bo	*robj;
 	struct ttm_validate_buffer	tv;
 	uint64_t	gpu_offset;
-	unsigned	domain;
-	unsigned	alt_domain;
+	unsigned	prefered_domains;
+	unsigned	allowed_domains;
 	uint32_t	tiling_flags;
 	uint32_t	handle;
 };
@@ -1782,7 +1779,8 @@ struct radeon_asic {
 	/* gart */
 	struct {
 		void (*tlb_flush)(struct radeon_device *rdev);
-		int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
+		void (*set_page)(struct radeon_device *rdev, unsigned i,
+				 uint64_t addr);
 	} gart;
 	struct {
 		int (*init)(struct radeon_device *rdev);
@@ -1934,6 +1932,7 @@ struct r600_asic {
 	unsigned	tiling_group_size;
 	unsigned	tile_config;
 	unsigned	backend_map;
+	unsigned	active_simds;
 };
 
 struct rv770_asic {
@@ -1959,6 +1958,7 @@ struct rv770_asic {
 	unsigned	tiling_group_size;
 	unsigned	tile_config;
 	unsigned	backend_map;
+	unsigned	active_simds;
 };
 
 struct evergreen_asic {
@@ -1985,6 +1985,7 @@ struct evergreen_asic {
 	unsigned	tiling_group_size;
 	unsigned	tile_config;
 	unsigned	backend_map;
+	unsigned	active_simds;
 };
 
 struct cayman_asic {
@@ -2023,6 +2024,7 @@ struct cayman_asic {
 	unsigned multi_gpu_tile_size;
 
 	unsigned tile_config;
+	unsigned active_simds;
 };
 
 struct si_asic {
@@ -2053,6 +2055,7 @@ struct si_asic {
 
 	unsigned tile_config;
 	uint32_t tile_mode_array[32];
+	uint32_t active_cus;
 };
 
 struct cik_asic {
@@ -2084,6 +2087,7 @@ struct cik_asic {
 	unsigned tile_config;
 	uint32_t tile_mode_array[32];
 	uint32_t macrotile_mode_array[16];
+	uint32_t active_cus;
 };
 
 union radeon_asic_config {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 34ea53d980a1..34b9aa9e3c06 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2029,8 +2029,8 @@ static struct radeon_asic ci_asic = {
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &cik_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.copy = &cik_copy_cpdma,
-		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &cik_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
 	},
 	.surface = {
 		.set_reg = r600_set_surface_reg,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 0eab015b2cfb..01e7c0ad8f01 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -67,7 +67,8 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int r100_asic_reset(struct radeon_device *rdev);
 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
-int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
+			    uint64_t addr);
 void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
 int r100_irq_set(struct radeon_device *rdev);
 int r100_irq_process(struct radeon_device *rdev);
@@ -171,7 +172,8 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
 				 struct radeon_fence *fence);
 extern int r300_cs_parse(struct radeon_cs_parser *p);
 extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
-extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+				     uint64_t addr);
 extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
 extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
 extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -206,7 +208,8 @@ extern void rs400_fini(struct radeon_device *rdev);
 extern int rs400_suspend(struct radeon_device *rdev);
 extern int rs400_resume(struct radeon_device *rdev);
 void rs400_gart_tlb_flush(struct radeon_device *rdev);
-int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+			 uint64_t addr);
 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int rs400_gart_init(struct radeon_device *rdev);
@@ -229,7 +232,8 @@ int rs600_irq_process(struct radeon_device *rdev);
 void rs600_irq_disable(struct radeon_device *rdev);
 u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void rs600_gart_tlb_flush(struct radeon_device *rdev);
-int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+			 uint64_t addr);
 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs600_bandwidth_update(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4522f7dce653..933c5c39654d 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -101,6 +101,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	struct radeon_connector_atom_dig *dig_connector;
 	int bpc = 8;
+	int mode_clock, max_tmds_clock;
 
 	switch (connector->connector_type) {
 	case DRM_MODE_CONNECTOR_DVII:
@@ -166,6 +167,36 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
 				  connector->name, bpc);
 			bpc = 12;
 		}
+
+		/* Any defined maximum tmds clock limit we must not exceed? */
+		if (connector->max_tmds_clock > 0) {
+			/* mode_clock is clock in kHz for mode to be modeset on this connector */
+			mode_clock = radeon_connector->pixelclock_for_modeset;
+
+			/* Maximum allowable input clock in kHz */
+			max_tmds_clock = connector->max_tmds_clock * 1000;
+
+			DRM_DEBUG("%s: hdmi mode dotclock %d kHz, max tmds input clock %d kHz.\n",
+				  connector->name, mode_clock, max_tmds_clock);
+
+			/* Check if bpc is within clock limit. Try to degrade gracefully otherwise */
+			if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) {
+				if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) &&
+				    (mode_clock * 5/4 <= max_tmds_clock))
+					bpc = 10;
+				else
+					bpc = 8;
+
+				DRM_DEBUG("%s: HDMI deep color 12 bpc exceeds max tmds clock. Using %d bpc.\n",
+					  connector->name, bpc);
+			}
+
+			if ((bpc == 10) && (mode_clock * 5/4 > max_tmds_clock)) {
+				bpc = 8;
+				DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n",
+					  connector->name, bpc);
+			}
+		}
 	}
 
 	DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 41ecf8a60611..71a143461478 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -140,10 +140,10 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
 		    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
 			/* TODO: is this still needed for NI+ ? */
-			p->relocs[i].domain =
+			p->relocs[i].prefered_domains =
 				RADEON_GEM_DOMAIN_VRAM;
 
-			p->relocs[i].alt_domain =
+			p->relocs[i].allowed_domains =
 				RADEON_GEM_DOMAIN_VRAM;
 
 			/* prioritize this over any other relocation */
@@ -158,10 +158,10 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 				return -EINVAL;
 			}
 
-			p->relocs[i].domain = domain;
+			p->relocs[i].prefered_domains = domain;
 			if (domain == RADEON_GEM_DOMAIN_VRAM)
 				domain |= RADEON_GEM_DOMAIN_GTT;
-			p->relocs[i].alt_domain = domain;
+			p->relocs[i].allowed_domains = domain;
 		}
 
 		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 31565de1116c..03686fab842d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1052,6 +1052,43 @@ static void radeon_check_arguments(struct radeon_device *rdev)
 		radeon_agpmode = 0;
 		break;
 	}
+
+	if (!radeon_check_pot_argument(radeon_vm_size)) {
+		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
+			 radeon_vm_size);
+		radeon_vm_size = 4096;
+	}
+
+	if (radeon_vm_size < 4) {
+		dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n",
+			 radeon_vm_size);
+		radeon_vm_size = 4096;
+	}
+
+	/*
+	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
+	 */
+	if (radeon_vm_size > 1024*1024) {
+		dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n",
+			 radeon_vm_size);
+		radeon_vm_size = 4096;
+	}
+
+	/* defines number of bits in page table versus page directory,
+	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
+	 * page table and the remaining bits are in the page directory */
+	if (radeon_vm_block_size < 9) {
+		dev_warn(rdev->dev, "VM page table size (%d) to small\n",
+			 radeon_vm_block_size);
+		radeon_vm_block_size = 9;
+	}
+
+	if (radeon_vm_block_size > 24 ||
+	    radeon_vm_size < (1ull << radeon_vm_block_size)) {
+		dev_warn(rdev->dev, "VM page table size (%d) to large\n",
+			 radeon_vm_block_size);
+		radeon_vm_block_size = 9;
+	}
 }
 
 /**
@@ -1197,17 +1234,16 @@ int radeon_device_init(struct radeon_device *rdev,
 	if (r)
 		return r;
 
+	radeon_check_arguments(rdev);
 	/* Adjust VM size here.
-	 * Currently set to 4GB ((1 << 20) 4k pages).
-	 * Max GPUVM size for cayman and SI is 40 bits.
+	 * Max GPUVM size for cayman+ is 40 bits.
 	 */
-	rdev->vm_manager.max_pfn = 1 << 20;
+	rdev->vm_manager.max_pfn = radeon_vm_size << 8;
 
 	/* Set asic functions */
 	r = radeon_asic_init(rdev);
 	if (r)
 		return r;
-	radeon_check_arguments(rdev);
 
 	/* all of the newer IGP chips have an internal gart
 	 * However some rs4xx report as AGP, so remove that here.
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index a4e725c6b8c8..5ed617056b9c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -462,9 +462,6 @@ static void radeon_flip_work_func(struct work_struct *__work)
 	/* We borrow the event spin lock for protecting flip_work */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
-	/* update crtc fb */
-	crtc->primary->fb = fb;
-
 	/* set the proper interrupt */
 	radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
 
@@ -539,6 +536,9 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	}
 	radeon_crtc->flip_work = work;
 
+	/* update crtc fb */
+	crtc->primary->fb = fb;
+
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 
 	queue_work(radeon_crtc->flip_queue, &work->flip_work);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 15447a4119f4..6e3017413386 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -81,9 +81,10 @@
  * 2.37.0 - allow GS ring setup on r6xx/r7xx
  * 2.38.0 - RADEON_GEM_OP (GET_INITIAL_DOMAIN, SET_INITIAL_DOMAIN),
  *          CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
+ * 2.39.0 - Add INFO query for number of active CUs
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	38
+#define KMS_DRIVER_MINOR	39
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -172,6 +173,8 @@ int radeon_dpm = -1;
 int radeon_aspm = -1;
 int radeon_runtime_pm = -1;
 int radeon_hard_reset = 0;
+int radeon_vm_size = 4096;
+int radeon_vm_block_size = 9;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -239,6 +242,12 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444);
 MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
 module_param_named(hard_reset, radeon_hard_reset, int, 0444);
 
+MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)");
+module_param_named(vm_size, radeon_vm_size, int, 0444);
+
+MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
+module_param_named(vm_block_size, radeon_vm_block_size, int, 0444);
+
 static struct pci_device_id pciidlist[] = {
 	radeon_PCI_IDS
 };
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index a77b1c13ea43..913787085dfa 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -819,15 +819,35 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+/**
+ * radeon_debugfs_gpu_reset - manually trigger a gpu reset
+ *
+ * Manually trigger a gpu reset at the next fence wait.
+ */
+static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	down_read(&rdev->exclusive_lock);
+	seq_printf(m, "%d\n", rdev->needs_reset);
+	rdev->needs_reset = true;
+	up_read(&rdev->exclusive_lock);
+
+	return 0;
+}
+
 static struct drm_info_list radeon_debugfs_fence_list[] = {
 	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
+	{"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
 };
 #endif
 
 int radeon_debugfs_fence_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
-	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
+	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
 #else
 	return 0;
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index f0717373a508..35d931881b4b 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -513,6 +513,22 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		value_size = sizeof(uint64_t);
 		value64 = atomic64_read(&rdev->gtt_usage);
 		break;
+	case RADEON_INFO_ACTIVE_CU_COUNT:
+		if (rdev->family >= CHIP_BONAIRE)
+			*value = rdev->config.cik.active_cus;
+		else if (rdev->family >= CHIP_TAHITI)
+			*value = rdev->config.si.active_cus;
+		else if (rdev->family >= CHIP_CAYMAN)
+			*value = rdev->config.cayman.active_simds;
+		else if (rdev->family >= CHIP_CEDAR)
+			*value = rdev->config.evergreen.active_simds;
+		else if (rdev->family >= CHIP_RV770)
+			*value = rdev->config.rv770.active_simds;
+		else if (rdev->family >= CHIP_R600)
+			*value = rdev->config.r600.active_simds;
+		else
+			*value = 1;
+		break;
 	default:
 		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
 		return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index ea72ad889a11..ad0e4b8cc7e3 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -506,6 +506,7 @@ struct radeon_connector {
 	struct radeon_i2c_chan *router_bus;
 	enum radeon_connector_audio audio;
 	enum radeon_connector_dither dither;
+	int pixelclock_for_modeset;
 };
 
 struct radeon_framebuffer {
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 2918087e572f..6c717b257d6d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -446,7 +446,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 	list_for_each_entry(lobj, head, tv.head) {
 		bo = lobj->robj;
 		if (!bo->pin_count) {
-			u32 domain = lobj->domain;
+			u32 domain = lobj->prefered_domains;
 			u32 current_domain =
 				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
 
@@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 			 * into account. We don't want to disallow buffer moves
 			 * completely.
 			 */
-			if ((lobj->alt_domain & current_domain) != 0 &&
+			if ((lobj->allowed_domains & current_domain) != 0 &&
 			    (domain & current_domain) == 0 && /* will be moved */
 			    bytes_moved > bytes_moved_threshold) {
 				/* don't move it */
@@ -476,8 +476,9 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 					initial_bytes_moved;
 
 			if (unlikely(r)) {
-				if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
-					domain = lobj->alt_domain;
+				if (r != -ERESTARTSYS &&
+				    domain != lobj->allowed_domains) {
+					domain = lobj->allowed_domains;
 					goto retry;
 				}
 				ttm_eu_backoff_reservation(ticket, head);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 2bdae61c0ac0..12c663e86ca1 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -984,6 +984,8 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
 	if (enable) {
 		mutex_lock(&rdev->pm.mutex);
 		rdev->pm.dpm.uvd_active = true;
+		/* disable this for now */
+#if 0
 		if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
 			dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
 		else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
@@ -993,6 +995,7 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
 		else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
 			dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
 		else
+#endif
 			dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
 		rdev->pm.dpm.state = dpm_state;
 		mutex_unlock(&rdev->pm.mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 1b65ae2433cd..a4ad270e8261 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -812,7 +812,8 @@ void radeon_uvd_note_usage(struct radeon_device *rdev)
 		    (rdev->pm.dpm.hd != hd)) {
 			rdev->pm.dpm.sd = sd;
 			rdev->pm.dpm.hd = hd;
-			streams_changed = true;
+			/* disable this for now */
+			/*streams_changed = true;*/
 		}
 	}
 
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 3971d968af6c..aa21c31a846c 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -66,6 +66,7 @@ int radeon_vce_init(struct radeon_device *rdev)
 	case CHIP_BONAIRE:
 	case CHIP_KAVERI:
 	case CHIP_KABINI:
+	case CHIP_HAWAII:
 	case CHIP_MULLINS:
 		fw_name = FIRMWARE_BONAIRE;
 		break;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index a72e9c81805d..899d9126cad6 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -59,7 +59,7 @@
59 */ 59 */
60static unsigned radeon_vm_num_pdes(struct radeon_device *rdev) 60static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
61{ 61{
62 return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE; 62 return rdev->vm_manager.max_pfn >> radeon_vm_block_size;
63} 63}
64 64
65/** 65/**
@@ -140,8 +140,8 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
140 /* add the vm page table to the list */ 140 /* add the vm page table to the list */
141 list[0].gobj = NULL; 141 list[0].gobj = NULL;
142 list[0].robj = vm->page_directory; 142 list[0].robj = vm->page_directory;
143 list[0].domain = RADEON_GEM_DOMAIN_VRAM; 143 list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
144 list[0].alt_domain = RADEON_GEM_DOMAIN_VRAM; 144 list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
145 list[0].tv.bo = &vm->page_directory->tbo; 145 list[0].tv.bo = &vm->page_directory->tbo;
146 list[0].tiling_flags = 0; 146 list[0].tiling_flags = 0;
147 list[0].handle = 0; 147 list[0].handle = 0;
@@ -153,8 +153,8 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
153 153
154 list[idx].gobj = NULL; 154 list[idx].gobj = NULL;
155 list[idx].robj = vm->page_tables[i].bo; 155 list[idx].robj = vm->page_tables[i].bo;
156 list[idx].domain = RADEON_GEM_DOMAIN_VRAM; 156 list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
157 list[idx].alt_domain = RADEON_GEM_DOMAIN_VRAM; 157 list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
158 list[idx].tv.bo = &list[idx].robj->tbo; 158 list[idx].tv.bo = &list[idx].robj->tbo;
159 list[idx].tiling_flags = 0; 159 list[idx].tiling_flags = 0;
160 list[idx].handle = 0; 160 list[idx].handle = 0;
@@ -474,8 +474,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
474 bo_va->valid = false; 474 bo_va->valid = false;
475 list_move(&bo_va->vm_list, head); 475 list_move(&bo_va->vm_list, head);
476 476
477 soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE; 477 soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
478 eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE; 478 eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
479
480 BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));
479 481
480 if (eoffset > vm->max_pde_used) 482 if (eoffset > vm->max_pde_used)
481 vm->max_pde_used = eoffset; 483 vm->max_pde_used = eoffset;
@@ -583,10 +585,9 @@ static uint32_t radeon_vm_page_flags(uint32_t flags)
583int radeon_vm_update_page_directory(struct radeon_device *rdev, 585int radeon_vm_update_page_directory(struct radeon_device *rdev,
584 struct radeon_vm *vm) 586 struct radeon_vm *vm)
585{ 587{
586 static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
587
588 struct radeon_bo *pd = vm->page_directory; 588 struct radeon_bo *pd = vm->page_directory;
589 uint64_t pd_addr = radeon_bo_gpu_offset(pd); 589 uint64_t pd_addr = radeon_bo_gpu_offset(pd);
590 uint32_t incr = RADEON_VM_PTE_COUNT * 8;
590 uint64_t last_pde = ~0, last_pt = ~0; 591 uint64_t last_pde = ~0, last_pt = ~0;
591 unsigned count = 0, pt_idx, ndw; 592 unsigned count = 0, pt_idx, ndw;
592 struct radeon_ib ib; 593 struct radeon_ib ib;
@@ -757,8 +758,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
757 uint64_t start, uint64_t end, 758 uint64_t start, uint64_t end,
758 uint64_t dst, uint32_t flags) 759 uint64_t dst, uint32_t flags)
759{ 760{
760 static const uint64_t mask = RADEON_VM_PTE_COUNT - 1; 761 uint64_t mask = RADEON_VM_PTE_COUNT - 1;
761
762 uint64_t last_pte = ~0, last_dst = ~0; 762 uint64_t last_pte = ~0, last_dst = ~0;
763 unsigned count = 0; 763 unsigned count = 0;
764 uint64_t addr; 764 uint64_t addr;
@@ -768,7 +768,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
768 768
769 /* walk over the address space and update the page tables */ 769 /* walk over the address space and update the page tables */
770 for (addr = start; addr < end; ) { 770 for (addr = start; addr < end; ) {
771 uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE; 771 uint64_t pt_idx = addr >> radeon_vm_block_size;
772 struct radeon_bo *pt = vm->page_tables[pt_idx].bo; 772 struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
773 unsigned nptes; 773 unsigned nptes;
774 uint64_t pte; 774 uint64_t pte;
@@ -873,13 +873,13 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
873 /* padding, etc. */ 873 /* padding, etc. */
874 ndw = 64; 874 ndw = 64;
875 875
876 if (RADEON_VM_BLOCK_SIZE > 11) 876 if (radeon_vm_block_size > 11)
877 /* reserve space for one header for every 2k dwords */ 877 /* reserve space for one header for every 2k dwords */
878 ndw += (nptes >> 11) * 4; 878 ndw += (nptes >> 11) * 4;
879 else 879 else
880 /* reserve space for one header for 880 /* reserve space for one header for
881 every (1 << BLOCK_SIZE) entries */ 881 every (1 << BLOCK_SIZE) entries */
882 ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4; 882 ndw += (nptes >> radeon_vm_block_size) * 4;
883 883
884 /* reserve space for pte addresses */ 884 /* reserve space for pte addresses */
885 ndw += nptes * 2; 885 ndw += nptes * 2;
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 130d5cc50d43..a0f96decece3 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -212,21 +212,16 @@ void rs400_gart_fini(struct radeon_device *rdev)
212#define RS400_PTE_WRITEABLE (1 << 2) 212#define RS400_PTE_WRITEABLE (1 << 2)
213#define RS400_PTE_READABLE (1 << 3) 213#define RS400_PTE_READABLE (1 << 3)
214 214
215int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 215void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
216{ 216{
217 uint32_t entry; 217 uint32_t entry;
218 u32 *gtt = rdev->gart.ptr; 218 u32 *gtt = rdev->gart.ptr;
219 219
220 if (i < 0 || i > rdev->gart.num_gpu_pages) {
221 return -EINVAL;
222 }
223
224 entry = (lower_32_bits(addr) & PAGE_MASK) | 220 entry = (lower_32_bits(addr) & PAGE_MASK) |
225 ((upper_32_bits(addr) & 0xff) << 4) | 221 ((upper_32_bits(addr) & 0xff) << 4) |
226 RS400_PTE_WRITEABLE | RS400_PTE_READABLE; 222 RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
227 entry = cpu_to_le32(entry); 223 entry = cpu_to_le32(entry);
228 gtt[i] = entry; 224 gtt[i] = entry;
229 return 0;
230} 225}
231 226
232int rs400_mc_wait_for_idle(struct radeon_device *rdev) 227int rs400_mc_wait_for_idle(struct radeon_device *rdev)
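In the rs400 hunk the range check and error return are dropped and set_page becomes void, but the entry layout is unchanged: address bits 39:32 are packed into bits 11:4 of the 32-bit GART entry, next to the page-aligned low bits. An illustrative decoder, assuming 4 KiB pages and ignoring the flag bits (not driver code):

#include <stdint.h>

/* Recover the DMA address from a 32-bit RS400 GART entry. */
static uint64_t example_rs400_entry_to_addr(uint32_t entry)
{
	uint64_t addr = entry & 0xfffff000u;		/* address bits 31:12 */

	addr |= (uint64_t)((entry >> 4) & 0xff) << 32;	/* address bits 39:32 */
	return addr;
}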
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 0a8be63926d8..d1a35cb1c91d 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -626,17 +626,16 @@ static void rs600_gart_fini(struct radeon_device *rdev)
626 radeon_gart_table_vram_free(rdev); 626 radeon_gart_table_vram_free(rdev);
627} 627}
628 628
629int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 629void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
630{ 630{
631 void __iomem *ptr = (void *)rdev->gart.ptr; 631 void __iomem *ptr = (void *)rdev->gart.ptr;
632 632
633 if (i < 0 || i > rdev->gart.num_gpu_pages) {
634 return -EINVAL;
635 }
636 addr = addr & 0xFFFFFFFFFFFFF000ULL; 633 addr = addr & 0xFFFFFFFFFFFFF000ULL;
637 addr |= R600_PTE_GART; 634 if (addr == rdev->dummy_page.addr)
635 addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
636 else
637 addr |= R600_PTE_GART;
638 writeq(addr, ptr + (i * 8)); 638 writeq(addr, ptr + (i * 8));
639 return 0;
640} 639}
641 640
642int rs600_irq_set(struct radeon_device *rdev) 641int rs600_irq_set(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 97b776666b75..da8703d8d455 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1327,6 +1327,9 @@ static void rv770_gpu_init(struct radeon_device *rdev)
1327 if (tmp < rdev->config.rv770.max_simds) { 1327 if (tmp < rdev->config.rv770.max_simds) {
1328 rdev->config.rv770.max_simds = tmp; 1328 rdev->config.rv770.max_simds = tmp;
1329 } 1329 }
1330 tmp = rdev->config.rv770.max_simds -
1331 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
1332 rdev->config.rv770.active_simds = tmp;
1330 1333
1331 switch (rdev->config.rv770.max_tile_pipes) { 1334 switch (rdev->config.rv770.max_tile_pipes) {
1332 case 1: 1335 case 1:
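The new rv770.c lines derive the usable SIMD count by subtracting the fused-off units; r600_count_pipe_bits() is effectively a population count of the disable mask read from CC_GC_SHADER_PIPE_CONFIG. Reduced to its arithmetic, with a hypothetical helper name:

#include <stdint.h>

/* active = physical maximum minus the units disabled in the harvest mask. */
static unsigned example_active_simds(unsigned max_simds, uint32_t disabled_mask)
{
	return max_simds - (unsigned)__builtin_popcount(disabled_mask);
}

For instance, a part with max_simds = 10 and two bits set in the disable mask ends up reporting 8 active SIMDs.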
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d64ef9115b69..730cee2c34cf 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -71,6 +71,7 @@ MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
71MODULE_FIRMWARE("radeon/HAINAN_rlc.bin"); 71MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
72MODULE_FIRMWARE("radeon/HAINAN_smc.bin"); 72MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
73 73
74static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
74static void si_pcie_gen3_enable(struct radeon_device *rdev); 75static void si_pcie_gen3_enable(struct radeon_device *rdev);
75static void si_program_aspm(struct radeon_device *rdev); 76static void si_program_aspm(struct radeon_device *rdev);
76extern void sumo_rlc_fini(struct radeon_device *rdev); 77extern void sumo_rlc_fini(struct radeon_device *rdev);
@@ -2900,7 +2901,7 @@ static void si_gpu_init(struct radeon_device *rdev)
2900 u32 sx_debug_1; 2901 u32 sx_debug_1;
2901 u32 hdp_host_path_cntl; 2902 u32 hdp_host_path_cntl;
2902 u32 tmp; 2903 u32 tmp;
2903 int i, j; 2904 int i, j, k;
2904 2905
2905 switch (rdev->family) { 2906 switch (rdev->family) {
2906 case CHIP_TAHITI: 2907 case CHIP_TAHITI:
@@ -3098,6 +3099,14 @@ static void si_gpu_init(struct radeon_device *rdev)
3098 rdev->config.si.max_sh_per_se, 3099 rdev->config.si.max_sh_per_se,
3099 rdev->config.si.max_cu_per_sh); 3100 rdev->config.si.max_cu_per_sh);
3100 3101
3102 for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
3103 for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
3104 for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
3105 rdev->config.si.active_cus +=
3106 hweight32(si_get_cu_active_bitmap(rdev, i, j));
3107 }
3108 }
3109 }
3101 3110
3102 /* set HW defaults for 3D engine */ 3111 /* set HW defaults for 3D engine */
3103 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | 3112 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
@@ -3186,7 +3195,7 @@ void si_fence_ring_emit(struct radeon_device *rdev,
3186 /* EVENT_WRITE_EOP - flush caches, send int */ 3195 /* EVENT_WRITE_EOP - flush caches, send int */
3187 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); 3196 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
3188 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5)); 3197 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
3189 radeon_ring_write(ring, addr & 0xffffffff); 3198 radeon_ring_write(ring, lower_32_bits(addr));
3190 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); 3199 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
3191 radeon_ring_write(ring, fence->seq); 3200 radeon_ring_write(ring, fence->seq);
3192 radeon_ring_write(ring, 0); 3201 radeon_ring_write(ring, 0);
@@ -3219,7 +3228,7 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3219 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3228 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3220 radeon_ring_write(ring, (1 << 8)); 3229 radeon_ring_write(ring, (1 << 8));
3221 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); 3230 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3222 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); 3231 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
3223 radeon_ring_write(ring, next_rptr); 3232 radeon_ring_write(ring, next_rptr);
3224 } 3233 }
3225 3234
@@ -4095,7 +4104,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
4095 (u32)(rdev->dummy_page.addr >> 12)); 4104 (u32)(rdev->dummy_page.addr >> 12));
4096 WREG32(VM_CONTEXT1_CNTL2, 4); 4105 WREG32(VM_CONTEXT1_CNTL2, 4);
4097 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | 4106 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
4098 PAGE_TABLE_BLOCK_SIZE(RADEON_VM_BLOCK_SIZE - 9) | 4107 PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
4099 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | 4108 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
4100 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | 4109 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
4101 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT | 4110 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 9a660f861d2c..e24c94b6d14d 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -88,8 +88,8 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
88 88
89 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY, 89 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
90 1, 0, 0, bytes); 90 1, 0, 0, bytes);
91 ib->ptr[ib->length_dw++] = pe & 0xffffffff; 91 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
92 ib->ptr[ib->length_dw++] = src & 0xffffffff; 92 ib->ptr[ib->length_dw++] = lower_32_bits(src);
93 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; 93 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
94 ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff; 94 ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
95 95
@@ -220,8 +220,8 @@ int si_copy_dma(struct radeon_device *rdev,
220 cur_size_in_bytes = 0xFFFFF; 220 cur_size_in_bytes = 0xFFFFF;
221 size_in_bytes -= cur_size_in_bytes; 221 size_in_bytes -= cur_size_in_bytes;
222 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes)); 222 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
223 radeon_ring_write(ring, dst_offset & 0xffffffff); 223 radeon_ring_write(ring, lower_32_bits(dst_offset));
224 radeon_ring_write(ring, src_offset & 0xffffffff); 224 radeon_ring_write(ring, lower_32_bits(src_offset));
225 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); 225 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
226 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); 226 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
227 src_offset += cur_size_in_bytes; 227 src_offset += cur_size_in_bytes;
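The si_dma.c changes (and the later uvd_v2_2.c one) are cosmetic: open-coded "& 0xffffffff" masks become lower_32_bits()/upper_32_bits() calls with identical results. As a reference for the helpers' semantics, a sketch rather than the kernel's exact macro definitions:

#include <stdint.h>

static inline uint32_t example_lower_32_bits(uint64_t v) { return (uint32_t)v; }
static inline uint32_t example_upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }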
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 9a3567bedaae..58918868f894 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -1948,6 +1948,10 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
1948 si_pi->cac_weights = cac_weights_cape_verde_pro; 1948 si_pi->cac_weights = cac_weights_cape_verde_pro;
1949 si_pi->dte_data = dte_data_cape_verde; 1949 si_pi->dte_data = dte_data_cape_verde;
1950 break; 1950 break;
1951 case 0x682C:
1952 si_pi->cac_weights = cac_weights_cape_verde_pro;
1953 si_pi->dte_data = dte_data_sun_xt;
1954 break;
1951 case 0x6825: 1955 case 0x6825:
1952 case 0x6827: 1956 case 0x6827:
1953 si_pi->cac_weights = cac_weights_heathrow; 1957 si_pi->cac_weights = cac_weights_heathrow;
@@ -1971,10 +1975,9 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
1971 si_pi->dte_data = dte_data_venus_xt; 1975 si_pi->dte_data = dte_data_venus_xt;
1972 break; 1976 break;
1973 case 0x6823: 1977 case 0x6823:
1974 si_pi->cac_weights = cac_weights_chelsea_pro;
1975 si_pi->dte_data = dte_data_venus_pro;
1976 break;
1977 case 0x682B: 1978 case 0x682B:
1979 case 0x6822:
1980 case 0x682A:
1978 si_pi->cac_weights = cac_weights_chelsea_pro; 1981 si_pi->cac_weights = cac_weights_chelsea_pro;
1979 si_pi->dte_data = dte_data_venus_pro; 1982 si_pi->dte_data = dte_data_venus_pro;
1980 break; 1983 break;
@@ -1988,6 +1991,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
1988 case 0x6601: 1991 case 0x6601:
1989 case 0x6621: 1992 case 0x6621:
1990 case 0x6603: 1993 case 0x6603:
1994 case 0x6605:
1991 si_pi->cac_weights = cac_weights_mars_pro; 1995 si_pi->cac_weights = cac_weights_mars_pro;
1992 si_pi->lcac_config = lcac_mars_pro; 1996 si_pi->lcac_config = lcac_mars_pro;
1993 si_pi->cac_override = cac_override_oland; 1997 si_pi->cac_override = cac_override_oland;
@@ -1998,6 +2002,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
1998 case 0x6600: 2002 case 0x6600:
1999 case 0x6606: 2003 case 0x6606:
2000 case 0x6620: 2004 case 0x6620:
2005 case 0x6604:
2001 si_pi->cac_weights = cac_weights_mars_xt; 2006 si_pi->cac_weights = cac_weights_mars_xt;
2002 si_pi->lcac_config = lcac_mars_pro; 2007 si_pi->lcac_config = lcac_mars_pro;
2003 si_pi->cac_override = cac_override_oland; 2008 si_pi->cac_override = cac_override_oland;
@@ -2006,6 +2011,8 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
2006 update_dte_from_pl2 = true; 2011 update_dte_from_pl2 = true;
2007 break; 2012 break;
2008 case 0x6611: 2013 case 0x6611:
2014 case 0x6613:
2015 case 0x6608:
2009 si_pi->cac_weights = cac_weights_oland_pro; 2016 si_pi->cac_weights = cac_weights_oland_pro;
2010 si_pi->lcac_config = lcac_mars_pro; 2017 si_pi->lcac_config = lcac_mars_pro;
2011 si_pi->cac_override = cac_override_oland; 2018 si_pi->cac_override = cac_override_oland;
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index d1771004cb52..8bfdadd56598 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -45,7 +45,7 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
45 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); 45 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
46 radeon_ring_write(ring, fence->seq); 46 radeon_ring_write(ring, fence->seq);
47 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0)); 47 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
48 radeon_ring_write(ring, addr & 0xffffffff); 48 radeon_ring_write(ring, lower_32_bits(addr));
49 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0)); 49 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
50 radeon_ring_write(ring, upper_32_bits(addr) & 0xff); 50 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
51 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); 51 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index a7fac5686915..251b75e6bf7a 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -121,6 +121,9 @@ struct drm_display_info {
121 enum subpixel_order subpixel_order; 121 enum subpixel_order subpixel_order;
122 u32 color_formats; 122 u32 color_formats;
123 123
124 /* Mask of supported hdmi deep color modes */
125 u8 edid_hdmi_dc_modes;
126
124 u8 cea_rev; 127 u8 cea_rev;
125}; 128};
126 129
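The new edid_hdmi_dc_modes field records which HDMI deep color modes the sink advertises, using the DRM_EDID_HDMI_DC_* bits. A hedged sketch of how a driver might translate that mask into a maximum component depth; the helper name is illustrative, not a DRM core API:

#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>

static int example_max_hdmi_bpc(struct drm_connector *connector)
{
	u8 dc = connector->display_info.edid_hdmi_dc_modes;

	if (dc & DRM_EDID_HDMI_DC_48)
		return 16;
	if (dc & DRM_EDID_HDMI_DC_36)
		return 12;
	if (dc & DRM_EDID_HDMI_DC_30)
		return 10;
	return 8;	/* no deep color: standard 8 bits per component */
}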
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index aefa2f6afa3b..1cc0b610f162 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -1007,7 +1007,7 @@ struct drm_radeon_cs {
1007#define RADEON_INFO_NUM_BYTES_MOVED 0x1d 1007#define RADEON_INFO_NUM_BYTES_MOVED 0x1d
1008#define RADEON_INFO_VRAM_USAGE 0x1e 1008#define RADEON_INFO_VRAM_USAGE 0x1e
1009#define RADEON_INFO_GTT_USAGE 0x1f 1009#define RADEON_INFO_GTT_USAGE 0x1f
1010 1010#define RADEON_INFO_ACTIVE_CU_COUNT 0x20
1011 1011
1012struct drm_radeon_info { 1012struct drm_radeon_info {
1013 uint32_t request; 1013 uint32_t request;
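RADEON_INFO_ACTIVE_CU_COUNT exposes the active shader-unit totals gathered above through the existing DRM_RADEON_INFO query. A minimal userspace sketch using libdrm, assuming an already-open DRM file descriptor and with error handling trimmed; the example_ helper name is not part of libdrm:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <radeon_drm.h>	/* from libdrm's include path */

static int example_query_active_cus(int fd, uint32_t *count)
{
	uint32_t value = 0;
	struct drm_radeon_info info;
	int ret;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_ACTIVE_CU_COUNT;
	info.value = (uintptr_t)&value;	/* kernel writes the result through this pointer */

	ret = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
	if (ret == 0)
		*count = value;
	return ret;
}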