about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/amd
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c25
-rw-r--r--drivers/gpu/drm/amd/include/v9_structs.h8
2 files changed, 26 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 69182eeca264..7c06d1b99d99 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1346,7 +1346,7 @@ static int gfx_v9_0_sw_init(void *handle)
1346 return r; 1346 return r;
1347 1347
1348 /* create MQD for all compute queues as wel as KIQ for SRIOV case */ 1348 /* create MQD for all compute queues as wel as KIQ for SRIOV case */
1349 r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd)); 1349 r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
1350 if (r) 1350 if (r)
1351 return r; 1351 return r;
1352 1352
@@ -2463,6 +2463,13 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
2463 mqd->compute_static_thread_mgmt_se3 = 0xffffffff; 2463 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2464 mqd->compute_misc_reserved = 0x00000003; 2464 mqd->compute_misc_reserved = 0x00000003;
2465 2465
2466 mqd->dynamic_cu_mask_addr_lo =
2467 lower_32_bits(ring->mqd_gpu_addr
2468 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2469 mqd->dynamic_cu_mask_addr_hi =
2470 upper_32_bits(ring->mqd_gpu_addr
2471 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2472
2466 eop_base_addr = ring->eop_gpu_addr >> 8; 2473 eop_base_addr = ring->eop_gpu_addr >> 8;
2467 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; 2474 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2468 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); 2475 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
@@ -2695,7 +2702,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
2695 if (adev->gfx.in_reset) { /* for GPU_RESET case */ 2702 if (adev->gfx.in_reset) { /* for GPU_RESET case */
2696 /* reset MQD to a clean status */ 2703 /* reset MQD to a clean status */
2697 if (adev->gfx.mec.mqd_backup[mqd_idx]) 2704 if (adev->gfx.mec.mqd_backup[mqd_idx])
2698 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 2705 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
2699 2706
2700 /* reset ring buffer */ 2707 /* reset ring buffer */
2701 ring->wptr = 0; 2708 ring->wptr = 0;
@@ -2707,7 +2714,9 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
2707 soc15_grbm_select(adev, 0, 0, 0, 0); 2714 soc15_grbm_select(adev, 0, 0, 0, 0);
2708 mutex_unlock(&adev->srbm_mutex); 2715 mutex_unlock(&adev->srbm_mutex);
2709 } else { 2716 } else {
2710 memset((void *)mqd, 0, sizeof(*mqd)); 2717 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2718 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2719 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2711 mutex_lock(&adev->srbm_mutex); 2720 mutex_lock(&adev->srbm_mutex);
2712 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 2721 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2713 gfx_v9_0_mqd_init(ring); 2722 gfx_v9_0_mqd_init(ring);
@@ -2716,7 +2725,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
2716 mutex_unlock(&adev->srbm_mutex); 2725 mutex_unlock(&adev->srbm_mutex);
2717 2726
2718 if (adev->gfx.mec.mqd_backup[mqd_idx]) 2727 if (adev->gfx.mec.mqd_backup[mqd_idx])
2719 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 2728 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
2720 } 2729 }
2721 2730
2722 return 0; 2731 return 0;
@@ -2729,7 +2738,9 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
2729 int mqd_idx = ring - &adev->gfx.compute_ring[0]; 2738 int mqd_idx = ring - &adev->gfx.compute_ring[0];
2730 2739
2731 if (!adev->gfx.in_reset && !adev->gfx.in_suspend) { 2740 if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
2732 memset((void *)mqd, 0, sizeof(*mqd)); 2741 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2742 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2743 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2733 mutex_lock(&adev->srbm_mutex); 2744 mutex_lock(&adev->srbm_mutex);
2734 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 2745 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2735 gfx_v9_0_mqd_init(ring); 2746 gfx_v9_0_mqd_init(ring);
@@ -2737,11 +2748,11 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
2737 mutex_unlock(&adev->srbm_mutex); 2748 mutex_unlock(&adev->srbm_mutex);
2738 2749
2739 if (adev->gfx.mec.mqd_backup[mqd_idx]) 2750 if (adev->gfx.mec.mqd_backup[mqd_idx])
2740 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 2751 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
2741 } else if (adev->gfx.in_reset) { /* for GPU_RESET case */ 2752 } else if (adev->gfx.in_reset) { /* for GPU_RESET case */
2742 /* reset MQD to a clean status */ 2753 /* reset MQD to a clean status */
2743 if (adev->gfx.mec.mqd_backup[mqd_idx]) 2754 if (adev->gfx.mec.mqd_backup[mqd_idx])
2744 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 2755 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
2745 2756
2746 /* reset ring buffer */ 2757 /* reset ring buffer */
2747 ring->wptr = 0; 2758 ring->wptr = 0;
diff --git a/drivers/gpu/drm/amd/include/v9_structs.h b/drivers/gpu/drm/amd/include/v9_structs.h
index 56d79db31693..2fb25abaf7c8 100644
--- a/drivers/gpu/drm/amd/include/v9_structs.h
+++ b/drivers/gpu/drm/amd/include/v9_structs.h
@@ -672,6 +672,14 @@ struct v9_mqd {
672 uint32_t reserved_511; 672 uint32_t reserved_511;
673}; 673};
674 674
675struct v9_mqd_allocation {
676 struct v9_mqd mqd;
677 uint32_t wptr_poll_mem;
678 uint32_t rptr_report_mem;
679 uint32_t dynamic_cu_mask;
680 uint32_t dynamic_rb_mask;
681};
682
675/* from vega10 all CSA format is shifted to chain ib compatible mode */ 683/* from vega10 all CSA format is shifted to chain ib compatible mode */
676struct v9_ce_ib_state { 684struct v9_ce_ib_state {
677 /* section of non chained ib part */ 685 /* section of non chained ib part */