author		Alex Deucher <alexander.deucher@amd.com>	2017-06-07 11:05:26 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2017-06-07 15:43:28 -0400
commit		41f6a99abdb423691b24c12f0a0578755b2c1126 (patch)
tree		d3c6b25e148ef331cb29abe10fb2bd9a6f617dd4 /drivers/gpu/drm/amd/amdgpu
parent		cf8b611f55a775cc6514485e7dccf1c0e85b938d (diff)
drm/amdgpu: move gfx_v*_0_compute_queue_acquire to common code
Same function was duplicated in all gfx IP files.
Reviewed-by: Alex Xie <AlexBin.Xie@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c	37
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h	 2
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c	39
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c	39
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c	39
5 files changed, 42 insertions, 114 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 19943356cca7..51a9708290dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -108,3 +108,40 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
 		p = next + 1;
 	}
 }
+
+void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
+{
+	int i, queue, pipe, mec;
+
+	/* policy for amdgpu compute queue ownership */
+	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
+		queue = i % adev->gfx.mec.num_queue_per_pipe;
+		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
+			% adev->gfx.mec.num_pipe_per_mec;
+		mec = (i / adev->gfx.mec.num_queue_per_pipe)
+			/ adev->gfx.mec.num_pipe_per_mec;
+
+		/* we've run out of HW */
+		if (mec >= adev->gfx.mec.num_mec)
+			break;
+
+		if (adev->gfx.mec.num_mec > 1) {
+			/* policy: amdgpu owns the first two queues of the first MEC */
+			if (mec == 0 && queue < 2)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		} else {
+			/* policy: amdgpu owns all queues in the first pipe */
+			if (mec == 0 && pipe == 0)
+				set_bit(i, adev->gfx.mec.queue_bitmap);
+		}
+	}
+
+	/* update the number of active compute rings */
+	adev->gfx.num_compute_rings =
+		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+
+	/* If you hit this case and edited the policy, you probably just
+	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
+	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
+		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+}
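
The helper above linearizes the (mec, pipe, queue) space and recovers the
coordinates with div/mod arithmetic. A minimal standalone sketch of the same
decomposition and of the multi-MEC ownership policy, assuming a hypothetical
topology of 2 MECs, 4 pipes per MEC, and 8 queues per pipe (all values are
illustrative, not taken from the patch):

#include <stdio.h>

/* Illustrative topology; the driver reads these from adev->gfx.mec. */
#define NUM_MEC            2
#define NUM_PIPE_PER_MEC   4
#define NUM_QUEUE_PER_PIPE 8
#define MAX_QUEUES (NUM_MEC * NUM_PIPE_PER_MEC * NUM_QUEUE_PER_PIPE)

int main(void)
{
	int i, owned = 0;

	for (i = 0; i < MAX_QUEUES; ++i) {
		int queue = i % NUM_QUEUE_PER_PIPE;
		int pipe  = (i / NUM_QUEUE_PER_PIPE) % NUM_PIPE_PER_MEC;
		int mec   = (i / NUM_QUEUE_PER_PIPE) / NUM_PIPE_PER_MEC;

		/* multi-MEC policy: the first two queues of each pipe on MEC 0 */
		if (mec == 0 && queue < 2) {
			printf("flat %2d -> mec %d pipe %d queue %d (amdgpu)\n",
			       i, mec, pipe, queue);
			owned++;
		}
	}
	/* prints 8 owned queues: 2 per pipe x 4 pipes on MEC 0 */
	printf("amdgpu owns %d of %d queues\n", owned, MAX_QUEUES);
	return 0;
}

With these numbers the loop marks flat indices 0, 1, 8, 9, 16, 17, 24, and 25,
i.e. queues 0-1 on each of MEC 0's four pipes.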
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 2d846ef1c033..9b9ea6eb49c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -30,6 +30,8 @@ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
 void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
 				 unsigned max_sh);
 
+void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev);
+
 /**
  * amdgpu_gfx_create_bitmask - create a bitmask
  *
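
With the declaration now in the shared header, a new gfx IP file only has to
describe its topology before calling the common helper. A hedged sketch of
what such a caller could look like (gfx_vX_0_mec_init and its topology values
are invented for illustration; the field names match the patch):

static int gfx_vX_0_mec_init(struct amdgpu_device *adev)
{
	/* describe this IP's compute topology first ... (example values) */
	adev->gfx.mec.num_mec = 2;
	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	/* ... then let common code apply the queue-ownership policy */
	amdgpu_gfx_compute_queue_acquire(adev);

	/* buffer sizing can now rely on adev->gfx.num_compute_rings */
	return 0;
}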
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 862bc724de42..6ffb2da7b3df 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2809,43 +2809,6 @@ static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
 	}
 }
 
-static void gfx_v7_0_compute_queue_acquire(struct amdgpu_device *adev)
-{
-	int i, queue, pipe, mec;
-
-	/* policy for amdgpu compute queue ownership */
-	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
-		queue = i % adev->gfx.mec.num_queue_per_pipe;
-		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
-			% adev->gfx.mec.num_pipe_per_mec;
-		mec = (i / adev->gfx.mec.num_queue_per_pipe)
-			/ adev->gfx.mec.num_pipe_per_mec;
-
-		/* we've run out of HW */
-		if (mec >= adev->gfx.mec.num_mec)
-			break;
-
-		if (adev->gfx.mec.num_mec > 1) {
-			/* policy: amdgpu owns the first two queues of the first MEC */
-			if (mec == 0 && queue < 2)
-				set_bit(i, adev->gfx.mec.queue_bitmap);
-		} else {
-			/* policy: amdgpu owns all queues in the first pipe */
-			if (mec == 0 && pipe == 0)
-				set_bit(i, adev->gfx.mec.queue_bitmap);
-		}
-	}
-
-	/* update the number of active compute rings */
-	adev->gfx.num_compute_rings =
-		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
-
-	/* If you hit this case and edited the policy, you probably just
-	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
-	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
-		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
-}
-
 static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
 {
 	int r;
@@ -2870,7 +2833,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
 	adev->gfx.mec.num_queue_per_pipe = 8;
 
 	/* take ownership of the relevant compute queues */
-	gfx_v7_0_compute_queue_acquire(adev);
+	amdgpu_gfx_compute_queue_acquire(adev);
 
 	/* allocate space for ALL pipes (even the ones we don't own) */
 	mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 1370b3980791..cfa37f1ba06d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1448,43 +1448,6 @@ static void gfx_v8_0_kiq_free_ring(struct amdgpu_ring *ring,
 	amdgpu_ring_fini(ring);
 }
 
-static void gfx_v8_0_compute_queue_acquire(struct amdgpu_device *adev)
-{
-	int i, queue, pipe, mec;
-
-	/* policy for amdgpu compute queue ownership */
-	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
-		queue = i % adev->gfx.mec.num_queue_per_pipe;
-		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
-			% adev->gfx.mec.num_pipe_per_mec;
-		mec = (i / adev->gfx.mec.num_queue_per_pipe)
-			/ adev->gfx.mec.num_pipe_per_mec;
-
-		/* we've run out of HW */
-		if (mec >= adev->gfx.mec.num_mec)
-			break;
-
-		if (adev->gfx.mec.num_mec > 1) {
-			/* policy: amdgpu owns the first two queues of the first MEC */
-			if (mec == 0 && queue < 2)
-				set_bit(i, adev->gfx.mec.queue_bitmap);
-		} else {
-			/* policy: amdgpu owns all queues in the first pipe */
-			if (mec == 0 && pipe == 0)
-				set_bit(i, adev->gfx.mec.queue_bitmap);
-		}
-	}
-
-	/* update the number of active compute rings */
-	adev->gfx.num_compute_rings =
-		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
-
-	/* If you hit this case and edited the policy, you probably just
-	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
-	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
-		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
-}
-
 static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
 {
 	int r;
@@ -1513,7 +1476,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
 	adev->gfx.mec.num_queue_per_pipe = 8;
 
 	/* take ownership of the relevant compute queues */
-	gfx_v8_0_compute_queue_acquire(adev);
+	amdgpu_gfx_compute_queue_acquire(adev);
 
 	mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
 
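
Note the sizing difference the surrounding hunks preserve: gfx_v7_0 still
allocates EOP space for every pipe ("even the ones we don't own"), while
gfx_v8_0 and gfx_v9_0 below size the buffer from the rings actually acquired.
A rough numeric sketch, assuming an HPD entry size of 2048 bytes, the
illustrative 2x4x8 topology from above, and that the v7 expression multiplies
across queues per pipe as well (all three are assumptions, not values from
the patch):

#include <stdio.h>

#define MEC_HPD_SIZE       2048u /* assumed entry size */
#define NUM_MEC            2u
#define NUM_PIPE_PER_MEC   4u
#define NUM_QUEUE_PER_PIPE 8u

int main(void)
{
	/* gfx_v7_0 style: space for ALL queues, owned or not */
	unsigned all_pipes = NUM_MEC * NUM_PIPE_PER_MEC *
			     NUM_QUEUE_PER_PIPE * MEC_HPD_SIZE;

	/* gfx_v8_0/gfx_v9_0 style: only the 8 rings the policy acquired */
	unsigned num_compute_rings = 8;
	unsigned acquired_only = num_compute_rings * MEC_HPD_SIZE;

	printf("all pipes:      %u bytes\n", all_pipes);     /* 131072 */
	printf("acquired rings: %u bytes\n", acquired_only); /* 16384  */
	return 0;
}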
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 9d675b37883d..3ea0e716360d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -857,43 +857,6 @@ static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
 	}
 }
 
-static void gfx_v9_0_compute_queue_acquire(struct amdgpu_device *adev)
-{
-	int i, queue, pipe, mec;
-
-	/* policy for amdgpu compute queue ownership */
-	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
-		queue = i % adev->gfx.mec.num_queue_per_pipe;
-		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
-			% adev->gfx.mec.num_pipe_per_mec;
-		mec = (i / adev->gfx.mec.num_queue_per_pipe)
-			/ adev->gfx.mec.num_pipe_per_mec;
-
-		/* we've run out of HW */
-		if (mec >= adev->gfx.mec.num_mec)
-			break;
-
-		if (adev->gfx.mec.num_mec > 1) {
-			/* policy: amdgpu owns the first two queues of the first MEC */
-			if (mec == 0 && queue < 2)
-				set_bit(i, adev->gfx.mec.queue_bitmap);
-		} else {
-			/* policy: amdgpu owns all queues in the first pipe */
-			if (mec == 0 && pipe == 0)
-				set_bit(i, adev->gfx.mec.queue_bitmap);
-		}
-	}
-
-	/* update the number of active compute rings */
-	adev->gfx.num_compute_rings =
-		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
-
-	/* If you hit this case and edited the policy, you probably just
-	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
-	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
-		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
-}
-
 static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
 {
 	int r;
@@ -920,7 +883,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
 	adev->gfx.mec.num_queue_per_pipe = 8;
 
 	/* take ownership of the relevant compute queues */
-	gfx_v9_0_compute_queue_acquire(adev);
+	amdgpu_gfx_compute_queue_acquire(adev);
 	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
 
 	if (adev->gfx.mec.hpd_eop_obj == NULL) {