diff options
author | Monk Liu <Monk.Liu@amd.com> | 2017-02-06 03:46:36 -0500 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2017-03-29 23:52:44 -0400 |
commit | 596c67d076a5ae0f2571cab9245ee76f6a6cf922 (patch) | |
tree | 77947d1410bfbe8542caa646113e2045ed43cba3 /drivers | |
parent | 2da4da3cbe78ee6555b3999494b0f080d17f93b1 (diff) |
drm/amdgpu:divide KCQ mqd init to sw and hw
sw part only invoked once during sw_init.
hw part invoked during first drv load and resume later.
that way we do not alloc mqd in hw_init/resume; we only keep
mqd allocated in the sw_init routine.
and hw_init routine only kmap and set it.
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 107 |
1 files changed, 42 insertions, 65 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 0dfc7659cfe7..7a439301e9a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
@@ -2116,17 +2116,6 @@ static int gfx_v8_0_sw_init(void *handle) | |||
2116 | return r; | 2116 | return r; |
2117 | } | 2117 | } |
2118 | 2118 | ||
2119 | r = gfx_v8_0_kiq_init(adev); | ||
2120 | if (r) { | ||
2121 | DRM_ERROR("Failed to init KIQ BOs!\n"); | ||
2122 | return r; | ||
2123 | } | ||
2124 | |||
2125 | kiq = &adev->gfx.kiq; | ||
2126 | r = gfx_v8_0_kiq_init_ring(adev, &kiq->ring, &kiq->irq); | ||
2127 | if (r) | ||
2128 | return r; | ||
2129 | |||
2130 | /* set up the gfx ring */ | 2119 | /* set up the gfx ring */ |
2131 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) { | 2120 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) { |
2132 | ring = &adev->gfx.gfx_ring[i]; | 2121 | ring = &adev->gfx.gfx_ring[i]; |
@@ -2169,6 +2158,24 @@ static int gfx_v8_0_sw_init(void *handle) | |||
2169 | return r; | 2158 | return r; |
2170 | } | 2159 | } |
2171 | 2160 | ||
2161 | if (amdgpu_sriov_vf(adev)) { | ||
2162 | r = gfx_v8_0_kiq_init(adev); | ||
2163 | if (r) { | ||
2164 | DRM_ERROR("Failed to init KIQ BOs!\n"); | ||
2165 | return r; | ||
2166 | } | ||
2167 | |||
2168 | kiq = &adev->gfx.kiq; | ||
2169 | r = gfx_v8_0_kiq_init_ring(adev, &kiq->ring, &kiq->irq); | ||
2170 | if (r) | ||
2171 | return r; | ||
2172 | |||
2173 | /* create MQD for all compute queues as wel as KIQ for SRIOV case */ | ||
2174 | r = gfx_v8_0_compute_mqd_soft_init(adev); | ||
2175 | if (r) | ||
2176 | return r; | ||
2177 | } | ||
2178 | |||
2172 | /* reserve GDS, GWS and OA resource for gfx */ | 2179 | /* reserve GDS, GWS and OA resource for gfx */ |
2173 | r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size, | 2180 | r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size, |
2174 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS, | 2181 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS, |
@@ -2210,9 +2217,13 @@ static int gfx_v8_0_sw_fini(void *handle) | |||
2210 | amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); | 2217 | amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); |
2211 | for (i = 0; i < adev->gfx.num_compute_rings; i++) | 2218 | for (i = 0; i < adev->gfx.num_compute_rings; i++) |
2212 | amdgpu_ring_fini(&adev->gfx.compute_ring[i]); | 2219 | amdgpu_ring_fini(&adev->gfx.compute_ring[i]); |
2213 | gfx_v8_0_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); | ||
2214 | 2220 | ||
2215 | gfx_v8_0_kiq_fini(adev); | 2221 | if (amdgpu_sriov_vf(adev)) { |
2222 | gfx_v8_0_compute_mqd_soft_fini(adev); | ||
2223 | gfx_v8_0_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); | ||
2224 | gfx_v8_0_kiq_fini(adev); | ||
2225 | } | ||
2226 | |||
2216 | gfx_v8_0_mec_fini(adev); | 2227 | gfx_v8_0_mec_fini(adev); |
2217 | gfx_v8_0_rlc_fini(adev); | 2228 | gfx_v8_0_rlc_fini(adev); |
2218 | gfx_v8_0_free_microcode(adev); | 2229 | gfx_v8_0_free_microcode(adev); |
@@ -4900,70 +4911,37 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring, | |||
4900 | return 0; | 4911 | return 0; |
4901 | } | 4912 | } |
4902 | 4913 | ||
4903 | static void gfx_v8_0_kiq_free_queue(struct amdgpu_device *adev) | 4914 | static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev) |
4904 | { | 4915 | { |
4905 | struct amdgpu_ring *ring = NULL; | 4916 | struct amdgpu_ring *ring = NULL; |
4906 | int i; | 4917 | int r = 0, i; |
4907 | 4918 | ||
4908 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | 4919 | gfx_v8_0_cp_compute_enable(adev, true); |
4909 | ring = &adev->gfx.compute_ring[i]; | ||
4910 | amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL); | ||
4911 | ring->mqd_obj = NULL; | ||
4912 | } | ||
4913 | 4920 | ||
4914 | ring = &adev->gfx.kiq.ring; | 4921 | ring = &adev->gfx.kiq.ring; |
4915 | amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL); | 4922 | if (!amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr)) { |
4916 | ring->mqd_obj = NULL; | 4923 | memset((void *)ring->mqd_ptr, 0, sizeof(struct vi_mqd)); |
4917 | } | 4924 | r = gfx_v8_0_kiq_init_queue(ring, ring->mqd_ptr, ring->mqd_gpu_addr); |
4918 | 4925 | amdgpu_bo_kunmap(ring->mqd_obj); | |
4919 | static int gfx_v8_0_kiq_setup_queue(struct amdgpu_device *adev, | 4926 | if (r) |
4920 | struct amdgpu_ring *ring) | 4927 | return r; |
4921 | { | 4928 | } else { |
4922 | struct vi_mqd *mqd; | ||
4923 | u64 mqd_gpu_addr; | ||
4924 | u32 *buf; | ||
4925 | int r = 0; | ||
4926 | |||
4927 | r = amdgpu_bo_create_kernel(adev, sizeof(struct vi_mqd), PAGE_SIZE, | ||
4928 | AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj, | ||
4929 | &mqd_gpu_addr, (void **)&buf); | ||
4930 | if (r) { | ||
4931 | dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r); | ||
4932 | return r; | 4929 | return r; |
4933 | } | 4930 | } |
4934 | 4931 | ||
4935 | /* init the mqd struct */ | ||
4936 | memset(buf, 0, sizeof(struct vi_mqd)); | ||
4937 | mqd = (struct vi_mqd *)buf; | ||
4938 | |||
4939 | r = gfx_v8_0_kiq_init_queue(ring, mqd, mqd_gpu_addr); | ||
4940 | if (r) | ||
4941 | return r; | ||
4942 | |||
4943 | amdgpu_bo_kunmap(ring->mqd_obj); | ||
4944 | |||
4945 | return 0; | ||
4946 | } | ||
4947 | |||
4948 | static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev) | ||
4949 | { | ||
4950 | struct amdgpu_ring *ring = NULL; | ||
4951 | int r, i; | ||
4952 | |||
4953 | ring = &adev->gfx.kiq.ring; | ||
4954 | r = gfx_v8_0_kiq_setup_queue(adev, ring); | ||
4955 | if (r) | ||
4956 | return r; | ||
4957 | |||
4958 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | 4932 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { |
4959 | ring = &adev->gfx.compute_ring[i]; | 4933 | ring = &adev->gfx.compute_ring[i]; |
4960 | r = gfx_v8_0_kiq_setup_queue(adev, ring); | 4934 | if (!amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr)) { |
4961 | if (r) | 4935 | memset((void *)ring->mqd_ptr, 0, sizeof(struct vi_mqd)); |
4936 | r = gfx_v8_0_kiq_init_queue(ring, ring->mqd_ptr, ring->mqd_gpu_addr); | ||
4937 | amdgpu_bo_kunmap(ring->mqd_obj); | ||
4938 | if (r) | ||
4939 | return r; | ||
4940 | } else { | ||
4962 | return r; | 4941 | return r; |
4942 | } | ||
4963 | } | 4943 | } |
4964 | 4944 | ||
4965 | gfx_v8_0_cp_compute_enable(adev, true); | ||
4966 | |||
4967 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | 4945 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { |
4968 | ring = &adev->gfx.compute_ring[i]; | 4946 | ring = &adev->gfx.compute_ring[i]; |
4969 | 4947 | ||
@@ -5324,7 +5302,6 @@ static int gfx_v8_0_hw_fini(void *handle) | |||
5324 | amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); | 5302 | amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); |
5325 | amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); | 5303 | amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); |
5326 | if (amdgpu_sriov_vf(adev)) { | 5304 | if (amdgpu_sriov_vf(adev)) { |
5327 | gfx_v8_0_kiq_free_queue(adev); | ||
5328 | pr_debug("For SRIOV client, shouldn't do anything.\n"); | 5305 | pr_debug("For SRIOV client, shouldn't do anything.\n"); |
5329 | return 0; | 5306 | return 0; |
5330 | } | 5307 | } |