author    Monk Liu <Monk.Liu@amd.com>    2017-04-05 00:17:18 -0400
committer Alex Deucher <alexander.deucher@amd.com>    2017-04-06 13:28:05 -0400
commit    f98b617ed5cc47157c4ccb8204b41ccec9f1281d (patch)
tree      9d3f2dd05c58336f27870d41102ef116cd1e9416 /drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
parent    94b4fd725b7d8625a66034dee683f55c66f77a1f (diff)
drm/amdgpu: implement the reset MB func for vega10
These mailbox functions were left out during the bring-up stage; we need them for the GPU reset feature.

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
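Editor's context note (not part of the patch): the new .reset_gpu hook is reached through the per-ASIC amdgpu_virt_ops table that SR-IOV VFs carry in adev->virt. A minimal caller-side sketch, assuming only the ops layout visible in this patch; the wrapper name is hypothetical:

/* Hypothetical wrapper: request a VF-initiated GPU reset through the
 * virt ops table. On vega10 this dispatches to xgpu_ai_request_reset(),
 * which sends IDH_REQ_GPU_RESET_ACCESS to the host over the PF/VF
 * mailbox. */
static int example_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->reset_gpu)
		return -EINVAL;	/* no SR-IOV reset path on this ASIC */

	return virt->ops->reset_gpu(adev);
}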
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c  133
1 file changed, 133 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 740c0f8e4b22..1493301b6a94 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -28,6 +28,7 @@
 #include "vega10/GC/gc_9_0_offset.h"
 #include "vega10/GC/gc_9_0_sh_mask.h"
 #include "soc15.h"
+#include "vega10_ih.h"
 #include "soc15_common.h"
 #include "mxgpu_ai.h"
 
@@ -180,6 +181,11 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
 	return 0;
 }
 
+static int xgpu_ai_request_reset(struct amdgpu_device *adev)
+{
+	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+}
+
 static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
 					   bool init)
 {
@@ -201,7 +207,134 @@ static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
 	return r;
 }
 
+static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
+				   struct amdgpu_irq_src *source,
+				   struct amdgpu_iv_entry *entry)
+{
+	DRM_DEBUG("get ack intr and do nothing.\n");
+	return 0;
+}
+
+static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
+				       struct amdgpu_irq_src *source,
+				       unsigned type,
+				       enum amdgpu_interrupt_state state)
+{
+	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));
+
+	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
+			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);
+
+	return 0;
+}
+
+static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
+{
+	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
+	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+	/* wait until RCV_MSG becomes IDH_FLR_NOTIFICATION_CMPL (3) */
+	if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
+		pr_err("failed to receive FLR_CMPL\n");
+		return;
+	}
+
+	/* Trigger recovery due to world switch failure */
+	amdgpu_sriov_gpu_reset(adev, false);
+}
+
+static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
+				       struct amdgpu_irq_src *src,
+				       unsigned type,
+				       enum amdgpu_interrupt_state state)
+{
+	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));
+
+	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
+			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);
+
+	return 0;
+}
+
+static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
+				   struct amdgpu_irq_src *source,
+				   struct amdgpu_iv_entry *entry)
+{
+	int r;
+
+	/* see what event we get */
+	r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
+
+	/* only handle FLR_NOTIFY now */
+	if (!r)
+		schedule_work(&adev->virt.flr_work);
+
+	return 0;
+}
+
+static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
+	.set = xgpu_ai_set_mailbox_ack_irq,
+	.process = xgpu_ai_mailbox_ack_irq,
+};
+
+static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
+	.set = xgpu_ai_set_mailbox_rcv_irq,
+	.process = xgpu_ai_mailbox_rcv_irq,
+};
+
+void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->virt.ack_irq.num_types = 1;
+	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
+	adev->virt.rcv_irq.num_types = 1;
+	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
+}
+
+int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
+{
+	int r;
+
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
+	if (r)
+		return r;
+
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
+	if (r) {
+		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+		return r;
+	}
+
+	return 0;
+}
+
+int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
+{
+	int r;
+
+	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
+	if (r)
+		return r;
+	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
+	if (r) {
+		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+		return r;
+	}
+
+	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
+
+	return 0;
+}
+
+void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
+{
+	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
+	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+}
+
 const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
 	.req_full_gpu = xgpu_ai_request_full_gpu_access,
 	.rel_full_gpu = xgpu_ai_release_full_gpu_access,
+	.reset_gpu = xgpu_ai_request_reset,
 };
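
Editor's note: the patch only adds the mailbox handlers and the ops entry; nothing in this diff wires them into an IP block yet. A minimal sketch of how a SOC15 init path might hook them up under SR-IOV follows; the function names and placement are assumptions for illustration, not part of this commit:

/* Hypothetical IP-block hooks wiring up the new mailbox IRQ helpers.
 * IV source IDs 135 (rcv/VALID) and 138 (ack) come from the patch above. */
static int example_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VF only: install the irq_src_funcs and register the IV IDs */
	if (amdgpu_sriov_vf(adev)) {
		xgpu_ai_mailbox_set_irq_funcs(adev);
		return xgpu_ai_mailbox_add_irq_id(adev);
	}
	return 0;
}

static int example_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Enable the VALID/ACK mailbox interrupts once the IH ring is up;
	 * this also arms flr_work for FLR notifications from the host. */
	if (amdgpu_sriov_vf(adev))
		return xgpu_ai_mailbox_get_irq(adev);
	return 0;
}

The runtime flow added by this patch is then: the host posts IDH_FLR_NOTIFICATION -> xgpu_ai_mailbox_rcv_irq() schedules flr_work -> xgpu_ai_mailbox_flr_work() polls for IDH_FLR_NOTIFICATION_CMPL and calls amdgpu_sriov_gpu_reset(adev, false) to recover.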