diff options
author | Alex Deucher <alexander.deucher@amd.com> | 2016-03-29 18:28:50 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2017-03-29 23:53:37 -0400 |
commit | d766e6a393383c60a55bdcc72586f21a1ff12509 (patch) | |
tree | 2850bdf21e89a7bd48c8d08cd7ff6463ad955214 /drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | |
parent | 832be4041d4999e008839d12d1efe118da27bd99 (diff) |
drm/amdgpu: switch ih handling to two levels (v3)
Newer asics have two levels of irq ids now:
client id - the IP
src id - the interrupt src within the IP
v2: integrated Christian's comments.
v3: fix rebase fail in SI and CIK
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Ken Wang <Qingqing.Wang@amd.com>
Reviewed-by: Ken Wang <Qingqing.Wang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 111 |
1 files changed, 76 insertions, 35 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index e63ece049b05..7e7acd47ec78 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | |||
@@ -89,23 +89,28 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work) | |||
89 | static void amdgpu_irq_disable_all(struct amdgpu_device *adev) | 89 | static void amdgpu_irq_disable_all(struct amdgpu_device *adev) |
90 | { | 90 | { |
91 | unsigned long irqflags; | 91 | unsigned long irqflags; |
92 | unsigned i, j; | 92 | unsigned i, j, k; |
93 | int r; | 93 | int r; |
94 | 94 | ||
95 | spin_lock_irqsave(&adev->irq.lock, irqflags); | 95 | spin_lock_irqsave(&adev->irq.lock, irqflags); |
96 | for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) { | 96 | for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) { |
97 | struct amdgpu_irq_src *src = adev->irq.sources[i]; | 97 | if (!adev->irq.client[i].sources) |
98 | |||
99 | if (!src || !src->funcs->set || !src->num_types) | ||
100 | continue; | 98 | continue; |
101 | 99 | ||
102 | for (j = 0; j < src->num_types; ++j) { | 100 | for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) { |
103 | atomic_set(&src->enabled_types[j], 0); | 101 | struct amdgpu_irq_src *src = adev->irq.client[i].sources[j]; |
104 | r = src->funcs->set(adev, src, j, | 102 | |
105 | AMDGPU_IRQ_STATE_DISABLE); | 103 | if (!src || !src->funcs->set || !src->num_types) |
106 | if (r) | 104 | continue; |
107 | DRM_ERROR("error disabling interrupt (%d)\n", | 105 | |
108 | r); | 106 | for (k = 0; k < src->num_types; ++k) { |
107 | atomic_set(&src->enabled_types[k], 0); | ||
108 | r = src->funcs->set(adev, src, k, | ||
109 | AMDGPU_IRQ_STATE_DISABLE); | ||
110 | if (r) | ||
111 | DRM_ERROR("error disabling interrupt (%d)\n", | ||
112 | r); | ||
113 | } | ||
109 | } | 114 | } |
110 | } | 115 | } |
111 | spin_unlock_irqrestore(&adev->irq.lock, irqflags); | 116 | spin_unlock_irqrestore(&adev->irq.lock, irqflags); |
@@ -254,7 +259,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev) | |||
254 | */ | 259 | */ |
255 | void amdgpu_irq_fini(struct amdgpu_device *adev) | 260 | void amdgpu_irq_fini(struct amdgpu_device *adev) |
256 | { | 261 | { |
257 | unsigned i; | 262 | unsigned i, j; |
258 | 263 | ||
259 | drm_vblank_cleanup(adev->ddev); | 264 | drm_vblank_cleanup(adev->ddev); |
260 | if (adev->irq.installed) { | 265 | if (adev->irq.installed) { |
@@ -266,19 +271,25 @@ void amdgpu_irq_fini(struct amdgpu_device *adev) | |||
266 | cancel_work_sync(&adev->reset_work); | 271 | cancel_work_sync(&adev->reset_work); |
267 | } | 272 | } |
268 | 273 | ||
269 | for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) { | 274 | for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) { |
270 | struct amdgpu_irq_src *src = adev->irq.sources[i]; | 275 | if (!adev->irq.client[i].sources) |
271 | |||
272 | if (!src) | ||
273 | continue; | 276 | continue; |
274 | 277 | ||
275 | kfree(src->enabled_types); | 278 | for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) { |
276 | src->enabled_types = NULL; | 279 | struct amdgpu_irq_src *src = adev->irq.client[i].sources[j]; |
277 | if (src->data) { | 280 | |
278 | kfree(src->data); | 281 | if (!src) |
279 | kfree(src); | 282 | continue; |
280 | adev->irq.sources[i] = NULL; | 283 | |
284 | kfree(src->enabled_types); | ||
285 | src->enabled_types = NULL; | ||
286 | if (src->data) { | ||
287 | kfree(src->data); | ||
288 | kfree(src); | ||
289 | adev->irq.client[i].sources[j] = NULL; | ||
290 | } | ||
281 | } | 291 | } |
292 | kfree(adev->irq.client[i].sources); | ||
282 | } | 293 | } |
283 | } | 294 | } |
284 | 295 | ||
@@ -290,18 +301,30 @@ void amdgpu_irq_fini(struct amdgpu_device *adev) | |||
290 | * @source: irq source | 301 | * @source: irq source |
291 | * | 302 | * |
292 | */ | 303 | */ |
293 | int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id, | 304 | int amdgpu_irq_add_id(struct amdgpu_device *adev, |
305 | unsigned client_id, unsigned src_id, | ||
294 | struct amdgpu_irq_src *source) | 306 | struct amdgpu_irq_src *source) |
295 | { | 307 | { |
296 | if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) | 308 | if (client_id >= AMDGPU_IH_CLIENTID_MAX) |
297 | return -EINVAL; | 309 | return -EINVAL; |
298 | 310 | ||
299 | if (adev->irq.sources[src_id] != NULL) | 311 | if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) |
300 | return -EINVAL; | 312 | return -EINVAL; |
301 | 313 | ||
302 | if (!source->funcs) | 314 | if (!source->funcs) |
303 | return -EINVAL; | 315 | return -EINVAL; |
304 | 316 | ||
317 | if (!adev->irq.client[client_id].sources) { | ||
318 | adev->irq.client[client_id].sources = kcalloc(AMDGPU_MAX_IRQ_SRC_ID, | ||
319 | sizeof(struct amdgpu_irq_src), | ||
320 | GFP_KERNEL); | ||
321 | if (!adev->irq.client[client_id].sources) | ||
322 | return -ENOMEM; | ||
323 | } | ||
324 | |||
325 | if (adev->irq.client[client_id].sources[src_id] != NULL) | ||
326 | return -EINVAL; | ||
327 | |||
305 | if (source->num_types && !source->enabled_types) { | 328 | if (source->num_types && !source->enabled_types) { |
306 | atomic_t *types; | 329 | atomic_t *types; |
307 | 330 | ||
@@ -313,8 +336,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id, | |||
313 | source->enabled_types = types; | 336 | source->enabled_types = types; |
314 | } | 337 | } |
315 | 338 | ||
316 | adev->irq.sources[src_id] = source; | 339 | adev->irq.client[client_id].sources[src_id] = source; |
317 | |||
318 | return 0; | 340 | return 0; |
319 | } | 341 | } |
320 | 342 | ||
@@ -329,10 +351,16 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id, | |||
329 | void amdgpu_irq_dispatch(struct amdgpu_device *adev, | 351 | void amdgpu_irq_dispatch(struct amdgpu_device *adev, |
330 | struct amdgpu_iv_entry *entry) | 352 | struct amdgpu_iv_entry *entry) |
331 | { | 353 | { |
354 | unsigned client_id = entry->client_id; | ||
332 | unsigned src_id = entry->src_id; | 355 | unsigned src_id = entry->src_id; |
333 | struct amdgpu_irq_src *src; | 356 | struct amdgpu_irq_src *src; |
334 | int r; | 357 | int r; |
335 | 358 | ||
359 | if (client_id >= AMDGPU_IH_CLIENTID_MAX) { | ||
360 | DRM_DEBUG("Invalid client_id in IV: %d\n", client_id); | ||
361 | return; | ||
362 | } | ||
363 | |||
336 | if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) { | 364 | if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) { |
337 | DRM_DEBUG("Invalid src_id in IV: %d\n", src_id); | 365 | DRM_DEBUG("Invalid src_id in IV: %d\n", src_id); |
338 | return; | 366 | return; |
@@ -341,7 +369,13 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev, | |||
341 | if (adev->irq.virq[src_id]) { | 369 | if (adev->irq.virq[src_id]) { |
342 | generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id)); | 370 | generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id)); |
343 | } else { | 371 | } else { |
344 | src = adev->irq.sources[src_id]; | 372 | if (!adev->irq.client[client_id].sources) { |
373 | DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n", | ||
374 | client_id, src_id); | ||
375 | return; | ||
376 | } | ||
377 | |||
378 | src = adev->irq.client[client_id].sources[src_id]; | ||
345 | if (!src) { | 379 | if (!src) { |
346 | DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id); | 380 | DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id); |
347 | return; | 381 | return; |
@@ -385,13 +419,20 @@ int amdgpu_irq_update(struct amdgpu_device *adev, | |||
385 | 419 | ||
386 | void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) | 420 | void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) |
387 | { | 421 | { |
388 | int i, j; | 422 | int i, j, k; |
389 | for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; i++) { | 423 | |
390 | struct amdgpu_irq_src *src = adev->irq.sources[i]; | 424 | for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) { |
391 | if (!src) | 425 | if (!adev->irq.client[i].sources) |
392 | continue; | 426 | continue; |
393 | for (j = 0; j < src->num_types; j++) | 427 | |
394 | amdgpu_irq_update(adev, src, j); | 428 | for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) { |
429 | struct amdgpu_irq_src *src = adev->irq.client[i].sources[j]; | ||
430 | |||
431 | if (!src) | ||
432 | continue; | ||
433 | for (k = 0; k < src->num_types; k++) | ||
434 | amdgpu_irq_update(adev, src, k); | ||
435 | } | ||
395 | } | 436 | } |
396 | } | 437 | } |
397 | 438 | ||