diff options
author | Deepak Nibade <dnibade@nvidia.com> | 2017-11-06 08:44:23 -0500 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-11-27 12:23:11 -0500 |
commit | c6b9177cfff8a41c3c3c78f5c47c7df677ced58c (patch) | |
tree | b402ccda611d85ec88f8557cb26d949617d92466 /drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | |
parent | a0cea295e7b7f917c6b52221ab34c3a6111fb224 (diff) |
gpu: nvgpu: define error_notifiers in common code
All the linux specific error_notifier codes are defined in the linux specific
header file <uapi/linux/nvgpu.h> and used throughout the common driver code
But since they are defined in a linux specific file, we need to move all
uses of those error_notifiers into linux specific code only
Hence define new error_notifiers in include/nvgpu/error_notifier.h and
use them in the common code
Add new API nvgpu_error_notifier_to_channel_notifier() to convert common
error_notifier of the form NVGPU_ERR_NOTIFIER_* to linux specific error
notifier of the form NVGPU_CHANNEL_*
Any future addition to the error notifiers requires updating both forms
of error notifiers
Move all error notifier related metadata from channel_gk20a (common code)
to linux specific structure nvgpu_channel_linux
Update all accesses to this data to use the new structure instead of channel_gk20a
Move and rename below APIs to linux specific file and declare them
in error_notifier.h
nvgpu_set_error_notifier_locked()
nvgpu_set_error_notifier()
nvgpu_is_error_notifier_set()
Add below new API and use it in fifo_vgpu.c
nvgpu_set_error_notifier_if_empty()
Include <nvgpu/error_notifier.h> wherever new error_notifier codes are used
NVGPU-426
Change-Id: Iaa5bfc150e6e9ec17d797d445c2d6407afe9f4bd
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1593361
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 46 |
1 files changed, 21 insertions, 25 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c index 648a8c86..38aecc93 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <nvgpu/nvhost.h> | 39 | #include <nvgpu/nvhost.h> |
40 | #include <nvgpu/barrier.h> | 40 | #include <nvgpu/barrier.h> |
41 | #include <nvgpu/ctxsw_trace.h> | 41 | #include <nvgpu/ctxsw_trace.h> |
42 | #include <nvgpu/error_notifier.h> | ||
42 | 43 | ||
43 | #include "gk20a.h" | 44 | #include "gk20a.h" |
44 | #include "mm_gk20a.h" | 45 | #include "mm_gk20a.h" |
@@ -557,7 +558,6 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f) | |||
557 | nvgpu_mutex_destroy(&tsg->event_id_list_lock); | 558 | nvgpu_mutex_destroy(&tsg->event_id_list_lock); |
558 | 559 | ||
559 | nvgpu_mutex_destroy(&c->ioctl_lock); | 560 | nvgpu_mutex_destroy(&c->ioctl_lock); |
560 | nvgpu_mutex_destroy(&c->error_notifier_mutex); | ||
561 | nvgpu_mutex_destroy(&c->joblist.cleanup_lock); | 561 | nvgpu_mutex_destroy(&c->joblist.cleanup_lock); |
562 | nvgpu_mutex_destroy(&c->joblist.pre_alloc.read_lock); | 562 | nvgpu_mutex_destroy(&c->joblist.pre_alloc.read_lock); |
563 | nvgpu_mutex_destroy(&c->sync_lock); | 563 | nvgpu_mutex_destroy(&c->sync_lock); |
@@ -1339,14 +1339,10 @@ static bool gk20a_fifo_ch_timeout_debug_dump_state(struct gk20a *g, | |||
1339 | if (!refch) | 1339 | if (!refch) |
1340 | return verbose; | 1340 | return verbose; |
1341 | 1341 | ||
1342 | nvgpu_mutex_acquire(&refch->error_notifier_mutex); | 1342 | if (nvgpu_is_error_notifier_set(refch, |
1343 | if (refch->error_notifier_ref) { | 1343 | NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT)) |
1344 | u32 err = refch->error_notifier->info32; | 1344 | verbose = refch->timeout_debug_dump; |
1345 | 1345 | ||
1346 | if (err == NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT) | ||
1347 | verbose = refch->timeout_debug_dump; | ||
1348 | } | ||
1349 | nvgpu_mutex_release(&refch->error_notifier_mutex); | ||
1350 | return verbose; | 1346 | return verbose; |
1351 | } | 1347 | } |
1352 | 1348 | ||
@@ -1400,8 +1396,8 @@ void gk20a_fifo_set_ctx_mmu_error_ch(struct gk20a *g, | |||
1400 | { | 1396 | { |
1401 | nvgpu_err(g, | 1397 | nvgpu_err(g, |
1402 | "channel %d generated a mmu fault", refch->chid); | 1398 | "channel %d generated a mmu fault", refch->chid); |
1403 | gk20a_set_error_notifier(refch, | 1399 | nvgpu_set_error_notifier(refch, |
1404 | NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT); | 1400 | NVGPU_ERR_NOTIFIER_FIFO_ERROR_MMU_ERR_FLT); |
1405 | } | 1401 | } |
1406 | 1402 | ||
1407 | void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g, | 1403 | void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g, |
@@ -1939,7 +1935,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch, | |||
1939 | 1935 | ||
1940 | list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) { | 1936 | list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) { |
1941 | if (gk20a_channel_get(ch_tsg)) { | 1937 | if (gk20a_channel_get(ch_tsg)) { |
1942 | gk20a_set_error_notifier(ch_tsg, err_code); | 1938 | nvgpu_set_error_notifier(ch_tsg, err_code); |
1943 | gk20a_channel_put(ch_tsg); | 1939 | gk20a_channel_put(ch_tsg); |
1944 | } | 1940 | } |
1945 | } | 1941 | } |
@@ -1947,7 +1943,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch, | |||
1947 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); | 1943 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); |
1948 | gk20a_fifo_recover_tsg(g, ch->tsgid, verbose); | 1944 | gk20a_fifo_recover_tsg(g, ch->tsgid, verbose); |
1949 | } else { | 1945 | } else { |
1950 | gk20a_set_error_notifier(ch, err_code); | 1946 | nvgpu_set_error_notifier(ch, err_code); |
1951 | gk20a_fifo_recover_ch(g, ch->chid, verbose); | 1947 | gk20a_fifo_recover_ch(g, ch->chid, verbose); |
1952 | } | 1948 | } |
1953 | 1949 | ||
@@ -2108,8 +2104,8 @@ static bool gk20a_fifo_check_ch_ctxsw_timeout(struct channel_gk20a *ch, | |||
2108 | *verbose = ch->timeout_debug_dump; | 2104 | *verbose = ch->timeout_debug_dump; |
2109 | *ms = ch->timeout_accumulated_ms; | 2105 | *ms = ch->timeout_accumulated_ms; |
2110 | if (recover) | 2106 | if (recover) |
2111 | gk20a_set_error_notifier(ch, | 2107 | nvgpu_set_error_notifier(ch, |
2112 | NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); | 2108 | NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT); |
2113 | 2109 | ||
2114 | gk20a_channel_put(ch); | 2110 | gk20a_channel_put(ch); |
2115 | } | 2111 | } |
@@ -2170,8 +2166,8 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg, | |||
2170 | gk20a_channel_put(ch); | 2166 | gk20a_channel_put(ch); |
2171 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { | 2167 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { |
2172 | if (gk20a_channel_get(ch)) { | 2168 | if (gk20a_channel_get(ch)) { |
2173 | gk20a_set_error_notifier(ch, | 2169 | nvgpu_set_error_notifier(ch, |
2174 | NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); | 2170 | NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT); |
2175 | *verbose |= ch->timeout_debug_dump; | 2171 | *verbose |= ch->timeout_debug_dump; |
2176 | gk20a_channel_put(ch); | 2172 | gk20a_channel_put(ch); |
2177 | } | 2173 | } |
@@ -2413,7 +2409,7 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id, | |||
2413 | rc_type = RC_TYPE_PBDMA_FAULT; | 2409 | rc_type = RC_TYPE_PBDMA_FAULT; |
2414 | nvgpu_err(g, | 2410 | nvgpu_err(g, |
2415 | "semaphore acquire timeout!"); | 2411 | "semaphore acquire timeout!"); |
2416 | *error_notifier = NVGPU_CHANNEL_GR_SEMAPHORE_TIMEOUT; | 2412 | *error_notifier = NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT; |
2417 | } | 2413 | } |
2418 | *handled |= pbdma_intr_0_acquire_pending_f(); | 2414 | *handled |= pbdma_intr_0_acquire_pending_f(); |
2419 | } | 2415 | } |
@@ -2431,7 +2427,7 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id, | |||
2431 | 2427 | ||
2432 | if (pbdma_intr_0 & pbdma_intr_0_pbcrc_pending_f()) { | 2428 | if (pbdma_intr_0 & pbdma_intr_0_pbcrc_pending_f()) { |
2433 | *error_notifier = | 2429 | *error_notifier = |
2434 | NVGPU_CHANNEL_PBDMA_PUSHBUFFER_CRC_MISMATCH; | 2430 | NVGPU_ERR_NOTIFIER_PBDMA_PUSHBUFFER_CRC_MISMATCH; |
2435 | rc_type = RC_TYPE_PBDMA_FAULT; | 2431 | rc_type = RC_TYPE_PBDMA_FAULT; |
2436 | } | 2432 | } |
2437 | 2433 | ||
@@ -2485,7 +2481,7 @@ static void gk20a_fifo_pbdma_fault_rc(struct gk20a *g, | |||
2485 | struct channel_gk20a *ch = &f->channel[id]; | 2481 | struct channel_gk20a *ch = &f->channel[id]; |
2486 | 2482 | ||
2487 | if (gk20a_channel_get(ch)) { | 2483 | if (gk20a_channel_get(ch)) { |
2488 | gk20a_set_error_notifier(ch, error_notifier); | 2484 | nvgpu_set_error_notifier(ch, error_notifier); |
2489 | gk20a_fifo_recover_ch(g, id, true); | 2485 | gk20a_fifo_recover_ch(g, id, true); |
2490 | gk20a_channel_put(ch); | 2486 | gk20a_channel_put(ch); |
2491 | } | 2487 | } |
@@ -2497,7 +2493,7 @@ static void gk20a_fifo_pbdma_fault_rc(struct gk20a *g, | |||
2497 | nvgpu_rwsem_down_read(&tsg->ch_list_lock); | 2493 | nvgpu_rwsem_down_read(&tsg->ch_list_lock); |
2498 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { | 2494 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { |
2499 | if (gk20a_channel_get(ch)) { | 2495 | if (gk20a_channel_get(ch)) { |
2500 | gk20a_set_error_notifier(ch, | 2496 | nvgpu_set_error_notifier(ch, |
2501 | error_notifier); | 2497 | error_notifier); |
2502 | gk20a_channel_put(ch); | 2498 | gk20a_channel_put(ch); |
2503 | } | 2499 | } |
@@ -2514,7 +2510,7 @@ u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g, struct fifo_gk20a *f, | |||
2514 | u32 pbdma_intr_1 = gk20a_readl(g, pbdma_intr_1_r(pbdma_id)); | 2510 | u32 pbdma_intr_1 = gk20a_readl(g, pbdma_intr_1_r(pbdma_id)); |
2515 | 2511 | ||
2516 | u32 handled = 0; | 2512 | u32 handled = 0; |
2517 | u32 error_notifier = NVGPU_CHANNEL_PBDMA_ERROR; | 2513 | u32 error_notifier = NVGPU_ERR_NOTIFIER_PBDMA_ERROR; |
2518 | unsigned int rc_type = RC_TYPE_NO_RC; | 2514 | unsigned int rc_type = RC_TYPE_NO_RC; |
2519 | 2515 | ||
2520 | if (pbdma_intr_0) { | 2516 | if (pbdma_intr_0) { |
@@ -2658,8 +2654,8 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id, | |||
2658 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { | 2654 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { |
2659 | if (!gk20a_channel_get(ch)) | 2655 | if (!gk20a_channel_get(ch)) |
2660 | continue; | 2656 | continue; |
2661 | gk20a_set_error_notifier(ch, | 2657 | nvgpu_set_error_notifier(ch, |
2662 | NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); | 2658 | NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT); |
2663 | gk20a_channel_put(ch); | 2659 | gk20a_channel_put(ch); |
2664 | } | 2660 | } |
2665 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); | 2661 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); |
@@ -2671,8 +2667,8 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id, | |||
2671 | "preempt channel %d timeout", id); | 2667 | "preempt channel %d timeout", id); |
2672 | 2668 | ||
2673 | if (gk20a_channel_get(ch)) { | 2669 | if (gk20a_channel_get(ch)) { |
2674 | gk20a_set_error_notifier(ch, | 2670 | nvgpu_set_error_notifier(ch, |
2675 | NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); | 2671 | NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT); |
2676 | gk20a_fifo_recover_ch(g, id, true); | 2672 | gk20a_fifo_recover_ch(g, id, true); |
2677 | gk20a_channel_put(ch); | 2673 | gk20a_channel_put(ch); |
2678 | } | 2674 | } |