diff options
author | Deepak Nibade <dnibade@nvidia.com> | 2017-11-06 08:44:23 -0500 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-11-27 12:23:11 -0500 |
commit | c6b9177cfff8a41c3c3c78f5c47c7df677ced58c (patch) | |
tree | b402ccda611d85ec88f8557cb26d949617d92466 /drivers/gpu/nvgpu/gk20a | |
parent | a0cea295e7b7f917c6b52221ab34c3a6111fb224 (diff) |
gpu: nvgpu: define error_notifiers in common code
All the linux specific error_notifier codes are defined in the linux specific
header file <uapi/linux/nvgpu.h> and used throughout the common driver code
But since they are defined in a linux specific file, we need to move all
uses of those error_notifiers into linux specific code only
Hence define new error_notifiers in include/nvgpu/error_notifier.h and
use them in the common code
Add new API nvgpu_error_notifier_to_channel_notifier() to convert common
error_notifier of the form NVGPU_ERR_NOTIFIER_* to linux specific error
notifier of the form NVGPU_CHANNEL_*
Any future additions to error notifiers require updating both forms
of error notifiers
Move all error notifier related metadata from channel_gk20a (common code)
to linux specific structure nvgpu_channel_linux
Update all accesses to this data from new structure instead of channel_gk20a
Move and rename below APIs to linux specific file and declare them
in error_notifier.h
nvgpu_set_error_notifier_locked()
nvgpu_set_error_notifier()
nvgpu_is_error_notifier_set()
Add below new API and use it in fifo_vgpu.c
nvgpu_set_error_notifier_if_empty()
Include <nvgpu/error_notifier.h> wherever new error_notifier codes are used
NVGPU-426
Change-Id: Iaa5bfc150e6e9ec17d797d445c2d6407afe9f4bd
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1593361
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/channel_gk20a.c | 65 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/channel_gk20a.h | 7 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 46 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gr_gk20a.c | 21 |
4 files changed, 47 insertions, 92 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c index e01d6cdb..a0415861 100644 --- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <nvgpu/ltc.h> | 44 | #include <nvgpu/ltc.h> |
45 | #include <nvgpu/barrier.h> | 45 | #include <nvgpu/barrier.h> |
46 | #include <nvgpu/ctxsw_trace.h> | 46 | #include <nvgpu/ctxsw_trace.h> |
47 | #include <nvgpu/error_notifier.h> | ||
47 | 48 | ||
48 | #include "gk20a.h" | 49 | #include "gk20a.h" |
49 | #include "dbg_gpu_gk20a.h" | 50 | #include "dbg_gpu_gk20a.h" |
@@ -339,37 +340,6 @@ int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch, | |||
339 | return ret ? ret : g->ops.fifo.update_runlist(g, ch->runlist_id, ~0, true, true); | 340 | return ret ? ret : g->ops.fifo.update_runlist(g, ch->runlist_id, ~0, true, true); |
340 | } | 341 | } |
341 | 342 | ||
342 | /** | ||
343 | * gk20a_set_error_notifier_locked() | ||
344 | * Should be called with ch->error_notifier_mutex held | ||
345 | */ | ||
346 | void gk20a_set_error_notifier_locked(struct channel_gk20a *ch, __u32 error) | ||
347 | { | ||
348 | if (ch->error_notifier_ref) { | ||
349 | struct timespec time_data; | ||
350 | u64 nsec; | ||
351 | getnstimeofday(&time_data); | ||
352 | nsec = ((u64)time_data.tv_sec) * 1000000000u + | ||
353 | (u64)time_data.tv_nsec; | ||
354 | ch->error_notifier->time_stamp.nanoseconds[0] = | ||
355 | (u32)nsec; | ||
356 | ch->error_notifier->time_stamp.nanoseconds[1] = | ||
357 | (u32)(nsec >> 32); | ||
358 | ch->error_notifier->info32 = error; | ||
359 | ch->error_notifier->status = 0xffff; | ||
360 | |||
361 | nvgpu_err(ch->g, | ||
362 | "error notifier set to %d for ch %d", error, ch->chid); | ||
363 | } | ||
364 | } | ||
365 | |||
366 | void gk20a_set_error_notifier(struct channel_gk20a *ch, __u32 error) | ||
367 | { | ||
368 | nvgpu_mutex_acquire(&ch->error_notifier_mutex); | ||
369 | gk20a_set_error_notifier_locked(ch, error); | ||
370 | nvgpu_mutex_release(&ch->error_notifier_mutex); | ||
371 | } | ||
372 | |||
373 | static void gk20a_wait_until_counter_is_N( | 343 | static void gk20a_wait_until_counter_is_N( |
374 | struct channel_gk20a *ch, nvgpu_atomic_t *counter, int wait_value, | 344 | struct channel_gk20a *ch, nvgpu_atomic_t *counter, int wait_value, |
375 | struct nvgpu_cond *c, const char *caller, const char *counter_name) | 345 | struct nvgpu_cond *c, const char *caller, const char *counter_name) |
@@ -1550,7 +1520,7 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch) | |||
1550 | gk20a_gr_debug_dump(g); | 1520 | gk20a_gr_debug_dump(g); |
1551 | 1521 | ||
1552 | g->ops.fifo.force_reset_ch(ch, | 1522 | g->ops.fifo.force_reset_ch(ch, |
1553 | NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT, true); | 1523 | NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT, true); |
1554 | } | 1524 | } |
1555 | 1525 | ||
1556 | /** | 1526 | /** |
@@ -2210,53 +2180,48 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid) | |||
2210 | err = nvgpu_mutex_init(&c->ioctl_lock); | 2180 | err = nvgpu_mutex_init(&c->ioctl_lock); |
2211 | if (err) | 2181 | if (err) |
2212 | return err; | 2182 | return err; |
2213 | err = nvgpu_mutex_init(&c->error_notifier_mutex); | ||
2214 | if (err) | ||
2215 | goto fail_1; | ||
2216 | err = nvgpu_mutex_init(&c->joblist.cleanup_lock); | 2183 | err = nvgpu_mutex_init(&c->joblist.cleanup_lock); |
2217 | if (err) | 2184 | if (err) |
2218 | goto fail_2; | 2185 | goto fail_1; |
2219 | err = nvgpu_mutex_init(&c->joblist.pre_alloc.read_lock); | 2186 | err = nvgpu_mutex_init(&c->joblist.pre_alloc.read_lock); |
2220 | if (err) | 2187 | if (err) |
2221 | goto fail_3; | 2188 | goto fail_2; |
2222 | err = nvgpu_mutex_init(&c->sync_lock); | 2189 | err = nvgpu_mutex_init(&c->sync_lock); |
2223 | if (err) | 2190 | if (err) |
2224 | goto fail_4; | 2191 | goto fail_3; |
2225 | #if defined(CONFIG_GK20A_CYCLE_STATS) | 2192 | #if defined(CONFIG_GK20A_CYCLE_STATS) |
2226 | err = nvgpu_mutex_init(&c->cyclestate.cyclestate_buffer_mutex); | 2193 | err = nvgpu_mutex_init(&c->cyclestate.cyclestate_buffer_mutex); |
2227 | if (err) | 2194 | if (err) |
2228 | goto fail_5; | 2195 | goto fail_4; |
2229 | err = nvgpu_mutex_init(&c->cs_client_mutex); | 2196 | err = nvgpu_mutex_init(&c->cs_client_mutex); |
2230 | if (err) | 2197 | if (err) |
2231 | goto fail_6; | 2198 | goto fail_5; |
2232 | #endif | 2199 | #endif |
2233 | err = nvgpu_mutex_init(&c->event_id_list_lock); | 2200 | err = nvgpu_mutex_init(&c->event_id_list_lock); |
2234 | if (err) | 2201 | if (err) |
2235 | goto fail_7; | 2202 | goto fail_6; |
2236 | err = nvgpu_mutex_init(&c->dbg_s_lock); | 2203 | err = nvgpu_mutex_init(&c->dbg_s_lock); |
2237 | if (err) | 2204 | if (err) |
2238 | goto fail_8; | 2205 | goto fail_7; |
2239 | 2206 | ||
2240 | nvgpu_list_add(&c->free_chs, &g->fifo.free_chs); | 2207 | nvgpu_list_add(&c->free_chs, &g->fifo.free_chs); |
2241 | 2208 | ||
2242 | return 0; | 2209 | return 0; |
2243 | 2210 | ||
2244 | fail_8: | ||
2245 | nvgpu_mutex_destroy(&c->event_id_list_lock); | ||
2246 | fail_7: | 2211 | fail_7: |
2212 | nvgpu_mutex_destroy(&c->event_id_list_lock); | ||
2213 | fail_6: | ||
2247 | #if defined(CONFIG_GK20A_CYCLE_STATS) | 2214 | #if defined(CONFIG_GK20A_CYCLE_STATS) |
2248 | nvgpu_mutex_destroy(&c->cs_client_mutex); | 2215 | nvgpu_mutex_destroy(&c->cs_client_mutex); |
2249 | fail_6: | ||
2250 | nvgpu_mutex_destroy(&c->cyclestate.cyclestate_buffer_mutex); | ||
2251 | fail_5: | 2216 | fail_5: |
2217 | nvgpu_mutex_destroy(&c->cyclestate.cyclestate_buffer_mutex); | ||
2218 | fail_4: | ||
2252 | #endif | 2219 | #endif |
2253 | nvgpu_mutex_destroy(&c->sync_lock); | 2220 | nvgpu_mutex_destroy(&c->sync_lock); |
2254 | fail_4: | ||
2255 | nvgpu_mutex_destroy(&c->joblist.pre_alloc.read_lock); | ||
2256 | fail_3: | 2221 | fail_3: |
2257 | nvgpu_mutex_destroy(&c->joblist.cleanup_lock); | 2222 | nvgpu_mutex_destroy(&c->joblist.pre_alloc.read_lock); |
2258 | fail_2: | 2223 | fail_2: |
2259 | nvgpu_mutex_destroy(&c->error_notifier_mutex); | 2224 | nvgpu_mutex_destroy(&c->joblist.cleanup_lock); |
2260 | fail_1: | 2225 | fail_1: |
2261 | nvgpu_mutex_destroy(&c->ioctl_lock); | 2226 | nvgpu_mutex_destroy(&c->ioctl_lock); |
2262 | 2227 | ||
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h index 8c9095b2..ff96d0d7 100644 --- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h | |||
@@ -273,11 +273,6 @@ struct channel_gk20a { | |||
273 | bool timeout_debug_dump; | 273 | bool timeout_debug_dump; |
274 | unsigned int timeslice_us; | 274 | unsigned int timeslice_us; |
275 | 275 | ||
276 | struct dma_buf *error_notifier_ref; | ||
277 | struct nvgpu_notification *error_notifier; | ||
278 | void *error_notifier_va; | ||
279 | struct nvgpu_mutex error_notifier_mutex; | ||
280 | |||
281 | struct nvgpu_mutex sync_lock; | 276 | struct nvgpu_mutex sync_lock; |
282 | struct gk20a_channel_sync *sync; | 277 | struct gk20a_channel_sync *sync; |
283 | 278 | ||
@@ -335,8 +330,6 @@ bool gk20a_channel_update_and_check_timeout(struct channel_gk20a *ch, | |||
335 | void gk20a_disable_channel(struct channel_gk20a *ch); | 330 | void gk20a_disable_channel(struct channel_gk20a *ch); |
336 | void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt); | 331 | void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt); |
337 | void gk20a_channel_abort_clean_up(struct channel_gk20a *ch); | 332 | void gk20a_channel_abort_clean_up(struct channel_gk20a *ch); |
338 | void gk20a_set_error_notifier(struct channel_gk20a *ch, __u32 error); | ||
339 | void gk20a_set_error_notifier_locked(struct channel_gk20a *ch, __u32 error); | ||
340 | void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events); | 333 | void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events); |
341 | int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 size, | 334 | int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 size, |
342 | struct priv_cmd_entry *entry); | 335 | struct priv_cmd_entry *entry); |
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c index 648a8c86..38aecc93 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <nvgpu/nvhost.h> | 39 | #include <nvgpu/nvhost.h> |
40 | #include <nvgpu/barrier.h> | 40 | #include <nvgpu/barrier.h> |
41 | #include <nvgpu/ctxsw_trace.h> | 41 | #include <nvgpu/ctxsw_trace.h> |
42 | #include <nvgpu/error_notifier.h> | ||
42 | 43 | ||
43 | #include "gk20a.h" | 44 | #include "gk20a.h" |
44 | #include "mm_gk20a.h" | 45 | #include "mm_gk20a.h" |
@@ -557,7 +558,6 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f) | |||
557 | nvgpu_mutex_destroy(&tsg->event_id_list_lock); | 558 | nvgpu_mutex_destroy(&tsg->event_id_list_lock); |
558 | 559 | ||
559 | nvgpu_mutex_destroy(&c->ioctl_lock); | 560 | nvgpu_mutex_destroy(&c->ioctl_lock); |
560 | nvgpu_mutex_destroy(&c->error_notifier_mutex); | ||
561 | nvgpu_mutex_destroy(&c->joblist.cleanup_lock); | 561 | nvgpu_mutex_destroy(&c->joblist.cleanup_lock); |
562 | nvgpu_mutex_destroy(&c->joblist.pre_alloc.read_lock); | 562 | nvgpu_mutex_destroy(&c->joblist.pre_alloc.read_lock); |
563 | nvgpu_mutex_destroy(&c->sync_lock); | 563 | nvgpu_mutex_destroy(&c->sync_lock); |
@@ -1339,14 +1339,10 @@ static bool gk20a_fifo_ch_timeout_debug_dump_state(struct gk20a *g, | |||
1339 | if (!refch) | 1339 | if (!refch) |
1340 | return verbose; | 1340 | return verbose; |
1341 | 1341 | ||
1342 | nvgpu_mutex_acquire(&refch->error_notifier_mutex); | 1342 | if (nvgpu_is_error_notifier_set(refch, |
1343 | if (refch->error_notifier_ref) { | 1343 | NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT)) |
1344 | u32 err = refch->error_notifier->info32; | 1344 | verbose = refch->timeout_debug_dump; |
1345 | 1345 | ||
1346 | if (err == NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT) | ||
1347 | verbose = refch->timeout_debug_dump; | ||
1348 | } | ||
1349 | nvgpu_mutex_release(&refch->error_notifier_mutex); | ||
1350 | return verbose; | 1346 | return verbose; |
1351 | } | 1347 | } |
1352 | 1348 | ||
@@ -1400,8 +1396,8 @@ void gk20a_fifo_set_ctx_mmu_error_ch(struct gk20a *g, | |||
1400 | { | 1396 | { |
1401 | nvgpu_err(g, | 1397 | nvgpu_err(g, |
1402 | "channel %d generated a mmu fault", refch->chid); | 1398 | "channel %d generated a mmu fault", refch->chid); |
1403 | gk20a_set_error_notifier(refch, | 1399 | nvgpu_set_error_notifier(refch, |
1404 | NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT); | 1400 | NVGPU_ERR_NOTIFIER_FIFO_ERROR_MMU_ERR_FLT); |
1405 | } | 1401 | } |
1406 | 1402 | ||
1407 | void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g, | 1403 | void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g, |
@@ -1939,7 +1935,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch, | |||
1939 | 1935 | ||
1940 | list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) { | 1936 | list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) { |
1941 | if (gk20a_channel_get(ch_tsg)) { | 1937 | if (gk20a_channel_get(ch_tsg)) { |
1942 | gk20a_set_error_notifier(ch_tsg, err_code); | 1938 | nvgpu_set_error_notifier(ch_tsg, err_code); |
1943 | gk20a_channel_put(ch_tsg); | 1939 | gk20a_channel_put(ch_tsg); |
1944 | } | 1940 | } |
1945 | } | 1941 | } |
@@ -1947,7 +1943,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch, | |||
1947 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); | 1943 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); |
1948 | gk20a_fifo_recover_tsg(g, ch->tsgid, verbose); | 1944 | gk20a_fifo_recover_tsg(g, ch->tsgid, verbose); |
1949 | } else { | 1945 | } else { |
1950 | gk20a_set_error_notifier(ch, err_code); | 1946 | nvgpu_set_error_notifier(ch, err_code); |
1951 | gk20a_fifo_recover_ch(g, ch->chid, verbose); | 1947 | gk20a_fifo_recover_ch(g, ch->chid, verbose); |
1952 | } | 1948 | } |
1953 | 1949 | ||
@@ -2108,8 +2104,8 @@ static bool gk20a_fifo_check_ch_ctxsw_timeout(struct channel_gk20a *ch, | |||
2108 | *verbose = ch->timeout_debug_dump; | 2104 | *verbose = ch->timeout_debug_dump; |
2109 | *ms = ch->timeout_accumulated_ms; | 2105 | *ms = ch->timeout_accumulated_ms; |
2110 | if (recover) | 2106 | if (recover) |
2111 | gk20a_set_error_notifier(ch, | 2107 | nvgpu_set_error_notifier(ch, |
2112 | NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); | 2108 | NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT); |
2113 | 2109 | ||
2114 | gk20a_channel_put(ch); | 2110 | gk20a_channel_put(ch); |
2115 | } | 2111 | } |
@@ -2170,8 +2166,8 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg, | |||
2170 | gk20a_channel_put(ch); | 2166 | gk20a_channel_put(ch); |
2171 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { | 2167 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { |
2172 | if (gk20a_channel_get(ch)) { | 2168 | if (gk20a_channel_get(ch)) { |
2173 | gk20a_set_error_notifier(ch, | 2169 | nvgpu_set_error_notifier(ch, |
2174 | NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); | 2170 | NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT); |
2175 | *verbose |= ch->timeout_debug_dump; | 2171 | *verbose |= ch->timeout_debug_dump; |
2176 | gk20a_channel_put(ch); | 2172 | gk20a_channel_put(ch); |
2177 | } | 2173 | } |
@@ -2413,7 +2409,7 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id, | |||
2413 | rc_type = RC_TYPE_PBDMA_FAULT; | 2409 | rc_type = RC_TYPE_PBDMA_FAULT; |
2414 | nvgpu_err(g, | 2410 | nvgpu_err(g, |
2415 | "semaphore acquire timeout!"); | 2411 | "semaphore acquire timeout!"); |
2416 | *error_notifier = NVGPU_CHANNEL_GR_SEMAPHORE_TIMEOUT; | 2412 | *error_notifier = NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT; |
2417 | } | 2413 | } |
2418 | *handled |= pbdma_intr_0_acquire_pending_f(); | 2414 | *handled |= pbdma_intr_0_acquire_pending_f(); |
2419 | } | 2415 | } |
@@ -2431,7 +2427,7 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id, | |||
2431 | 2427 | ||
2432 | if (pbdma_intr_0 & pbdma_intr_0_pbcrc_pending_f()) { | 2428 | if (pbdma_intr_0 & pbdma_intr_0_pbcrc_pending_f()) { |
2433 | *error_notifier = | 2429 | *error_notifier = |
2434 | NVGPU_CHANNEL_PBDMA_PUSHBUFFER_CRC_MISMATCH; | 2430 | NVGPU_ERR_NOTIFIER_PBDMA_PUSHBUFFER_CRC_MISMATCH; |
2435 | rc_type = RC_TYPE_PBDMA_FAULT; | 2431 | rc_type = RC_TYPE_PBDMA_FAULT; |
2436 | } | 2432 | } |
2437 | 2433 | ||
@@ -2485,7 +2481,7 @@ static void gk20a_fifo_pbdma_fault_rc(struct gk20a *g, | |||
2485 | struct channel_gk20a *ch = &f->channel[id]; | 2481 | struct channel_gk20a *ch = &f->channel[id]; |
2486 | 2482 | ||
2487 | if (gk20a_channel_get(ch)) { | 2483 | if (gk20a_channel_get(ch)) { |
2488 | gk20a_set_error_notifier(ch, error_notifier); | 2484 | nvgpu_set_error_notifier(ch, error_notifier); |
2489 | gk20a_fifo_recover_ch(g, id, true); | 2485 | gk20a_fifo_recover_ch(g, id, true); |
2490 | gk20a_channel_put(ch); | 2486 | gk20a_channel_put(ch); |
2491 | } | 2487 | } |
@@ -2497,7 +2493,7 @@ static void gk20a_fifo_pbdma_fault_rc(struct gk20a *g, | |||
2497 | nvgpu_rwsem_down_read(&tsg->ch_list_lock); | 2493 | nvgpu_rwsem_down_read(&tsg->ch_list_lock); |
2498 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { | 2494 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { |
2499 | if (gk20a_channel_get(ch)) { | 2495 | if (gk20a_channel_get(ch)) { |
2500 | gk20a_set_error_notifier(ch, | 2496 | nvgpu_set_error_notifier(ch, |
2501 | error_notifier); | 2497 | error_notifier); |
2502 | gk20a_channel_put(ch); | 2498 | gk20a_channel_put(ch); |
2503 | } | 2499 | } |
@@ -2514,7 +2510,7 @@ u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g, struct fifo_gk20a *f, | |||
2514 | u32 pbdma_intr_1 = gk20a_readl(g, pbdma_intr_1_r(pbdma_id)); | 2510 | u32 pbdma_intr_1 = gk20a_readl(g, pbdma_intr_1_r(pbdma_id)); |
2515 | 2511 | ||
2516 | u32 handled = 0; | 2512 | u32 handled = 0; |
2517 | u32 error_notifier = NVGPU_CHANNEL_PBDMA_ERROR; | 2513 | u32 error_notifier = NVGPU_ERR_NOTIFIER_PBDMA_ERROR; |
2518 | unsigned int rc_type = RC_TYPE_NO_RC; | 2514 | unsigned int rc_type = RC_TYPE_NO_RC; |
2519 | 2515 | ||
2520 | if (pbdma_intr_0) { | 2516 | if (pbdma_intr_0) { |
@@ -2658,8 +2654,8 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id, | |||
2658 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { | 2654 | list_for_each_entry(ch, &tsg->ch_list, ch_entry) { |
2659 | if (!gk20a_channel_get(ch)) | 2655 | if (!gk20a_channel_get(ch)) |
2660 | continue; | 2656 | continue; |
2661 | gk20a_set_error_notifier(ch, | 2657 | nvgpu_set_error_notifier(ch, |
2662 | NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); | 2658 | NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT); |
2663 | gk20a_channel_put(ch); | 2659 | gk20a_channel_put(ch); |
2664 | } | 2660 | } |
2665 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); | 2661 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); |
@@ -2671,8 +2667,8 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id, | |||
2671 | "preempt channel %d timeout", id); | 2667 | "preempt channel %d timeout", id); |
2672 | 2668 | ||
2673 | if (gk20a_channel_get(ch)) { | 2669 | if (gk20a_channel_get(ch)) { |
2674 | gk20a_set_error_notifier(ch, | 2670 | nvgpu_set_error_notifier(ch, |
2675 | NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); | 2671 | NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT); |
2676 | gk20a_fifo_recover_ch(g, id, true); | 2672 | gk20a_fifo_recover_ch(g, id, true); |
2677 | gk20a_channel_put(ch); | 2673 | gk20a_channel_put(ch); |
2678 | } | 2674 | } |
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c index 270d36d6..b3969b60 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <nvgpu/barrier.h> | 40 | #include <nvgpu/barrier.h> |
41 | #include <nvgpu/mm.h> | 41 | #include <nvgpu/mm.h> |
42 | #include <nvgpu/ctxsw_trace.h> | 42 | #include <nvgpu/ctxsw_trace.h> |
43 | #include <nvgpu/error_notifier.h> | ||
43 | 44 | ||
44 | #include "gk20a.h" | 45 | #include "gk20a.h" |
45 | #include "gr_ctx_gk20a.h" | 46 | #include "gr_ctx_gk20a.h" |
@@ -5113,14 +5114,14 @@ static void gk20a_gr_set_error_notifier(struct gk20a *g, | |||
5113 | nvgpu_rwsem_down_read(&tsg->ch_list_lock); | 5114 | nvgpu_rwsem_down_read(&tsg->ch_list_lock); |
5114 | list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) { | 5115 | list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) { |
5115 | if (gk20a_channel_get(ch_tsg)) { | 5116 | if (gk20a_channel_get(ch_tsg)) { |
5116 | gk20a_set_error_notifier(ch_tsg, | 5117 | nvgpu_set_error_notifier(ch_tsg, |
5117 | error_notifier); | 5118 | error_notifier); |
5118 | gk20a_channel_put(ch_tsg); | 5119 | gk20a_channel_put(ch_tsg); |
5119 | } | 5120 | } |
5120 | } | 5121 | } |
5121 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); | 5122 | nvgpu_rwsem_up_read(&tsg->ch_list_lock); |
5122 | } else { | 5123 | } else { |
5123 | gk20a_set_error_notifier(ch, error_notifier); | 5124 | nvgpu_set_error_notifier(ch, error_notifier); |
5124 | } | 5125 | } |
5125 | } | 5126 | } |
5126 | } | 5127 | } |
@@ -5130,7 +5131,7 @@ static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g, | |||
5130 | { | 5131 | { |
5131 | gk20a_dbg_fn(""); | 5132 | gk20a_dbg_fn(""); |
5132 | gk20a_gr_set_error_notifier(g, isr_data, | 5133 | gk20a_gr_set_error_notifier(g, isr_data, |
5133 | NVGPU_CHANNEL_GR_SEMAPHORE_TIMEOUT); | 5134 | NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT); |
5134 | nvgpu_err(g, | 5135 | nvgpu_err(g, |
5135 | "gr semaphore timeout"); | 5136 | "gr semaphore timeout"); |
5136 | return -EINVAL; | 5137 | return -EINVAL; |
@@ -5141,7 +5142,7 @@ static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g, | |||
5141 | { | 5142 | { |
5142 | gk20a_dbg_fn(""); | 5143 | gk20a_dbg_fn(""); |
5143 | gk20a_gr_set_error_notifier(g, isr_data, | 5144 | gk20a_gr_set_error_notifier(g, isr_data, |
5144 | NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY); | 5145 | NVGPU_ERR_NOTIFIER_GR_ILLEGAL_NOTIFY); |
5145 | /* This is an unrecoverable error, reset is needed */ | 5146 | /* This is an unrecoverable error, reset is needed */ |
5146 | nvgpu_err(g, | 5147 | nvgpu_err(g, |
5147 | "gr semaphore timeout"); | 5148 | "gr semaphore timeout"); |
@@ -5156,7 +5157,7 @@ static int gk20a_gr_handle_illegal_method(struct gk20a *g, | |||
5156 | isr_data->data_lo); | 5157 | isr_data->data_lo); |
5157 | if (ret) { | 5158 | if (ret) { |
5158 | gk20a_gr_set_error_notifier(g, isr_data, | 5159 | gk20a_gr_set_error_notifier(g, isr_data, |
5159 | NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY); | 5160 | NVGPU_ERR_NOTIFIER_GR_ILLEGAL_NOTIFY); |
5160 | nvgpu_err(g, "invalid method class 0x%08x" | 5161 | nvgpu_err(g, "invalid method class 0x%08x" |
5161 | ", offset 0x%08x address 0x%08x", | 5162 | ", offset 0x%08x address 0x%08x", |
5162 | isr_data->class_num, isr_data->offset, isr_data->addr); | 5163 | isr_data->class_num, isr_data->offset, isr_data->addr); |
@@ -5169,7 +5170,7 @@ static int gk20a_gr_handle_illegal_class(struct gk20a *g, | |||
5169 | { | 5170 | { |
5170 | gk20a_dbg_fn(""); | 5171 | gk20a_dbg_fn(""); |
5171 | gk20a_gr_set_error_notifier(g, isr_data, | 5172 | gk20a_gr_set_error_notifier(g, isr_data, |
5172 | NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY); | 5173 | NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); |
5173 | nvgpu_err(g, | 5174 | nvgpu_err(g, |
5174 | "invalid class 0x%08x, offset 0x%08x", | 5175 | "invalid class 0x%08x, offset 0x%08x", |
5175 | isr_data->class_num, isr_data->offset); | 5176 | isr_data->class_num, isr_data->offset); |
@@ -5193,7 +5194,7 @@ int gk20a_gr_handle_fecs_error(struct gk20a *g, struct channel_gk20a *ch, | |||
5193 | 5194 | ||
5194 | if (gr_fecs_intr & gr_fecs_host_int_status_umimp_firmware_method_f(1)) { | 5195 | if (gr_fecs_intr & gr_fecs_host_int_status_umimp_firmware_method_f(1)) { |
5195 | gk20a_gr_set_error_notifier(g, isr_data, | 5196 | gk20a_gr_set_error_notifier(g, isr_data, |
5196 | NVGPU_CHANNEL_FECS_ERR_UNIMP_FIRMWARE_METHOD); | 5197 | NVGPU_ERR_NOTIFIER_FECS_ERR_UNIMP_FIRMWARE_METHOD); |
5197 | nvgpu_err(g, | 5198 | nvgpu_err(g, |
5198 | "firmware method error 0x%08x for offset 0x%04x", | 5199 | "firmware method error 0x%08x for offset 0x%04x", |
5199 | gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(6)), | 5200 | gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(6)), |
@@ -5215,7 +5216,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g, | |||
5215 | gr_class_error = | 5216 | gr_class_error = |
5216 | gr_class_error_code_v(gk20a_readl(g, gr_class_error_r())); | 5217 | gr_class_error_code_v(gk20a_readl(g, gr_class_error_r())); |
5217 | gk20a_gr_set_error_notifier(g, isr_data, | 5218 | gk20a_gr_set_error_notifier(g, isr_data, |
5218 | NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY); | 5219 | NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); |
5219 | nvgpu_err(g, "class error 0x%08x, offset 0x%08x," | 5220 | nvgpu_err(g, "class error 0x%08x, offset 0x%08x," |
5220 | "sub channel 0x%08x mme generated %d," | 5221 | "sub channel 0x%08x mme generated %d," |
5221 | " mme pc 0x%08xdata high %d priv status %d" | 5222 | " mme pc 0x%08xdata high %d priv status %d" |
@@ -5244,7 +5245,7 @@ static int gk20a_gr_handle_firmware_method(struct gk20a *g, | |||
5244 | gk20a_dbg_fn(""); | 5245 | gk20a_dbg_fn(""); |
5245 | 5246 | ||
5246 | gk20a_gr_set_error_notifier(g, isr_data, | 5247 | gk20a_gr_set_error_notifier(g, isr_data, |
5247 | NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY); | 5248 | NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY); |
5248 | nvgpu_err(g, | 5249 | nvgpu_err(g, |
5249 | "firmware method 0x%08x, offset 0x%08x for channel %u", | 5250 | "firmware method 0x%08x, offset 0x%08x for channel %u", |
5250 | isr_data->class_num, isr_data->offset, | 5251 | isr_data->class_num, isr_data->offset, |
@@ -6024,7 +6025,7 @@ int gk20a_gr_isr(struct gk20a *g) | |||
6024 | if (need_reset) { | 6025 | if (need_reset) { |
6025 | nvgpu_err(g, "set gr exception notifier"); | 6026 | nvgpu_err(g, "set gr exception notifier"); |
6026 | gk20a_gr_set_error_notifier(g, &isr_data, | 6027 | gk20a_gr_set_error_notifier(g, &isr_data, |
6027 | NVGPU_CHANNEL_GR_EXCEPTION); | 6028 | NVGPU_ERR_NOTIFIER_GR_EXCEPTION); |
6028 | } | 6029 | } |
6029 | } | 6030 | } |
6030 | 6031 | ||