diff options
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 146
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.h |   6
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h      |   3
-rw-r--r--  drivers/gpu/nvgpu/gm20b/fifo_gm20b.c |   1
4 files changed, 90 insertions(+), 66 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index c32c47fb..a5863567 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -2258,7 +2258,7 @@ static inline void gk20a_fifo_reset_pbdma_header(struct gk20a *g, int pbdma_id) | |||
2258 | pbdma_pb_header_type_non_inc_f()); | 2258 | pbdma_pb_header_type_non_inc_f()); |
2259 | } | 2259 | } |
2260 | 2260 | ||
2261 | static inline void gk20a_fifo_reset_pbdma_method(struct gk20a *g, int pbdma_id, | 2261 | void gk20a_fifo_reset_pbdma_method(struct gk20a *g, int pbdma_id, |
2262 | int pbdma_method_index) | 2262 | int pbdma_method_index) |
2263 | { | 2263 | { |
2264 | u32 pbdma_method_stride; | 2264 | u32 pbdma_method_stride; |
@@ -2299,6 +2299,79 @@ static bool gk20a_fifo_is_sw_method_subch(struct gk20a *g, int pbdma_id, | |||
2299 | return false; | 2299 | return false; |
2300 | } | 2300 | } |
2301 | 2301 | ||
2302 | unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id, | ||
2303 | u32 pbdma_intr_0, u32 *handled, u32 *error_notifier) | ||
2304 | { | ||
2305 | struct fifo_gk20a *f = &g->fifo; | ||
2306 | unsigned int rc_type = RC_TYPE_NO_RC; | ||
2307 | int i; | ||
2308 | |||
2309 | if ((f->intr.pbdma.device_fatal_0 | | ||
2310 | f->intr.pbdma.channel_fatal_0 | | ||
2311 | f->intr.pbdma.restartable_0) & pbdma_intr_0) { | ||
2312 | nvgpu_err(g, | ||
2313 | "pbdma_intr_0(%d):0x%08x PBH: %08x " | ||
2314 | "SHADOW: %08x M0: %08x %08x %08x %08x ", | ||
2315 | pbdma_id, pbdma_intr_0, | ||
2316 | gk20a_readl(g, pbdma_pb_header_r(pbdma_id)), | ||
2317 | gk20a_readl(g, pbdma_hdr_shadow_r(pbdma_id)), | ||
2318 | gk20a_readl(g, pbdma_method0_r(pbdma_id)), | ||
2319 | gk20a_readl(g, pbdma_method1_r(pbdma_id)), | ||
2320 | gk20a_readl(g, pbdma_method2_r(pbdma_id)), | ||
2321 | gk20a_readl(g, pbdma_method3_r(pbdma_id)) | ||
2322 | ); | ||
2323 | rc_type = RC_TYPE_PBDMA_FAULT; | ||
2324 | *handled |= ((f->intr.pbdma.device_fatal_0 | | ||
2325 | f->intr.pbdma.channel_fatal_0 | | ||
2326 | f->intr.pbdma.restartable_0) & | ||
2327 | pbdma_intr_0); | ||
2328 | } | ||
2329 | |||
2330 | if (pbdma_intr_0 & pbdma_intr_0_acquire_pending_f()) { | ||
2331 | u32 val = gk20a_readl(g, pbdma_acquire_r(pbdma_id)); | ||
2332 | |||
2333 | val &= ~pbdma_acquire_timeout_en_enable_f(); | ||
2334 | gk20a_writel(g, pbdma_acquire_r(pbdma_id), val); | ||
2335 | if (g->timeouts_enabled) { | ||
2336 | rc_type = RC_TYPE_PBDMA_FAULT; | ||
2337 | nvgpu_err(g, | ||
2338 | "semaphore acquire timeout!"); | ||
2339 | } | ||
2340 | *handled |= pbdma_intr_0_acquire_pending_f(); | ||
2341 | } | ||
2342 | |||
2343 | if (pbdma_intr_0 & pbdma_intr_0_pbentry_pending_f()) { | ||
2344 | gk20a_fifo_reset_pbdma_header(g, pbdma_id); | ||
2345 | gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0); | ||
2346 | rc_type = RC_TYPE_PBDMA_FAULT; | ||
2347 | } | ||
2348 | |||
2349 | if (pbdma_intr_0 & pbdma_intr_0_method_pending_f()) { | ||
2350 | gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0); | ||
2351 | rc_type = RC_TYPE_PBDMA_FAULT; | ||
2352 | } | ||
2353 | |||
2354 | if (pbdma_intr_0 & pbdma_intr_0_pbcrc_pending_f()) { | ||
2355 | *error_notifier = | ||
2356 | NVGPU_CHANNEL_PBDMA_PUSHBUFFER_CRC_MISMATCH; | ||
2357 | rc_type = RC_TYPE_PBDMA_FAULT; | ||
2358 | } | ||
2359 | |||
2360 | if (pbdma_intr_0 & pbdma_intr_0_device_pending_f()) { | ||
2361 | gk20a_fifo_reset_pbdma_header(g, pbdma_id); | ||
2362 | |||
2363 | for (i = 0; i < 4; i++) { | ||
2364 | if (gk20a_fifo_is_sw_method_subch(g, | ||
2365 | pbdma_id, i)) | ||
2366 | gk20a_fifo_reset_pbdma_method(g, | ||
2367 | pbdma_id, i); | ||
2368 | } | ||
2369 | rc_type = RC_TYPE_PBDMA_FAULT; | ||
2370 | } | ||
2371 | |||
2372 | return rc_type; | ||
2373 | } | ||
2374 | |||
2302 | static u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g, | 2375 | static u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g, |
2303 | struct fifo_gk20a *f, | 2376 | struct fifo_gk20a *f, |
2304 | u32 pbdma_id) | 2377 | u32 pbdma_id) |
@@ -2309,75 +2382,15 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g, | |||
2309 | 2382 | ||
2310 | u32 handled = 0; | 2383 | u32 handled = 0; |
2311 | u32 error_notifier = NVGPU_CHANNEL_PBDMA_ERROR; | 2384 | u32 error_notifier = NVGPU_CHANNEL_PBDMA_ERROR; |
2312 | bool reset = false; | 2385 | unsigned int rc_type = RC_TYPE_NO_RC; |
2313 | int i; | ||
2314 | 2386 | ||
2315 | gk20a_dbg_fn(""); | 2387 | gk20a_dbg_fn(""); |
2316 | 2388 | ||
2317 | gk20a_dbg(gpu_dbg_intr, "pbdma id intr pending %d %08x %08x", pbdma_id, | 2389 | gk20a_dbg(gpu_dbg_intr, "pbdma id intr pending %d %08x %08x", pbdma_id, |
2318 | pbdma_intr_0, pbdma_intr_1); | 2390 | pbdma_intr_0, pbdma_intr_1); |
2319 | if (pbdma_intr_0) { | 2391 | if (pbdma_intr_0) { |
2320 | if ((f->intr.pbdma.device_fatal_0 | | 2392 | rc_type = g->ops.fifo.handle_pbdma_intr_0(g, pbdma_id, |
2321 | f->intr.pbdma.channel_fatal_0 | | 2393 | pbdma_intr_0, &handled, &error_notifier); |
2322 | f->intr.pbdma.restartable_0) & pbdma_intr_0) { | ||
2323 | nvgpu_err(g, | ||
2324 | "pbdma_intr_0(%d):0x%08x PBH: %08x SHADOW: %08x M0: %08x %08x %08x %08x", | ||
2325 | pbdma_id, pbdma_intr_0, | ||
2326 | gk20a_readl(g, pbdma_pb_header_r(pbdma_id)), | ||
2327 | gk20a_readl(g, pbdma_hdr_shadow_r(pbdma_id)), | ||
2328 | gk20a_readl(g, pbdma_method0_r(pbdma_id)), | ||
2329 | gk20a_readl(g, pbdma_method1_r(pbdma_id)), | ||
2330 | gk20a_readl(g, pbdma_method2_r(pbdma_id)), | ||
2331 | gk20a_readl(g, pbdma_method3_r(pbdma_id)) | ||
2332 | ); | ||
2333 | reset = true; | ||
2334 | handled |= ((f->intr.pbdma.device_fatal_0 | | ||
2335 | f->intr.pbdma.channel_fatal_0 | | ||
2336 | f->intr.pbdma.restartable_0) & | ||
2337 | pbdma_intr_0); | ||
2338 | } | ||
2339 | |||
2340 | if (pbdma_intr_0 & pbdma_intr_0_acquire_pending_f()) { | ||
2341 | u32 val = gk20a_readl(g, pbdma_acquire_r(pbdma_id)); | ||
2342 | val &= ~pbdma_acquire_timeout_en_enable_f(); | ||
2343 | gk20a_writel(g, pbdma_acquire_r(pbdma_id), val); | ||
2344 | if (g->timeouts_enabled) { | ||
2345 | reset = true; | ||
2346 | nvgpu_err(g, | ||
2347 | "semaphore acquire timeout!"); | ||
2348 | } | ||
2349 | handled |= pbdma_intr_0_acquire_pending_f(); | ||
2350 | } | ||
2351 | |||
2352 | if (pbdma_intr_0 & pbdma_intr_0_pbentry_pending_f()) { | ||
2353 | gk20a_fifo_reset_pbdma_header(g, pbdma_id); | ||
2354 | gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0); | ||
2355 | reset = true; | ||
2356 | } | ||
2357 | |||
2358 | if (pbdma_intr_0 & pbdma_intr_0_method_pending_f()) { | ||
2359 | gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0); | ||
2360 | reset = true; | ||
2361 | } | ||
2362 | |||
2363 | if (pbdma_intr_0 & pbdma_intr_0_pbcrc_pending_f()) { | ||
2364 | error_notifier = | ||
2365 | NVGPU_CHANNEL_PBDMA_PUSHBUFFER_CRC_MISMATCH; | ||
2366 | reset = true; | ||
2367 | } | ||
2368 | |||
2369 | if (pbdma_intr_0 & pbdma_intr_0_device_pending_f()) { | ||
2370 | gk20a_fifo_reset_pbdma_header(g, pbdma_id); | ||
2371 | |||
2372 | for (i = 0; i < 4; i++) { | ||
2373 | if (gk20a_fifo_is_sw_method_subch(g, | ||
2374 | pbdma_id, i)) | ||
2375 | gk20a_fifo_reset_pbdma_method(g, | ||
2376 | pbdma_id, i); | ||
2377 | } | ||
2378 | reset = true; | ||
2379 | } | ||
2380 | |||
2381 | gk20a_writel(g, pbdma_intr_0_r(pbdma_id), pbdma_intr_0); | 2394 | gk20a_writel(g, pbdma_intr_0_r(pbdma_id), pbdma_intr_0); |
2382 | } | 2395 | } |
2383 | 2396 | ||
@@ -2386,11 +2399,11 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g, | |||
2386 | if (pbdma_intr_1) { | 2399 | if (pbdma_intr_1) { |
2387 | nvgpu_err(g, "channel hce error: pbdma_intr_1(%d): 0x%08x", | 2400 | nvgpu_err(g, "channel hce error: pbdma_intr_1(%d): 0x%08x", |
2388 | pbdma_id, pbdma_intr_1); | 2401 | pbdma_id, pbdma_intr_1); |
2389 | reset = true; | 2402 | rc_type = RC_TYPE_PBDMA_FAULT; |
2390 | gk20a_writel(g, pbdma_intr_1_r(pbdma_id), pbdma_intr_1); | 2403 | gk20a_writel(g, pbdma_intr_1_r(pbdma_id), pbdma_intr_1); |
2391 | } | 2404 | } |
2392 | 2405 | ||
2393 | if (reset) { | 2406 | if (rc_type == RC_TYPE_PBDMA_FAULT) { |
2394 | /* Remove the channel from runlist */ | 2407 | /* Remove the channel from runlist */ |
2395 | u32 id = fifo_pbdma_status_id_v(status); | 2408 | u32 id = fifo_pbdma_status_id_v(status); |
2396 | if (fifo_pbdma_status_id_type_v(status) | 2409 | if (fifo_pbdma_status_id_type_v(status) |
@@ -4277,4 +4290,5 @@ void gk20a_init_fifo(struct gpu_ops *gops) | |||
4277 | gops->fifo.pbdma_acquire_val = gk20a_fifo_pbdma_acquire_val; | 4290 | gops->fifo.pbdma_acquire_val = gk20a_fifo_pbdma_acquire_val; |
4278 | gops->fifo.teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg; | 4291 | gops->fifo.teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg; |
4279 | gops->fifo.handle_sched_error = gk20a_fifo_handle_sched_error; | 4292 | gops->fifo.handle_sched_error = gk20a_fifo_handle_sched_error; |
4293 | gops->fifo.handle_pbdma_intr_0 = gk20a_fifo_handle_pbdma_intr_0; | ||
4280 | } | 4294 | } |
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index dc43c532..70addf13 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -42,6 +42,8 @@ | |||
42 | 42 | ||
43 | #define RC_TYPE_NORMAL 0 | 43 | #define RC_TYPE_NORMAL 0 |
44 | #define RC_TYPE_MMU_FAULT 1 | 44 | #define RC_TYPE_MMU_FAULT 1 |
45 | #define RC_TYPE_PBDMA_FAULT 2 | ||
46 | #define RC_TYPE_NO_RC 0xff | ||
45 | 47 | ||
46 | /* | 48 | /* |
47 | * Number of entries in the kickoff latency buffer, used to calculate | 49 | * Number of entries in the kickoff latency buffer, used to calculate |
@@ -391,4 +393,8 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg, | |||
391 | bool *verbose, u32 *ms); | 393 | bool *verbose, u32 *ms); |
392 | bool gk20a_fifo_handle_sched_error(struct gk20a *g); | 394 | bool gk20a_fifo_handle_sched_error(struct gk20a *g); |
393 | 395 | ||
396 | void gk20a_fifo_reset_pbdma_method(struct gk20a *g, int pbdma_id, | ||
397 | int pbdma_method_index); | ||
398 | unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id, | ||
399 | u32 pbdma_intr_0, u32 *handled, u32 *error_notifier); | ||
394 | #endif /*__GR_GK20A_H__*/ | 400 | #endif /*__GR_GK20A_H__*/ |
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index e4450185..7d7d573a 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -474,6 +474,9 @@ struct gpu_ops { | |||
474 | struct mmu_fault_info *mmfault); | 474 | struct mmu_fault_info *mmfault); |
475 | bool (*handle_sched_error)(struct gk20a *g); | 475 | bool (*handle_sched_error)(struct gk20a *g); |
476 | bool (*handle_ctxsw_timeout)(struct gk20a *g, u32 fifo_intr); | 476 | bool (*handle_ctxsw_timeout)(struct gk20a *g, u32 fifo_intr); |
477 | unsigned int (*handle_pbdma_intr_0)(struct gk20a *g, | ||
478 | u32 pbdma_id, u32 pbdma_intr_0, | ||
479 | u32 *handled, u32 *error_notifier); | ||
477 | } fifo; | 480 | } fifo; |
478 | struct pmu_v { | 481 | struct pmu_v { |
479 | /*used for change of enum zbc update cmd id from ver 0 to ver1*/ | 482 | /*used for change of enum zbc update cmd id from ver 0 to ver1*/ |
diff --git a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
index 2f705004..af2a8cd2 100644
--- a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
@@ -229,4 +229,5 @@ void gm20b_init_fifo(struct gpu_ops *gops) | |||
229 | gops->fifo.reset_enable_hw = gk20a_init_fifo_reset_enable_hw; | 229 | gops->fifo.reset_enable_hw = gk20a_init_fifo_reset_enable_hw; |
230 | gops->fifo.teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg; | 230 | gops->fifo.teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg; |
231 | gops->fifo.handle_sched_error = gk20a_fifo_handle_sched_error; | 231 | gops->fifo.handle_sched_error = gk20a_fifo_handle_sched_error; |
232 | gops->fifo.handle_pbdma_intr_0 = gk20a_fifo_handle_pbdma_intr_0; | ||
232 | } | 233 | } |