path: root/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
author     Seema Khowala <seemaj@nvidia.com>	2017-02-22 14:52:46 -0500
committer  mobile promotions <svcmobile_promotions@nvidia.com>	2017-03-21 15:08:06 -0400
commit     51f3f542fabb31527024eba5b8f52bf87cc30659 (patch)
tree       3554d88cdd5a442cd3819aff1f041c8d3fa5a739 /drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
parent     65e2c567973d549ad02d09d4a83a1676485961fc (diff)
gpu: nvgpu: add is_preempt_pending fifo ops
The is_preempt_pending fifo op is added because the t19x preempt-done sequence differs from that of legacy chips.

Change-Id: I6b46be1f5b911ae11bbe806968cb8fabb21848e0
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: http://git-master/r/1309678
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
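The new op is reached through the per-chip gpu_ops table, so a chip whose preempt-done sequence differs only needs to override g->ops.fifo.is_preempt_pending and can reuse the rest of the preempt path. A minimal sketch of what such an override could look like is shown below; the txxx_* names are placeholders invented for illustration, while the is_preempt_pending signature and gk20a_init_fifo() come from this change.

/*
 * Illustrative sketch only (not part of this commit): a hypothetical
 * chip HAL installs its own preempt-done poll while inheriting the
 * remaining fifo ops from gk20a_init_fifo(). The txxx_* names are
 * made up for this example.
 */
static int txxx_fifo_is_preempt_pending(struct gk20a *g, u32 id,
        unsigned int id_type, unsigned int timeout_rc_type)
{
        /* chip-specific wait for the preempt-pending state to clear */
        return 0;
}

void txxx_init_fifo(struct gpu_ops *gops)
{
        gk20a_init_fifo(gops);
        gops->fifo.is_preempt_pending = txxx_fifo_is_preempt_pending;
}

With an override like this in place, __locked_fifo_preempt() in the diff below picks up the chip-specific poll automatically via g->ops.fifo.is_preempt_pending.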
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	95
1 file changed, 56 insertions(+), 39 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index b52e5310..de2c3f9e 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -2441,66 +2441,82 @@ void gk20a_fifo_issue_preempt(struct gk20a *g, u32 id, bool is_tsg)
 		fifo_preempt_type_channel_f());
 }
 
-static int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
+int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
+		unsigned int id_type, unsigned int timeout_rc_type)
 {
 	struct nvgpu_timeout timeout;
 	u32 delay = GR_IDLE_CHECK_DEFAULT;
-	u32 ret = 0;
-
-	gk20a_dbg_fn("%d", id);
-
-	/* issue preempt */
-	gk20a_fifo_issue_preempt(g, id, is_tsg);
+	int ret = -EBUSY;
 
-	gk20a_dbg_fn("%d", id);
-	/* wait for preempt */
-	ret = -EBUSY;
 	nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
 			   NVGPU_TIMER_CPU_TIMER);
 	do {
 		if (!(gk20a_readl(g, fifo_preempt_r()) &
 			fifo_preempt_pending_true_f())) {
 			ret = 0;
 			break;
 		}
 
 		usleep_range(delay, delay * 2);
 		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
-	} while (!nvgpu_timeout_expired(&timeout));
+	} while (!nvgpu_timeout_expired_msg(&timeout, "preempt timeout"));
 
-	gk20a_dbg_fn("%d", id);
-	if (ret) {
-		if (is_tsg) {
-			struct tsg_gk20a *tsg = &g->fifo.tsg[id];
-			struct channel_gk20a *ch = NULL;
+	return ret;
+}
 
-			gk20a_err(dev_from_gk20a(g),
-				"preempt TSG %d timeout\n", id);
+void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
+		unsigned int id_type)
+{
+	if (id_type == ID_TYPE_TSG) {
+		struct tsg_gk20a *tsg = &g->fifo.tsg[id];
+		struct channel_gk20a *ch = NULL;
 
-			down_read(&tsg->ch_list_lock);
-			list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
-				if (!gk20a_channel_get(ch))
-					continue;
-				gk20a_set_error_notifier(ch,
-					NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
-				gk20a_channel_put(ch);
-			}
-			up_read(&tsg->ch_list_lock);
-			gk20a_fifo_recover_tsg(g, id, true);
-		} else {
-			struct channel_gk20a *ch = &g->fifo.channel[id];
-
-			gk20a_err(dev_from_gk20a(g),
-				"preempt channel %d timeout\n", id);
+		gk20a_err(dev_from_gk20a(g),
+			"preempt TSG %d timeout\n", id);
+
+		down_read(&tsg->ch_list_lock);
+		list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
+			if (!gk20a_channel_get(ch))
+				continue;
+			gk20a_set_error_notifier(ch,
+				NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
+			gk20a_channel_put(ch);
+		}
+		up_read(&tsg->ch_list_lock);
+		gk20a_fifo_recover_tsg(g, id, true);
+	} else {
+		struct channel_gk20a *ch = &g->fifo.channel[id];
+
+		gk20a_err(dev_from_gk20a(g),
+			"preempt channel %d timeout\n", id);
 
-			if (gk20a_channel_get(ch)) {
-				gk20a_set_error_notifier(ch,
-					NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
-				gk20a_fifo_recover_ch(g, id, true);
-				gk20a_channel_put(ch);
-			}
+		if (gk20a_channel_get(ch)) {
+			gk20a_set_error_notifier(ch,
+				NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
+			gk20a_fifo_recover_ch(g, id, true);
+			gk20a_channel_put(ch);
 		}
 	}
+}
+
+int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
+{
+	int ret;
+	unsigned int id_type;
+
+	gk20a_dbg_fn("%d", id);
+
+	/* issue preempt */
+	gk20a_fifo_issue_preempt(g, id, is_tsg);
+
+	id_type = is_tsg ? ID_TYPE_TSG : ID_TYPE_CHANNEL;
+
+	/* wait for preempt */
+	ret = g->ops.fifo.is_preempt_pending(g, id, id_type,
+			PREEMPT_TIMEOUT_RC);
+
+	if (ret)
+		__locked_fifo_preempt_timeout_rc(g, id, id_type);
 
 	return ret;
 }
@@ -3802,4 +3818,5 @@ void gk20a_init_fifo(struct gpu_ops *gops)
 	gops->fifo.dump_eng_status = gk20a_dump_eng_status;
 	gops->fifo.dump_channel_status_ramfc = gk20a_dump_channel_status_ramfc;
 	gops->fifo.intr_0_error_mask = gk20a_fifo_intr_0_error_mask;
+	gops->fifo.is_preempt_pending = gk20a_fifo_is_preempt_pending;
 }