path: root/drivers/gpu/nvgpu/gv11b
author      Seema Khowala <seemaj@nvidia.com>    2017-02-22 19:01:20 -0500
committer   mobile promotions <svcmobile_promotions@nvidia.com>    2017-03-21 15:08:06 -0400
commit      8929fb2b1db9ddb986070efd998772e56e669e70 (patch)
tree        3af946cac178ca8d4e2c06191b7934e32d7c5d3e /drivers/gpu/nvgpu/gv11b
parent      18a0a89f4535d2f14e4a282ac01482a4c3e2e805 (diff)
gpu: nvgpu: gv11b: implement is_preempt_pending
Preempt completion should be decided based on pbdma and engine status. The preempt_pending field is no longer used to detect whether a preempt has finished. Add a new function to be used for preempting a channel or TSG during recovery. If a preempt times out while in recovery, do not issue recovery again.

JIRA GPUT19X-7

Change-Id: I0d69d12ee6a118f6628b33be5ba387c72983b32a
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: http://git-master/r/1309850
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
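For orientation, the sketch below shows roughly how the recovery path is expected to drive the new hook; it is not part of this commit. example_recover_tsg() is a hypothetical caller, while the preempt_ch_tsg op, ID_TYPE_TSG, and the PREEMPT_TIMEOUT_* constants it uses are the ones wired up by this patch.

/*
 * Hypothetical caller, for illustration only (not part of this commit).
 * The constants it relies on are introduced by this patch:
 *   PREEMPT_TIMEOUT_RC   - normal preempt; a timeout escalates to recovery
 *                          via __locked_fifo_preempt_timeout_rc().
 *   PREEMPT_TIMEOUT_NORC - preempt issued from within recovery; a timeout
 *                          must not trigger recovery again.
 */
static void example_recover_tsg(struct gk20a *g, u32 tsgid)
{
        int err;

        /*
         * Recovery path: preempt the TSG without re-entering recovery.
         * gv11b_fifo_is_preempt_pending() records engines that still hold
         * this TSG in runlist_info[runlist_id].reset_eng_bitmask so they
         * can be reset individually.
         */
        err = g->ops.fifo.preempt_ch_tsg(g, tsgid, ID_TYPE_TSG,
                        PREEMPT_TIMEOUT_NORC);
        if (err)
                gk20a_dbg_info("tsg %d preempt timed out during recovery",
                                tsgid);
}

The key design point is the timeout_rc_type argument: PREEMPT_TIMEOUT_RC lets a timed-out preempt escalate to recovery, while PREEMPT_TIMEOUT_NORC is used from within recovery so a timeout can never re-enter it; hung engines are reported through reset_eng_bitmask instead.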
Diffstat (limited to 'drivers/gpu/nvgpu/gv11b')
-rw-r--r--    drivers/gpu/nvgpu/gv11b/fifo_gv11b.c | 305
1 file changed, 305 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index eb8f44d7..b671628a 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -16,6 +16,7 @@
 #include <linux/types.h>
 
 #include "nvgpu/semaphore.h"
+#include <nvgpu/timers.h>
 
 #include "gk20a/gk20a.h"
 
@@ -323,6 +324,306 @@ static u32 gv11b_fifo_intr_0_error_mask(struct gk20a *g)
 	return intr_0_error_mask;
 }
 
+static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id,
+		u32 pbdma_id, unsigned int timeout_rc_type)
+{
+	struct nvgpu_timeout timeout;
+	unsigned long delay = GR_IDLE_CHECK_DEFAULT;
+	u32 pbdma_stat;
+	u32 chan_stat;
+	int ret = -EBUSY;
+
+	/*
+	 * If the PBDMA has a stalling interrupt and receives a NACK, the PBDMA
+	 * won't save out until the STALLING interrupt is cleared. Note that
+	 * the stalling interrupt need not be directly addressed, as simply
+	 * clearing of the interrupt bit will be sufficient to allow the PBDMA
+	 * to save out. If the stalling interrupt was due to a SW method or
+	 * another deterministic failure, the PBDMA will assert it when the
+	 * channel is reloaded/resumed. Note that the fault will still be
+	 * reported to SW.
+	 */
+
+	if (timeout_rc_type == PREEMPT_TIMEOUT_NORC) {
+		/* called from recovery */
+		u32 pbdma_intr_0, pbdma_intr_1;
+
+		pbdma_intr_0 = gk20a_readl(g, pbdma_intr_0_r(pbdma_id));
+		pbdma_intr_1 = gk20a_readl(g, pbdma_intr_1_r(pbdma_id));
+
+		if (pbdma_intr_0)
+			gk20a_writel(g, pbdma_intr_0_r(pbdma_id), pbdma_intr_0);
+		if (pbdma_intr_1)
+			gk20a_writel(g, pbdma_intr_1_r(pbdma_id), pbdma_intr_1);
+	}
+
+	/* Verify that ch/tsg is no longer on the pbdma */
+	do {
+		pbdma_stat = gk20a_readl(g, fifo_pbdma_status_r(pbdma_id));
+		chan_stat = fifo_pbdma_status_chan_status_v(pbdma_stat);
+
+		gk20a_dbg_info("wait preempt pbdma");
+
+		if (chan_stat ==
+			fifo_pbdma_status_chan_status_valid_v() ||
+			chan_stat ==
+			fifo_pbdma_status_chan_status_chsw_save_v()) {
+
+			if (id != fifo_pbdma_status_id_v(pbdma_stat)) {
+				ret = 0;
+				break;
+			}
+
+		} else if (chan_stat ==
+			fifo_pbdma_status_chan_status_chsw_load_v()) {
+
+			if (id != fifo_pbdma_status_next_id_v(pbdma_stat)) {
+				ret = 0;
+				break;
+			}
+
+		} else if (chan_stat ==
+			fifo_pbdma_status_chan_status_chsw_switch_v()) {
+
+			if ((id != fifo_pbdma_status_next_id_v(pbdma_stat)) &&
+				(id != fifo_pbdma_status_id_v(pbdma_stat))) {
+				ret = 0;
+				break;
+			}
+		} else {
+			/* pbdma status is invalid i.e. it is not loaded */
+			ret = 0;
+			break;
+		}
+
+		usleep_range(delay, delay * 2);
+		delay = min_t(unsigned long,
+				delay << 1, GR_IDLE_CHECK_MAX);
+	} while (!nvgpu_timeout_expired_msg(&timeout,
+			"preempt timeout pbdma"));
+	return ret;
+}
+
+static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
+		u32 engine_idx, u32 *reset_eng_bitmask,
+		unsigned int timeout_rc_type)
+{
+	struct nvgpu_timeout timeout;
+	unsigned long delay = GR_IDLE_CHECK_DEFAULT;
+	u32 eng_stat;
+	u32 ctx_stat;
+	int ret = -EBUSY;
+
+	/* Check if ch/tsg has saved off the engine or if ctxsw is hung */
+	do {
+		eng_stat = gk20a_readl(g, fifo_engine_status_r(engine_idx));
+		ctx_stat = fifo_engine_status_ctx_status_v(eng_stat);
+
+		if (ctx_stat ==
+			fifo_engine_status_ctx_status_ctxsw_switch_v()) {
+			gk20a_dbg_info("engine save hasn't started yet");
+
+		} else if (ctx_stat ==
+			fifo_engine_status_ctx_status_valid_v() ||
+			ctx_stat ==
+			fifo_engine_status_ctx_status_ctxsw_save_v()) {
+
+			if (timeout_rc_type == PREEMPT_TIMEOUT_NORC) {
+				/* called from recovery, eng seems to be hung */
+				if (id == fifo_engine_status_id_v(eng_stat)) {
+					*reset_eng_bitmask |= BIT(engine_idx);
+					ret = 0;
+					break;
+				}
+			}
+
+		} else if (ctx_stat ==
+			fifo_engine_status_ctx_status_ctxsw_load_v()) {
+
+			if (timeout_rc_type == PREEMPT_TIMEOUT_NORC) {
+				/* called from recovery, eng seems to be hung */
+				if (id ==
+					fifo_engine_status_next_id_v(eng_stat)) {
+
+					*reset_eng_bitmask |= BIT(engine_idx);
+					ret = 0;
+					break;
+				}
+			}
+
+		} else {
+			/* Preempt should be finished */
+			ret = 0;
+			break;
+		}
+
+		usleep_range(delay, delay * 2);
+		delay = min_t(unsigned long,
+				delay << 1, GR_IDLE_CHECK_MAX);
+	} while (!nvgpu_timeout_expired_msg(&timeout,
+			"preempt timeout engine"));
+	return ret;
+}
+
+int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
+		unsigned int id_type, unsigned int timeout_rc_type)
+{
+	struct fifo_gk20a *f = &g->fifo;
+	unsigned long runlist_served_pbdmas;
+	unsigned long runlist_served_engines;
+	u32 pbdma_id;
+	u32 act_eng_id;
+	u32 runlist_id;
+	int func_ret;
+	int ret = 0;
+
+	gk20a_dbg_fn("");
+
+	if (id_type == ID_TYPE_TSG)
+		runlist_id = f->tsg[id].runlist_id;
+	else
+		runlist_id = f->channel[id].runlist_id;
+
+	runlist_served_pbdmas = f->runlist_info[runlist_id].pbdma_bitmask;
+	runlist_served_engines = f->runlist_info[runlist_id].eng_bitmask;
+
+	for_each_set_bit(pbdma_id, &runlist_served_pbdmas, f->num_pbdma) {
+
+		func_ret = gv11b_fifo_poll_pbdma_chan_status(g, id, pbdma_id,
+				timeout_rc_type);
+		if (func_ret != 0) {
+			gk20a_dbg_info("preempt timeout pbdma %d", pbdma_id);
+			ret |= func_ret;
+		}
+	}
+
+	f->runlist_info[runlist_id].reset_eng_bitmask = 0;
+
+	for_each_set_bit(act_eng_id, &runlist_served_engines, f->num_engines) {
+
+		func_ret = gv11b_fifo_poll_eng_ctx_status(g, id, act_eng_id,
+				&f->runlist_info[runlist_id].reset_eng_bitmask,
+				timeout_rc_type);
+
+		if (func_ret != 0) {
+			gk20a_dbg_info("preempt timeout engine %d", act_eng_id);
+			ret |= func_ret;
+		}
+	}
+
+	return ret;
+}
+
+static int gv11b_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
+{
+	struct fifo_gk20a *f = &g->fifo;
+	u32 ret = 0;
+	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
+	u32 mutex_ret = 0;
+	u32 runlist_id;
+
+	gk20a_dbg_fn("%d", hw_chid);
+
+	runlist_id = f->channel[hw_chid].runlist_id;
+	gk20a_dbg_fn("runlist_id %d", runlist_id);
+
+	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].mutex);
+
+	mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+
+	ret = __locked_fifo_preempt(g, hw_chid, false);
+
+	if (!mutex_ret)
+		pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+
+	nvgpu_mutex_release(&f->runlist_info[runlist_id].mutex);
+
+	return ret;
+}
+
+static int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
+{
+	struct fifo_gk20a *f = &g->fifo;
+	u32 ret = 0;
+	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
+	u32 mutex_ret = 0;
+	u32 runlist_id;
+
+	gk20a_dbg_fn("%d", tsgid);
+
+	runlist_id = f->tsg[tsgid].runlist_id;
+	gk20a_dbg_fn("runlist_id %d", runlist_id);
+
+	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].mutex);
+
+	mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+
+	ret = __locked_fifo_preempt(g, tsgid, true);
+
+	if (!mutex_ret)
+		pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+
+	nvgpu_mutex_release(&f->runlist_info[runlist_id].mutex);
+
+	return ret;
+}
+
+static int __locked_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
+		unsigned int id_type, unsigned int timeout_rc_type)
+{
+	int ret;
+
+	/* issue preempt */
+	gk20a_fifo_issue_preempt(g, id, id_type);
+
+	/* wait for preempt */
+	ret = g->ops.fifo.is_preempt_pending(g, id, id_type,
+			timeout_rc_type);
+
+	if (ret && (timeout_rc_type == PREEMPT_TIMEOUT_RC))
+		__locked_fifo_preempt_timeout_rc(g, id, id_type);
+
+	return ret;
+}
+
+
+static int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
+		unsigned int id_type, unsigned int timeout_rc_type)
+{
+	struct fifo_gk20a *f = &g->fifo;
+	u32 ret = 0;
+	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
+	u32 mutex_ret = 0;
+	u32 runlist_id;
+
+	if (id_type == ID_TYPE_TSG)
+		runlist_id = f->tsg[id].runlist_id;
+	else if (id_type == ID_TYPE_CHANNEL)
+		runlist_id = f->channel[id].runlist_id;
+	else
+		return -EINVAL;
+
+	if (runlist_id >= g->fifo.max_runlists) {
+		gk20a_dbg_info("runlist_id = %d", runlist_id);
+		return -EINVAL;
+	}
+
+	gk20a_dbg_fn("preempt id = %d, runlist_id = %d", id, runlist_id);
+
+	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].mutex);
+
+	mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+
+	ret = __locked_fifo_preempt_ch_tsg(g, id, id_type, timeout_rc_type);
+
+	if (!mutex_ret)
+		pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+
+	nvgpu_mutex_release(&f->runlist_info[runlist_id].mutex);
+
+	return ret;
+}
+
 void gv11b_init_fifo(struct gpu_ops *gops)
 {
 	gp10b_init_fifo(gops);
@@ -346,4 +647,8 @@ void gv11b_init_fifo(struct gpu_ops *gops)
 	gops->fifo.dump_eng_status = gv11b_dump_eng_status;
 	gops->fifo.dump_channel_status_ramfc = gv11b_dump_channel_status_ramfc;
 	gops->fifo.intr_0_error_mask = gv11b_fifo_intr_0_error_mask;
+	gops->fifo.preempt_channel = gv11b_fifo_preempt_channel;
+	gops->fifo.preempt_tsg = gv11b_fifo_preempt_tsg;
+	gops->fifo.is_preempt_pending = gv11b_fifo_is_preempt_pending;
+	gops->fifo.preempt_ch_tsg = gv11b_fifo_preempt_ch_tsg;
 }