-rw-r--r--  drivers/gpu/nvgpu/gv11b/fifo_gv11b.c                      | 370 +-
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fifo_gv11b.h                      |   3 +
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fifo_gv11b.h  |  16 +
3 files changed, 388 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index 2044bb22..57fd24de 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -18,8 +18,10 @@
 #include "nvgpu/semaphore.h"
 #include <nvgpu/timers.h>
 
+
 #include "gk20a/gk20a.h"
 #include "gk20a/fifo_gk20a.h"
+#include "gk20a/ctxsw_trace_gk20a.h"
 
 #include "gp10b/fifo_gp10b.h"
 
@@ -360,6 +362,9 @@ static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id,
 		gk20a_writel(g, pbdma_intr_1_r(pbdma_id), pbdma_intr_1);
 	}
 
+	nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
+			   NVGPU_TIMER_CPU_TIMER);
+
 	/* Verify that ch/tsg is no longer on the pbdma */
 	do {
 		pbdma_stat = gk20a_readl(g, fifo_pbdma_status_r(pbdma_id));
@@ -417,6 +422,9 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
 	u32 ctx_stat;
 	int ret = -EBUSY;
 
+	nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
+			   NVGPU_TIMER_CPU_TIMER);
+
 	/* Check if ch/tsg has saved off the engine or if ctxsw is hung */
 	do {
 		eng_stat = gk20a_readl(g, fifo_engine_status_r(engine_idx));
@@ -472,12 +480,153 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
 			ret = 0;
 			break;
 		}
+		usleep_range(delay, delay * 2);
+		delay = min_t(unsigned long,
+				delay << 1, GR_IDLE_CHECK_MAX);
+	} while (!nvgpu_timeout_expired_msg(&timeout,
+			"preempt timeout eng"));
+	return ret;
+}
+
+static void gv11b_reset_eng_faulted_ch(struct gk20a *g, u32 hw_chid)
+{
+	u32 reg_val;
+
+	reg_val = gk20a_readl(g, ccsr_channel_r(hw_chid));
+	reg_val |= ccsr_channel_eng_faulted_reset_f();
+	gk20a_writel(g, ccsr_channel_r(hw_chid), reg_val);
+}
+
+static void gv11b_reset_eng_faulted_tsg(struct tsg_gk20a *tsg)
+{
+	struct gk20a *g = tsg->g;
+	struct channel_gk20a *ch;
+
+	down_read(&tsg->ch_list_lock);
+	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
+		gv11b_reset_eng_faulted_ch(g, ch->hw_chid);
+	}
+	up_read(&tsg->ch_list_lock);
+}
+
+static void gv11b_reset_pbdma_faulted_ch(struct gk20a *g, u32 hw_chid)
+{
+	u32 reg_val;
+
+	reg_val = gk20a_readl(g, ccsr_channel_r(hw_chid));
+	reg_val |= ccsr_channel_pbdma_faulted_reset_f();
+	gk20a_writel(g, ccsr_channel_r(hw_chid), reg_val);
+}
+
+static void gv11b_reset_pbdma_faulted_tsg(struct tsg_gk20a *tsg)
+{
+	struct gk20a *g = tsg->g;
+	struct channel_gk20a *ch;
+
+	down_read(&tsg->ch_list_lock);
+	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
+		gv11b_reset_pbdma_faulted_ch(g, ch->hw_chid);
+	}
+	up_read(&tsg->ch_list_lock);
+}
+
+u32 gv11b_fifo_get_runlists_mask(struct gk20a *g, u32 act_eng_bitmask,
+		u32 id, unsigned int id_type, unsigned int rc_type,
+		struct mmu_fault_info *mmfault)
+{
+	u32 runlists_mask = 0;
+	struct fifo_gk20a *f = &g->fifo;
+	struct fifo_runlist_info_gk20a *runlist;
+	u32 pbdma_bitmask = 0;
+
+	if (id_type != ID_TYPE_UNKNOWN) {
+		if (id_type == ID_TYPE_TSG)
+			runlists_mask = fifo_sched_disable_runlist_m(
+						f->tsg[id].runlist_id);
+		else
+			runlists_mask = fifo_sched_disable_runlist_m(
+						f->channel[id].runlist_id);
+	} else {
+		if (rc_type == RC_TYPE_MMU_FAULT && mmfault) {
+			if (mmfault->faulted_pbdma != FIFO_INVAL_PBDMA_ID)
+				pbdma_bitmask = BIT(mmfault->faulted_pbdma);
+
+			for (id = 0; id < f->max_runlists; id++) {
+
+				runlist = &f->runlist_info[id];
+
+				if (runlist->eng_bitmask & act_eng_bitmask)
+					runlists_mask |=
+					fifo_sched_disable_runlist_m(id);
+
+				if (runlist->pbdma_bitmask & pbdma_bitmask)
+					runlists_mask |=
+					fifo_sched_disable_runlist_m(id);
+			}
+		} else {
+			/* ID is unknown */
+			for (id = 0; id < f->max_runlists; id++) {
+				runlist = &f->runlist_info[id];
+				if (runlist->eng_bitmask & act_eng_bitmask)
+					runlists_mask |=
+					fifo_sched_disable_runlist_m(id);
+			}
+		}
+	}
+	gk20a_dbg_info("runlists_mask = %08x", runlists_mask);
+	return runlists_mask;
+}
+
+static void gv11b_fifo_runlist_event_intr_disable(struct gk20a *g)
+{
+	u32 reg_val;
+
+	reg_val = gk20a_readl(g, fifo_intr_en_0_r());
+	reg_val &= fifo_intr_0_runlist_event_pending_f();
+	gk20a_writel(g, fifo_intr_en_0_r(), reg_val);
+}
+
+static void gv11b_fifo_runlist_event_intr_enable(struct gk20a *g)
+{
+	u32 reg_val;
+
+	reg_val = gk20a_readl(g, fifo_intr_en_0_r());
+	reg_val |= fifo_intr_0_runlist_event_pending_f();
+	gk20a_writel(g, fifo_intr_en_0_r(), reg_val);
+}
+
+static void gv11b_fifo_issue_runlist_preempt(struct gk20a *g,
+					u32 runlists_mask)
+{
+	u32 reg_val;
+
+	/* issue runlist preempt */
+	reg_val = gk20a_readl(g, fifo_runlist_preempt_r());
+	reg_val |= runlists_mask;
+	gk20a_writel(g, fifo_runlist_preempt_r(), reg_val);
+}
+
+static int gv11b_fifo_poll_runlist_preempt_pending(struct gk20a *g,
+					u32 runlists_mask)
+{
+	struct nvgpu_timeout timeout;
+	u32 delay = GR_IDLE_CHECK_DEFAULT;
+	int ret = -EBUSY;
+
+	nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
+			   NVGPU_TIMER_CPU_TIMER);
+	do {
+		if (!((gk20a_readl(g, fifo_runlist_preempt_r())) &
+				runlists_mask)) {
+			ret = 0;
+			break;
+		}
 
 		usleep_range(delay, delay * 2);
 		delay = min_t(unsigned long,
 				delay << 1, GR_IDLE_CHECK_MAX);
 	} while (!nvgpu_timeout_expired_msg(&timeout,
-			"preempt timeout engine"));
+			"runlist preempt timeout"));
 	return ret;
 }
 
@@ -557,6 +706,31 @@ static int gv11b_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 	return ret;
 }
 
+static int __locked_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
+{
+	int ret;
+
+	/*
+	 * Disable runlist event interrupt as it will get
+	 * triggered after runlist preempt finishes
+	 */
+	gv11b_fifo_runlist_event_intr_disable(g);
+
+	/* issue runlist preempt */
+	gv11b_fifo_issue_runlist_preempt(g, runlists_mask);
+
+	/* poll for runlist preempt done */
+	ret = gv11b_fifo_poll_runlist_preempt_pending(g, runlists_mask);
+
+	/* Clear outstanding runlist event */
+	gk20a_fifo_handle_runlist_event(g);
+
+	/* Enable runlist event interrupt */
+	gv11b_fifo_runlist_event_intr_enable(g);
+
+	return ret;
+}
+
 static int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 {
 	struct fifo_gk20a *f = &g->fifo;
@@ -584,6 +758,38 @@ static int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	return ret;
 }
 
+
+static int gv11b_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
+{
+	int ret = 0;
+	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
+	u32 mutex_ret = 0;
+	u32 runlist_id;
+
+	gk20a_dbg_fn("");
+
+	for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) {
+		if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id))
+			nvgpu_mutex_acquire(&g->fifo.
+				runlist_info[runlist_id].mutex);
+	}
+
+	mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+
+	ret = __locked_fifo_preempt_runlists(g, runlists_mask);
+
+	if (!mutex_ret)
+		pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+
+	for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) {
+		if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id))
+			nvgpu_mutex_release(&g->fifo.
+				runlist_info[runlist_id].mutex);
+	}
+
+	return ret;
+}
+
 static int __locked_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
 		unsigned int id_type, unsigned int timeout_rc_type)
 {
@@ -638,6 +844,167 @@ static int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
 	nvgpu_mutex_release(&f->runlist_info[runlist_id].mutex);
 
 	return ret;
+
+}
+
+void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
+		u32 id, unsigned int id_type, unsigned int rc_type,
+		struct mmu_fault_info *mmfault)
+{
+	bool verbose = false;
+	struct tsg_gk20a *tsg = NULL;
+	struct channel_gk20a *refch = NULL;
+	u32 runlists_mask, runlist_id;
+	struct fifo_runlist_info_gk20a *runlist = NULL;
+	u32 engine_id, client_type = ~0;
+
+	gk20a_dbg_info("active engine ids bitmask =0x%x", act_eng_bitmask);
+	gk20a_dbg_info("hw id =%d", id);
+	gk20a_dbg_info("id_type =%d", id_type);
+	gk20a_dbg_info("rc_type =%d", rc_type);
+	gk20a_dbg_info("mmu_fault =%p", mmfault);
+
+	runlists_mask = gv11b_fifo_get_runlists_mask(g, act_eng_bitmask, id,
+					id_type, rc_type, mmfault);
+
+	gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_DISABLED,
+					!RUNLIST_INFO_MUTEX_LOCKED);
+
+	g->fifo.deferred_reset_pending = false;
+
+	/* Disable power management */
+	if (support_gk20a_pmu(g->dev) && g->elpg_enabled) {
+		if (gk20a_pmu_disable_elpg(g))
+			gk20a_err(dev_from_gk20a(g),
+				"failed to set disable elpg");
+	}
+	if (g->ops.clock_gating.slcg_gr_load_gating_prod)
+		g->ops.clock_gating.slcg_gr_load_gating_prod(g,
+				false);
+	if (g->ops.clock_gating.slcg_perf_load_gating_prod)
+		g->ops.clock_gating.slcg_perf_load_gating_prod(g,
+				false);
+	if (g->ops.clock_gating.slcg_ltc_load_gating_prod)
+		g->ops.clock_gating.slcg_ltc_load_gating_prod(g,
+				false);
+
+	gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_RUN);
+
+	if (rc_type == RC_TYPE_MMU_FAULT)
+		gk20a_debug_dump(g->dev);
+
+	/* get the channel/TSG */
+	if (rc_type == RC_TYPE_MMU_FAULT && mmfault && mmfault->refch) {
+		refch = mmfault->refch;
+		client_type = mmfault->client_type;
+		if (gk20a_is_channel_marked_as_tsg(refch)) {
+			tsg = &g->fifo.tsg[refch->tsgid];
+			if (mmfault->faulted_pbdma != FIFO_INVAL_PBDMA_ID)
+				gv11b_reset_pbdma_faulted_tsg(tsg);
+			if (mmfault->faulted_engine != FIFO_INVAL_ENGINE_ID)
+				gv11b_reset_eng_faulted_tsg(tsg);
+		} else {
+			if (mmfault->faulted_pbdma != FIFO_INVAL_PBDMA_ID)
+				gv11b_reset_pbdma_faulted_ch(g, refch->hw_chid);
+			if (mmfault->faulted_engine != FIFO_INVAL_ENGINE_ID)
+				gv11b_reset_eng_faulted_ch(g, refch->hw_chid);
+		}
+	} else {
+		if (id_type == ID_TYPE_TSG)
+			tsg = &g->fifo.tsg[id];
+		else if (id_type == ID_TYPE_CHANNEL)
+			refch = gk20a_channel_get(&g->fifo.channel[id]);
+	}
+
+	if (id_type == ID_TYPE_TSG || id_type == ID_TYPE_CHANNEL) {
+		g->ops.fifo.preempt_ch_tsg(g, id, id_type,
+					PREEMPT_TIMEOUT_NORC);
+	} else {
+		gv11b_fifo_preempt_runlists(g, runlists_mask);
+	}
+
+	if (tsg) {
+		if (!g->fifo.deferred_reset_pending) {
+			if (rc_type == RC_TYPE_MMU_FAULT) {
+				gk20a_fifo_set_ctx_mmu_error_tsg(g, tsg);
+				verbose = gk20a_fifo_error_tsg(g, tsg);
+			}
+		}
+		gk20a_fifo_abort_tsg(g, tsg->tsgid, false);
+		if (refch)
+			gk20a_channel_put(refch);
+	} else if (refch) {
+		if (!g->fifo.deferred_reset_pending) {
+			if (rc_type == RC_TYPE_MMU_FAULT) {
+				gk20a_fifo_set_ctx_mmu_error_ch(g, refch);
+				verbose = gk20a_fifo_error_ch(g, refch);
+			}
+		}
+		gk20a_channel_abort(refch, false);
+		gk20a_channel_put(refch);
+	} else {
+		gk20a_err(dev_from_gk20a(g), "id unknown, abort runlist");
+		for (runlist_id = 0; runlist_id < g->fifo.max_runlists;
+						runlist_id++) {
+			if (runlists_mask & BIT(runlist_id))
+				g->ops.fifo.update_runlist(g, runlist_id,
+					FIFO_INVAL_CHANNEL_ID, false, true);
+		}
+	}
+
+	/* check if engine reset should be deferred */
+	for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) {
+
+		runlist = &g->fifo.runlist_info[runlist_id];
+		if ((runlists_mask & BIT(runlist_id)) &&
+					runlist->reset_eng_bitmask) {
+
+			unsigned long __reset_eng_bitmask =
+				runlist->reset_eng_bitmask;
+
+			for_each_set_bit(engine_id, &__reset_eng_bitmask, 32) {
+				if ((refch || tsg) &&
+					gk20a_fifo_should_defer_engine_reset(g,
+					engine_id, client_type, false)) {
+
+					g->fifo.deferred_fault_engines |=
+							BIT(engine_id);
+
+					/* handled during channel free */
+					g->fifo.deferred_reset_pending = true;
+					gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
+					"sm debugger attached,"
+					" deferring channel recovery to channel free");
+				} else {
+					/*
+					 * if lock is already taken, a reset is
+					 * taking place so no need to repeat
+					 */
+					if (nvgpu_mutex_tryacquire(
+						&g->fifo.gr_reset_mutex)) {
+
+						gk20a_fifo_reset_engine(g,
+								engine_id);
+
+						nvgpu_mutex_release(
+							&g->fifo.gr_reset_mutex);
+					}
+				}
+			}
+		}
+	}
+
+	if (refch)
+		gk20a_ctxsw_trace_channel_reset(g, refch);
+	else if (tsg)
+		gk20a_ctxsw_trace_tsg_reset(g, tsg);
+
+	gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_ENABLED,
+					!RUNLIST_INFO_MUTEX_LOCKED);
+
+	/* It is safe to enable ELPG again. */
+	if (support_gk20a_pmu(g->dev) && g->elpg_enabled)
+		gk20a_pmu_enable_elpg(g);
 }
 
 static void gv11b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f)
@@ -802,4 +1169,5 @@ void gv11b_init_fifo(struct gpu_ops *gops)
 	gops->fifo.preempt_ch_tsg = gv11b_fifo_preempt_ch_tsg;
 	gops->fifo.init_pbdma_intr_descs = gv11b_fifo_init_pbdma_intr_descs;
 	gops->fifo.reset_enable_hw = gv11b_init_fifo_reset_enable_hw;
+	gops->fifo.teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg;
 }
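
Note: the polling helpers this patch touches or adds (gv11b_fifo_poll_pbdma_chan_status(), gv11b_fifo_poll_eng_ctx_status(), gv11b_fifo_poll_runlist_preempt_pending()) all share one idiom: arm an nvgpu CPU timer, then re-read a status register with exponential backoff until the condition clears or the timer expires. A minimal standalone sketch of that idiom, using only helpers that appear in the patch itself; the reg/mask parameters are hypothetical stand-ins for the concrete status registers:

	/*
	 * Sketch: poll until (readl(reg) & mask) == 0, backing off
	 * exponentially from GR_IDLE_CHECK_DEFAULT up to GR_IDLE_CHECK_MAX.
	 */
	static int poll_reg_clear_sketch(struct gk20a *g, u32 reg, u32 mask)
	{
		struct nvgpu_timeout timeout;
		u32 delay = GR_IDLE_CHECK_DEFAULT;
		int ret = -EBUSY;

		nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
				   NVGPU_TIMER_CPU_TIMER);
		do {
			if (!(gk20a_readl(g, reg) & mask)) {
				ret = 0;	/* condition cleared in time */
				break;
			}
			usleep_range(delay, delay * 2);
			delay = min_t(unsigned long, delay << 1,
					GR_IDLE_CHECK_MAX);
		} while (!nvgpu_timeout_expired_msg(&timeout, "poll timeout"));

		return ret;
	}
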
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h
index 5b95ad9e..07a39da0 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h
@@ -15,6 +15,9 @@
 
 #ifndef FIFO_GV11B_H
 #define FIFO_GV11B_H
+
+#define FIFO_INVAL_PBDMA_ID	((u32)~0)
+
 struct gpu_ops;
 void gv11b_init_fifo(struct gpu_ops *gops);
 #endif
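
Note: FIFO_INVAL_PBDMA_ID follows the driver's existing all-ones sentinel convention (FIFO_INVAL_ENGINE_ID and FIFO_INVAL_CHANNEL_ID are used the same way in fifo_gv11b.c above). The guard it enables in gv11b_fifo_get_runlists_mask(), shown here in isolation:

	u32 pbdma_bitmask = 0;

	/*
	 * (u32)~0 means "no PBDMA was reported faulted"; only fold a
	 * real PBDMA id into the bitmask.
	 */
	if (mmfault->faulted_pbdma != FIFO_INVAL_PBDMA_ID)
		pbdma_bitmask = BIT(mmfault->faulted_pbdma);
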
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fifo_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fifo_gv11b.h
index b9249128..f05df49e 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fifo_gv11b.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fifo_gv11b.h
@@ -318,6 +318,22 @@ static inline u32 fifo_sched_disable_true_v(void)
 {
 	return 0x00000001;
 }
+static inline u32 fifo_runlist_preempt_r(void)
+{
+	return 0x00002638;
+}
+static inline u32 fifo_runlist_preempt_runlist_f(u32 v, u32 i)
+{
+	return (v & 0x1) << (0 + i*1);
+}
+static inline u32 fifo_runlist_preempt_runlist_m(u32 i)
+{
+	return 0x1 << (0 + i*1);
+}
+static inline u32 fifo_runlist_preempt_runlist_pending_v(void)
+{
+	return 0x00000001;
+}
 static inline u32 fifo_preempt_r(void)
 {
 	return 0x00002634;
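
Note: the new accessors follow the generated-header scheme used throughout this file: fifo_runlist_preempt_r() is the register offset, _runlist_f(v, i) positions a one-bit value in runlist i's field, _runlist_m(i) is that field's mask, and _runlist_pending_v() is the value meaning the preempt is still outstanding. A short sketch of how they compose, mirroring gv11b_fifo_issue_runlist_preempt() above; the chosen runlist ids are illustrative only:

	/* One bit per runlist, LSB = runlist 0; preempt runlists 0 and 2. */
	u32 runlists_mask = fifo_runlist_preempt_runlist_m(0) |
			    fifo_runlist_preempt_runlist_m(2); /* 0x00000005 */

	gk20a_writel(g, fifo_runlist_preempt_r(),
		     gk20a_readl(g, fifo_runlist_preempt_r()) | runlists_mask);

	/*
	 * Hardware clears each bit as the corresponding runlist preempt
	 * completes, so "done" is (readl & runlists_mask) == 0.
	 */
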