path: root/drivers/gpu/nvgpu
author:    Sagar Kamble <skamble@nvidia.com>  2021-05-03 13:47:16 -0400
committer: mobile promotions <svcmobile_promotions@nvidia.com>  2021-05-04 17:40:45 -0400
commit:    0d088ad70cb43e54661163971095409c76a79f51 (patch)
tree:      baca19bf15d1dbb7aadd2bbb2baf5876c65c12b1 /drivers/gpu/nvgpu
parent:    00c3d98acba40e0ee549a174f212850aa15646a5 (diff)
gpu: nvgpu: wait for stalling interrupts to complete during TSG unbind preempt

Some of the engine stalling interrupts can block the context save off
the engine if not handled during fifo.preempt_tsg. They need to be
handled while polling for engine ctxsw status.

Bug 200711183
Bug 200726848

Change-Id: Ie45d76d9d1d8be3ffb842670843507f2d9aea6d0
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2521971
(cherry picked from commit I7418a9e0354013b81fbefd8c0cab5068404fc44e)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2523938
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: Bibek Basu <bbasu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
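
In outline, the patch turns the one-shot preempt poll into a bounded retry
loop: gv11b_fifo_poll_eng_ctx_status now returns -EAGAIN when a stalling
engine interrupt is pending and retries remain, gv11b_fifo_preempt_tsg then
waits for the stalling interrupts to drain and preempts again, and only the
final attempt falls back to marking hung engines in reset_eng_bitmask. The
sketch below condenses that control flow from the diff; it is illustrative
only, omits the runlist lock, PMU mutex, and Bug 2065990 WAR that the real
code keeps around each attempt, and the wrapper name preempt_tsg_retry_sketch
is hypothetical.

/*
 * Condensed, illustrative sketch of the retry flow this patch adds
 * (locking and the scheduling WAR are elided here).
 */
static int preempt_tsg_retry_sketch(struct gk20a *g, struct tsg_gk20a *tsg)
{
	u32 retries = 10U;
	u32 wait_timeout = g->ops.fifo.get_preempt_timeout(g) / retries;
	int ret;

	do {
		/* preempt_retries_left is true on all but the last attempt */
		ret = __locked_fifo_preempt(g, tsg->tsgid, true, retries > 1U);
		if (ret != -EAGAIN) {
			break;	/* preempt done, or a real failure */
		}

		/* a stalling engine intr blocked ctx save; let it drain */
		ret = nvgpu_wait_for_stall_interrupts(g, wait_timeout);
		if (ret != 0) {
			nvgpu_log_info(g,
				"wait for stall interrupts failed %d", ret);
		}
	} while (--retries != 0U);

	return ret;
}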
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c     24
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.h      7
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fifo_gv11b.c     97
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fifo_gv11b.h      4
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/gk20a.h   2
5 files changed, 90 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index e91830f8..049b8da2 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -2981,7 +2981,7 @@ static u32 gk20a_fifo_get_preempt_timeout(struct gk20a *g)
 }
 
 int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
-		unsigned int id_type)
+		unsigned int id_type, bool preempt_retries_left)
 {
 	struct nvgpu_timeout timeout;
 	u32 delay = GR_IDLE_CHECK_DEFAULT;
@@ -3037,7 +3037,8 @@ void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, struct channel_gk20a *ch)
 			RC_TYPE_PREEMPT_TIMEOUT);
 }
 
-int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
+int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg,
+		bool preempt_retries_left)
 {
 	int ret;
 	unsigned int id_type;
@@ -3049,8 +3050,17 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
 
 	id_type = is_tsg ? ID_TYPE_TSG : ID_TYPE_CHANNEL;
 
-	/* wait for preempt */
-	ret = g->ops.fifo.is_preempt_pending(g, id, id_type);
+	/*
+	 * Poll for preempt done. if stalling interrupts are pending
+	 * while preempt is in progress we poll for stalling interrupts
+	 * to finish based on return value from this function and
+	 * retry preempt again.
+	 * If HW is hung, on the last retry instance we try to identify
+	 * the engines hung and set the runlist reset_eng_bitmask
+	 * and mark preemption completion.
+	 */
+	ret = g->ops.fifo.is_preempt_pending(g, id, id_type,
+			preempt_retries_left);
 
 	return ret;
 }
@@ -3072,7 +3082,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
 
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	ret = __locked_fifo_preempt(g, ch->chid, false);
+	ret = __locked_fifo_preempt(g, ch->chid, false, false);
 
 	if (!mutex_ret) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -3112,7 +3122,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	ret = __locked_fifo_preempt(g, tsg->tsgid, true);
+	ret = __locked_fifo_preempt(g, tsg->tsgid, true, false);
 
 	if (!mutex_ret) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -3785,7 +3795,7 @@ static int __locked_fifo_reschedule_preempt_next(struct channel_gk20a *ch,
 			gk20a_readl(g, fifo_preempt_r()));
 #endif
 	if (wait_preempt) {
-		g->ops.fifo.is_preempt_pending(g, preempt_id, preempt_type);
+		g->ops.fifo.is_preempt_pending(g, preempt_id, preempt_type, false);
 	}
 #ifdef TRACEPOINTS_ENABLED
 	trace_gk20a_reschedule_preempted_next(ch->chid);
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index 26365cae..078236d0 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -1,7 +1,7 @@
 /*
  * GK20A graphics fifo (gr host)
  *
- * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -388,8 +388,9 @@ void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a);
 u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g);
 
 int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
-		unsigned int id_type);
-int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg);
+		unsigned int id_type, bool preempt_retries_left);
+int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg,
+		bool preempt_retries_left);
 void gk20a_fifo_preempt_timeout_rc_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
 void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, struct channel_gk20a *ch);
 int gk20a_fifo_setup_ramfc(struct channel_gk20a *c,
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index be4d56a8..cc43ee33 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -1,7 +1,7 @@
 /*
  * GV11B fifo
  *
- * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -498,7 +498,8 @@ static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id,
 }
 
 static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
-		u32 act_eng_id, u32 *reset_eng_bitmask)
+		u32 act_eng_id, u32 *reset_eng_bitmask,
+		bool preempt_retries_left)
 {
 	struct nvgpu_timeout timeout;
 	unsigned long delay = GR_IDLE_CHECK_DEFAULT; /* in micro seconds */
@@ -507,6 +508,7 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
 	int ret = -EBUSY;
 	unsigned int loop_count = 0;
 	u32 eng_intr_pending;
+	bool check_preempt_retry = false;
 
 	/* timeout in milli seconds */
 	nvgpu_timeout_init(g, &timeout, g->ops.fifo.get_preempt_timeout(g),
@@ -565,9 +567,7 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
 		    fifo_engine_status_ctx_status_ctxsw_switch_v()) {
 			/* Eng save hasn't started yet. Continue polling */
 			if (eng_intr_pending) {
-				/* if eng intr, stop polling */
-				*reset_eng_bitmask |= BIT(act_eng_id);
-				ret = 0;
+				check_preempt_retry = true;
 				break;
 			}
 
@@ -578,9 +578,7 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
 
 			if (id == fifo_engine_status_id_v(eng_stat)) {
 				if (eng_intr_pending) {
-					/* preemption will not finish */
-					*reset_eng_bitmask |= BIT(act_eng_id);
-					ret = 0;
+					check_preempt_retry = true;
 					break;
 				}
 			} else {
@@ -594,9 +592,7 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
 
 			if (id == fifo_engine_status_next_id_v(eng_stat)) {
 				if (eng_intr_pending) {
-					/* preemption will not finish */
-					*reset_eng_bitmask |= BIT(act_eng_id);
-					ret = 0;
+					check_preempt_retry = true;
 					break;
 				}
 			} else {
@@ -606,8 +602,13 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
 			}
 
 		} else {
-			/* Preempt should be finished */
-			ret = 0;
+			if (eng_intr_pending) {
+				check_preempt_retry = true;
+			} else {
+				/* Preempt should be finished */
+				ret = 0;
+			}
+
 			break;
 		}
 		nvgpu_usleep_range(delay, delay * 2);
@@ -615,7 +616,19 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
 				delay << 1, GR_IDLE_CHECK_MAX);
 	} while (!nvgpu_timeout_expired(&timeout));
 
-	if (ret) {
+
+	/* if eng intr, stop polling and check if we can retry preempts. */
+	if (check_preempt_retry) {
+		if (preempt_retries_left) {
+			ret = -EAGAIN;
+		} else {
+			/* preemption will not finish */
+			*reset_eng_bitmask |= BIT32(act_eng_id);
+			ret = 0;
+		}
+	}
+
+	if (ret && ret != -EAGAIN) {
 		/*
 		 * The reasons a preempt can fail are:
 		 * 1.Some other stalling interrupt is asserted preventing
@@ -770,7 +783,7 @@ static void gv11b_fifo_issue_runlist_preempt(struct gk20a *g,
 }
 
 int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
-		unsigned int id_type)
+		unsigned int id_type, bool preempt_retries_left)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	unsigned long runlist_served_pbdmas;
@@ -778,7 +791,7 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 	u32 pbdma_id;
 	u32 act_eng_id;
 	u32 runlist_id;
-	int ret = 0;
+	int err, ret = 0;
 	u32 tsgid;
 
 	if (id_type == ID_TYPE_TSG) {
@@ -795,14 +808,21 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 	runlist_served_engines = f->runlist_info[runlist_id].eng_bitmask;
 
 	for_each_set_bit(pbdma_id, &runlist_served_pbdmas, f->num_pbdma) {
-		ret |= gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id);
+		err = gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id);
+		if (err != 0) {
+			ret = err;
+		}
 	}
 
 	f->runlist_info[runlist_id].reset_eng_bitmask = 0;
 
 	for_each_set_bit(act_eng_id, &runlist_served_engines, f->max_engines) {
-		ret |= gv11b_fifo_poll_eng_ctx_status(g, tsgid, act_eng_id,
-			&f->runlist_info[runlist_id].reset_eng_bitmask);
+		err = gv11b_fifo_poll_eng_ctx_status(g, tsgid, act_eng_id,
+			&f->runlist_info[runlist_id].reset_eng_bitmask,
+			preempt_retries_left);
+		if ((err != 0) && (ret == 0)) {
+			ret = err;
+		}
 	}
 	return ret;
 }
@@ -847,10 +867,13 @@ int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg)
 int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 {
 	struct fifo_gk20a *f = &g->fifo;
-	u32 ret = 0;
+	int ret = 0;
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 	u32 mutex_ret = 0;
 	u32 runlist_id;
+	u32 preempt_retry_count = 10U;
+	u32 preempt_retry_timeout =
+		g->ops.fifo.get_preempt_timeout(g) / preempt_retry_count;
 
 	nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
 
@@ -860,23 +883,35 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 		return 0;
 	}
 
-	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
+	do {
+		nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
 
-	/* WAR for Bug 2065990 */
-	gk20a_fifo_disable_tsg_sched(g, tsg);
+		/* WAR for Bug 2065990 */
+		gk20a_fifo_disable_tsg_sched(g, tsg);
 
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	ret = __locked_fifo_preempt(g, tsg->tsgid, true);
+		ret = __locked_fifo_preempt(g, tsg->tsgid, true,
+				preempt_retry_count > 1U);
 
-	if (!mutex_ret) {
-		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
-	}
+		if (!mutex_ret) {
+			nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+		}
+
+		/* WAR for Bug 2065990 */
+		gk20a_fifo_enable_tsg_sched(g, tsg);
 
-	/* WAR for Bug 2065990 */
-	gk20a_fifo_enable_tsg_sched(g, tsg);
+		nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
 
-	nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
+		if (ret != -EAGAIN) {
+			break;
+		}
+
+		ret = nvgpu_wait_for_stall_interrupts(g, preempt_retry_timeout);
+		if (ret != 0) {
+			nvgpu_log_info(g, "wait for stall interrupts failed %d", ret);
+		}
+	} while (--preempt_retry_count != 0U);
 
 	if (ret) {
 		if (nvgpu_platform_is_silicon(g)) {
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h
index abbf77a6..4e6bd6ba 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h
@@ -1,7 +1,7 @@
 /*
  * GV11B Fifo
  *
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -80,7 +80,7 @@ void gv11b_dump_eng_status(struct gk20a *g,
 u32 gv11b_fifo_intr_0_error_mask(struct gk20a *g);
 int gv11b_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next);
 int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
-		unsigned int id_type);
+		unsigned int id_type, bool preempt_retries_left);
 int gv11b_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch);
 int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
 int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h
index 3b193dbe..d6d6e939 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h
@@ -719,7 +719,7 @@ struct gpu_ops {
 			struct ch_state *ch_state);
 		u32 (*intr_0_error_mask)(struct gk20a *g);
 		int (*is_preempt_pending)(struct gk20a *g, u32 id,
-			unsigned int id_type);
+			unsigned int id_type, bool preempt_retries_left);
 		void (*init_pbdma_intr_descs)(struct fifo_gk20a *f);
 		int (*reset_enable_hw)(struct gk20a *g);
 		int (*setup_userd)(struct channel_gk20a *c);