summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 50
1 file changed, 26 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 382744c7..06db0bb0 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -2912,19 +2912,14 @@ void gk20a_fifo_preempt_timeout_rc_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
2912 gk20a_fifo_recover_tsg(g, tsg, true, RC_TYPE_PREEMPT_TIMEOUT); 2912 gk20a_fifo_recover_tsg(g, tsg, true, RC_TYPE_PREEMPT_TIMEOUT);
2913} 2913}
2914 2914
2915void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, u32 chid) 2915void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, struct channel_gk20a *ch)
2916{ 2916{
2917 struct channel_gk20a *ch = gk20a_channel_from_id(g, chid); 2917 nvgpu_err(g, "preempt channel %d timeout", ch->chid);
2918 2918
2919 nvgpu_err(g, "preempt channel %d timeout", chid); 2919 g->ops.fifo.set_error_notifier(ch,
2920
2921 if (ch != NULL) {
2922 g->ops.fifo.set_error_notifier(ch,
2923 NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT); 2920 NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
2924 gk20a_fifo_recover_ch(g, ch, true, 2921 gk20a_fifo_recover_ch(g, ch, true,
2925 RC_TYPE_PREEMPT_TIMEOUT); 2922 RC_TYPE_PREEMPT_TIMEOUT);
2926 gk20a_channel_put(ch);
2927 }
2928} 2923}
2929 2924
2930int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg) 2925int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
@@ -2945,7 +2940,7 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
2945 return ret; 2940 return ret;
2946} 2941}
2947 2942
2948int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid) 2943int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
2949{ 2944{
2950 struct fifo_gk20a *f = &g->fifo; 2945 struct fifo_gk20a *f = &g->fifo;
2951 u32 ret = 0; 2946 u32 ret = 0;
@@ -2953,10 +2948,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
2953 u32 mutex_ret = 0; 2948 u32 mutex_ret = 0;
2954 u32 i; 2949 u32 i;
2955 2950
2956 nvgpu_log_fn(g, "chid: %d", chid); 2951 nvgpu_log_fn(g, "chid: %d", ch->chid);
2957 if (chid == FIFO_INVAL_CHANNEL_ID) {
2958 return 0;
2959 }
2960 2952
2961 /* we have no idea which runlist we are using. lock all */ 2953 /* we have no idea which runlist we are using. lock all */
2962 for (i = 0; i < g->fifo.max_runlists; i++) { 2954 for (i = 0; i < g->fifo.max_runlists; i++) {
@@ -2965,7 +2957,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
2965 2957
2966 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 2958 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
2967 2959
2968 ret = __locked_fifo_preempt(g, chid, false); 2960 ret = __locked_fifo_preempt(g, ch->chid, false);
2969 2961
2970 if (!mutex_ret) { 2962 if (!mutex_ret) {
2971 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); 2963 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -2978,9 +2970,10 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
2978 if (ret) { 2970 if (ret) {
2979 if (nvgpu_platform_is_silicon(g)) { 2971 if (nvgpu_platform_is_silicon(g)) {
2980 nvgpu_err(g, "preempt timed out for chid: %u, " 2972 nvgpu_err(g, "preempt timed out for chid: %u, "
2981 "ctxsw timeout will trigger recovery if needed", chid); 2973 "ctxsw timeout will trigger recovery if needed",
2974 ch->chid);
2982 } else { 2975 } else {
2983 gk20a_fifo_preempt_timeout_rc(g, chid); 2976 gk20a_fifo_preempt_timeout_rc(g, ch);
2984 } 2977 }
2985 } 2978 }
2986 2979
@@ -3035,7 +3028,7 @@ int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch)
3035 if (tsg != NULL) { 3028 if (tsg != NULL) {
3036 err = g->ops.fifo.preempt_tsg(ch->g, tsg); 3029 err = g->ops.fifo.preempt_tsg(ch->g, tsg);
3037 } else { 3030 } else {
3038 err = g->ops.fifo.preempt_channel(ch->g, ch->chid); 3031 err = g->ops.fifo.preempt_channel(ch->g, ch);
3039 } 3032 }
3040 3033
3041 return err; 3034 return err;
@@ -3126,8 +3119,9 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
3126 u32 pbdma_chid = FIFO_INVAL_CHANNEL_ID; 3119 u32 pbdma_chid = FIFO_INVAL_CHANNEL_ID;
3127 u32 engine_chid = FIFO_INVAL_CHANNEL_ID; 3120 u32 engine_chid = FIFO_INVAL_CHANNEL_ID;
3128 u32 token = PMU_INVALID_MUTEX_OWNER_ID; 3121 u32 token = PMU_INVALID_MUTEX_OWNER_ID;
3129 u32 mutex_ret; 3122 int mutex_ret;
3130 u32 err = 0; 3123 struct channel_gk20a *ch = NULL;
3124 int err = 0;
3131 3125
3132 nvgpu_log_fn(g, " "); 3126 nvgpu_log_fn(g, " ");
3133 3127
@@ -3155,8 +3149,12 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
3155 } 3149 }
3156 3150
3157 if (pbdma_chid != FIFO_INVAL_CHANNEL_ID) { 3151 if (pbdma_chid != FIFO_INVAL_CHANNEL_ID) {
3158 err = g->ops.fifo.preempt_channel(g, pbdma_chid); 3152 ch = gk20a_channel_from_id(g, pbdma_chid);
3159 if (err) { 3153 if (ch != NULL) {
3154 err = g->ops.fifo.preempt_channel(g, ch);
3155 gk20a_channel_put(ch);
3156 }
3157 if (err != 0) {
3160 goto clean_up; 3158 goto clean_up;
3161 } 3159 }
3162 } 3160 }
@@ -3173,8 +3171,12 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
3173 } 3171 }
3174 3172
3175 if (engine_chid != FIFO_INVAL_ENGINE_ID && engine_chid != pbdma_chid) { 3173 if (engine_chid != FIFO_INVAL_ENGINE_ID && engine_chid != pbdma_chid) {
3176 err = g->ops.fifo.preempt_channel(g, engine_chid); 3174 ch = gk20a_channel_from_id(g, engine_chid);
3177 if (err) { 3175 if (ch != NULL) {
3176 err = g->ops.fifo.preempt_channel(g, ch);
3177 gk20a_channel_put(ch);
3178 }
3179 if (err != 0) {
3178 goto clean_up; 3180 goto clean_up;
3179 } 3181 }
3180 } 3182 }