-rw-r--r--  drivers/gpu/nvgpu/gk20a/mc_gk20a.c          12
-rw-r--r--  drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c    6
-rw-r--r--  drivers/gpu/nvgpu/gp10b/mc_gp10b.c          12
3 files changed, 13 insertions, 17 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
index caab6b5e..a1861b0d 100644
--- a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
@@ -1,7 +1,7 @@
 /*
  * GK20A Master Control
  *
- * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -121,12 +121,6 @@ irqreturn_t mc_gk20a_intr_thread_stall(struct gk20a *g)
 
 	gk20a_dbg(gpu_dbg_intr, "stall intr %08x\n", mc_intr_0);
 
-	/* handle critical interrupts first */
-	if (mc_intr_0 & mc_intr_0_pbus_pending_f())
-		gk20a_pbus_isr(g);
-	if (mc_intr_0 & mc_intr_0_priv_ring_pending_f())
-		gk20a_priv_ring_isr(g);
-
 	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) {
 		active_engine_id = g->fifo.active_engines_list[engine_id_idx];
 
@@ -151,8 +145,12 @@ irqreturn_t mc_gk20a_intr_thread_stall(struct gk20a *g)
 		gk20a_fifo_isr(g);
 	if (mc_intr_0 & mc_intr_0_pmu_pending_f())
 		gk20a_pmu_isr(g);
+	if (mc_intr_0 & mc_intr_0_priv_ring_pending_f())
+		gk20a_priv_ring_isr(g);
 	if (mc_intr_0 & mc_intr_0_ltc_pending_f())
 		g->ops.ltc.isr(g);
+	if (mc_intr_0 & mc_intr_0_pbus_pending_f())
+		gk20a_pbus_isr(g);
 
 	/* sync handled irq counter before re-enabling interrupts */
 	atomic_set(&g->sw_irq_stall_last_handled, hw_irq_count);
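
For orientation, here is how the tail of mc_gk20a_intr_thread_stall() reads with the two hunks above applied. This is a condensed sketch assembled only from the context lines in this diff: the body of the engine-dispatch loop and the earlier part of the function are elided, and the mc_intr_0_fifo_pending_f() guard is inferred from the surrounding pattern rather than shown in the hunks.

	/* engine stall interrupts are dispatched first */
	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) {
		active_engine_id = g->fifo.active_engines_list[engine_id_idx];
		/* ... per-engine stall handling elided ... */
	}

	if (mc_intr_0 & mc_intr_0_fifo_pending_f())	/* guard inferred, see note above */
		gk20a_fifo_isr(g);
	if (mc_intr_0 & mc_intr_0_pmu_pending_f())
		gk20a_pmu_isr(g);
	if (mc_intr_0 & mc_intr_0_priv_ring_pending_f())
		gk20a_priv_ring_isr(g);
	if (mc_intr_0 & mc_intr_0_ltc_pending_f())
		g->ops.ltc.isr(g);
	if (mc_intr_0 & mc_intr_0_pbus_pending_f())
		gk20a_pbus_isr(g);

	/* sync handled irq counter before re-enabling interrupts */
	atomic_set(&g->sw_irq_stall_last_handled, hw_irq_count);

The PBUS and PRIV_RING handlers, which the removed block ran before the engine loop under the "handle critical interrupts first" comment, now run last.
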
diff --git a/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c b/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c
index 90bd95ac..a44df1e8 100644
--- a/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/priv_ring_gk20a.c
@@ -76,6 +76,8 @@ void gk20a_priv_ring_isr(struct gk20a *g)
 	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
 	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
 
+	if (platform->is_fmodel)
+		return;
 
 	status0 = gk20a_readl(g, pri_ringmaster_intr_status0_r());
 	status1 = gk20a_readl(g, pri_ringmaster_intr_status1_r());
@@ -88,6 +90,7 @@ void gk20a_priv_ring_isr(struct gk20a *g)
 		pri_ringmaster_intr_status0_overflow_fault_v(status0) != 0) {
 		gk20a_reset_priv_ring(g);
 	}
+
 	if (pri_ringmaster_intr_status0_gbl_write_error_sys_v(status0) != 0) {
 		gk20a_dbg(gpu_dbg_intr, "SYS write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x",
 			gk20a_readl(g, pri_ringstation_sys_priv_error_adr_r()),
@@ -106,9 +109,6 @@ void gk20a_priv_ring_isr(struct gk20a *g)
 		}
 	}
 
-	if (platform->is_fmodel)
-		return;
-
 	cmd = gk20a_readl(g, pri_ringmaster_command_r());
 	cmd = set_field(cmd, pri_ringmaster_command_cmd_m(),
 		pri_ringmaster_command_cmd_ack_interrupt_f());
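
For the priv ring side, here is a condensed sketch of gk20a_priv_ring_isr() after this patch, assembled only from the hunks above. Local-variable declarations are abbreviated and the error-decoding and acknowledge code is elided, so treat it as an outline rather than the literal file contents.

void gk20a_priv_ring_isr(struct gk20a *g)
{
	u32 status0, status1;
	u32 cmd;
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
	/* ... other locals elided ... */

	/* with this patch the fmodel check runs before any register access */
	if (platform->is_fmodel)
		return;

	status0 = gk20a_readl(g, pri_ringmaster_intr_status0_r());
	status1 = gk20a_readl(g, pri_ringmaster_intr_status1_r());

	/* ... decode status0/status1, log priv write errors, and reset the
	 * priv ring on ringmaster faults (unchanged by this patch, elided) ... */

	/* the removed hunk had the is_fmodel early return here, i.e. after the
	 * status registers had already been read and the errors logged */

	cmd = gk20a_readl(g, pri_ringmaster_command_r());
	cmd = set_field(cmd, pri_ringmaster_command_cmd_m(),
		pri_ringmaster_command_cmd_ack_interrupt_f());
	/* ... remaining acknowledge handling elided ... */
}
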
diff --git a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c
index dfcbe398..abbd2191 100644
--- a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c
@@ -1,7 +1,7 @@
 /*
  * GP20B master
  *
- * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -133,12 +133,6 @@ irqreturn_t mc_gp10b_intr_thread_stall(struct gk20a *g)
 
 	gk20a_dbg(gpu_dbg_intr, "stall intr %08x\n", mc_intr_0);
 
-	/* handle critical interrupts first */
-	if (mc_intr_0 & mc_intr_pbus_pending_f())
-		gk20a_pbus_isr(g);
-	if (mc_intr_0 & mc_intr_priv_ring_pending_f())
-		gk20a_priv_ring_isr(g);
-
 	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) {
 		active_engine_id = g->fifo.active_engines_list[engine_id_idx];
 
@@ -163,8 +157,12 @@ irqreturn_t mc_gp10b_intr_thread_stall(struct gk20a *g)
 		gk20a_fifo_isr(g);
 	if (mc_intr_0 & mc_intr_pmu_pending_f())
 		gk20a_pmu_isr(g);
+	if (mc_intr_0 & mc_intr_priv_ring_pending_f())
+		gk20a_priv_ring_isr(g);
 	if (mc_intr_0 & mc_intr_ltc_pending_f())
 		g->ops.ltc.isr(g);
+	if (mc_intr_0 & mc_intr_pbus_pending_f())
+		gk20a_pbus_isr(g);
 
 	/* sync handled irq counter before re-enabling interrupts */
 	atomic_set(&g->sw_irq_stall_last_handled, hw_irq_count);
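
Taken together, the three files make the same two adjustments. In both mc_gk20a_intr_thread_stall() and mc_gp10b_intr_thread_stall() (the latter using the gp10b mc_intr_*_pending_f() accessors), PBUS and PRIV_RING stalling interrupts are no longer handled ahead of everything else but after the engine loop and the FIFO, PMU and LTC handlers. And on is_fmodel platforms, gk20a_priv_ring_isr() now returns before touching any pri_ringmaster registers, where it previously read and logged the status and skipped only the final interrupt acknowledge.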