Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c   2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.h  18
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h        2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c     8
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.h    14
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mc_gk20a.c    80
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mc_gk20a.h     6
7 files changed, 74 insertions, 56 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index c94fc536..7f4a0948 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -2672,7 +2672,7 @@ void gk20a_fifo_isr(struct gk20a *g)
 	return;
 }
 
-int gk20a_fifo_nonstall_isr(struct gk20a *g)
+u32 gk20a_fifo_nonstall_isr(struct gk20a *g)
 {
 	u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());
 	u32 clear_intr = 0;
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index 576a4ac8..cf3ac167 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -21,8 +21,8 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#ifndef __FIFO_GK20A_H__
-#define __FIFO_GK20A_H__
+#ifndef FIFO_GK20A_H
+#define FIFO_GK20A_H
 
 #include "channel_gk20a.h"
 #include "tsg_gk20a.h"
@@ -103,10 +103,10 @@ struct fifo_runlist_info_gk20a {
 };
 
 enum {
-	ENGINE_GR_GK20A = 0,
-	ENGINE_GRCE_GK20A = 1,
-	ENGINE_ASYNC_CE_GK20A = 2,
-	ENGINE_INVAL_GK20A
+	ENGINE_GR_GK20A = 0U,
+	ENGINE_GRCE_GK20A = 1U,
+	ENGINE_ASYNC_CE_GK20A = 2U,
+	ENGINE_INVAL_GK20A = 3U,
 };
 
 struct fifo_pbdma_exception_info_gk20a {
@@ -140,7 +140,7 @@ struct fifo_engine_info_gk20a {
 };
 
 enum {
-	PROFILE_IOCTL_ENTRY = 0,
+	PROFILE_IOCTL_ENTRY = 0U,
 	PROFILE_ENTRY,
 	PROFILE_JOB_TRACKING,
 	PROFILE_APPEND,
@@ -231,7 +231,7 @@ int gk20a_init_fifo_support(struct gk20a *g);
 int gk20a_init_fifo_setup_hw(struct gk20a *g);
 
 void gk20a_fifo_isr(struct gk20a *g);
-int gk20a_fifo_nonstall_isr(struct gk20a *g);
+u32 gk20a_fifo_nonstall_isr(struct gk20a *g);
 
 int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid);
 int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
@@ -454,4 +454,4 @@ void gk20a_fifo_add_sema_cmd(struct gk20a *g,
 		struct nvgpu_semaphore *s, u64 sema_va,
 		struct priv_cmd_entry *cmd,
 		u32 off, bool acquire, bool wfi);
-#endif /*__GR_GK20A_H__*/
+#endif /* FIFO_GK20A_H */
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 076bf89f..84d3f639 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -1076,7 +1076,7 @@ struct gpu_ops {
 		u32 (*intr_nonstall)(struct gk20a *g);
 		void (*intr_nonstall_pause)(struct gk20a *g);
 		void (*intr_nonstall_resume)(struct gk20a *g);
-		int (*isr_nonstall)(struct gk20a *g);
+		u32 (*isr_nonstall)(struct gk20a *g);
 		void (*enable)(struct gk20a *g, u32 units);
 		void (*disable)(struct gk20a *g, u32 units);
 		void (*reset)(struct gk20a *g, u32 units);
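
Note: isr_nonstall above is one hook in the gpu_ops HAL table; common code only ever calls
g->ops.mc.isr_nonstall(g) and consumes the returned bitmask. A minimal standalone C sketch of
that indirection follows; struct example_gpu, example_mc_ops and the fake_* functions are
illustrative assumptions, not nvgpu definitions.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct example_gpu;

/* A tiny HAL-style ops table: chip-specific code fills in the function
 * pointers, common code calls through them (same shape as gpu_ops.mc). */
struct example_mc_ops {
	uint32_t (*intr_nonstall)(struct example_gpu *g);
	uint32_t (*isr_nonstall)(struct example_gpu *g);
};

struct example_gpu {
	struct example_mc_ops mc;
};

static uint32_t fake_intr_nonstall(struct example_gpu *g)
{
	(void)g;
	return 0x1U;	/* pretend one non-stall interrupt line is pending */
}

static uint32_t fake_isr_nonstall(struct example_gpu *g)
{
	/* Report deferred work as an unsigned bitmask, as the reworked hook does. */
	return (g->mc.intr_nonstall(g) != 0U) ? 0x3U : 0U;
}

int main(void)
{
	struct example_gpu g = { .mc = { fake_intr_nonstall, fake_isr_nonstall } };

	printf("ops = 0x%" PRIx32 "\n", g.mc.isr_nonstall(&g));
	return 0;
}
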
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index c7d028b2..5fd0eb0c 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -6134,18 +6134,18 @@ int gk20a_gr_isr(struct gk20a *g)
 	return 0;
 }
 
-int gk20a_gr_nonstall_isr(struct gk20a *g)
+u32 gk20a_gr_nonstall_isr(struct gk20a *g)
 {
-	int ops = 0;
+	u32 ops = 0;
 	u32 gr_intr = gk20a_readl(g, gr_intr_nonstall_r());
 
 	nvgpu_log(g, gpu_dbg_intr, "pgraph nonstall intr %08x", gr_intr);
 
-	if (gr_intr & gr_intr_nonstall_trap_pending_f()) {
+	if ((gr_intr & gr_intr_nonstall_trap_pending_f()) != 0U) {
 		/* Clear the interrupt */
 		gk20a_writel(g, gr_intr_nonstall_r(),
 			gr_intr_nonstall_trap_pending_f());
-		ops |= (gk20a_nonstall_ops_wakeup_semaphore |
+		ops |= (u32)(gk20a_nonstall_ops_wakeup_semaphore |
 			gk20a_nonstall_ops_post_events);
 	}
 	return ops;
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
index a703ef9d..8c21a714 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
@@ -24,6 +24,8 @@
 #ifndef GR_GK20A_H
 #define GR_GK20A_H
 
+#include <nvgpu/types.h>
+
 #include "gr_ctx_gk20a.h"
 #include "mm_gk20a.h"
 
@@ -566,7 +568,7 @@ int gk20a_init_gr_channel(struct channel_gk20a *ch_gk20a);
 int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags);
 
 int gk20a_gr_isr(struct gk20a *g);
-int gk20a_gr_nonstall_isr(struct gk20a *g);
+u32 gk20a_gr_nonstall_isr(struct gk20a *g);
 
 /* zcull */
 u32 gr_gk20a_get_ctxsw_zcull_size(struct gk20a *g, struct gr_gk20a *gr);
@@ -603,15 +605,17 @@ u32 gk20a_gr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g);
 #define gr_gk20a_elpg_protected_call(g, func) \
 	({ \
 		int err = 0; \
-		if (g->support_pmu && g->elpg_enabled) {\
+		if ((g->support_pmu) && (g->elpg_enabled)) {\
 			err = nvgpu_pmu_disable_elpg(g); \
-			if (err) \
+			if (err != 0) {\
 				nvgpu_pmu_enable_elpg(g); \
+			} \
 		} \
-		if (!err) { \
+		if (err == 0) { \
 			err = func; \
-			if (g->support_pmu && g->elpg_enabled) \
+			if ((g->support_pmu) && (g->elpg_enabled)) {\
 				nvgpu_pmu_enable_elpg(g); \
+			} \
 		} \
 		err; \
 	})
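
Note: gr_gk20a_elpg_protected_call() is a GNU C statement expression ({ ... }) that brackets
"func" with an ELPG disable/re-enable pair and evaluates to the error code. A standalone
sketch of that pattern follows; protected_call(), fake_disable_powergate() and
fake_enable_powergate() are hypothetical stand-ins, not the nvgpu API.

#include <stdio.h>

static int fake_disable_powergate(void) { return 0; }	/* stand-in for nvgpu_pmu_disable_elpg() */
static void fake_enable_powergate(void) { }		/* stand-in for nvgpu_pmu_enable_elpg() */

/* Same shape as the macro in the hunk above: disable the feature, run func,
 * re-enable the feature, and yield err as the value of the expression. */
#define protected_call(enabled, func) \
	({ \
		int err = 0; \
		if (enabled) { \
			err = fake_disable_powergate(); \
			if (err != 0) { \
				fake_enable_powergate(); \
			} \
		} \
		if (err == 0) { \
			err = (func); \
			if (enabled) { \
				fake_enable_powergate(); \
			} \
		} \
		err; \
	})

static int do_work(void) { return 0; }

int main(void)
{
	printf("err = %d\n", protected_call(1, do_work()));
	return 0;
}
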
diff --git a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
index 9473ad4f..69d165af 100644
--- a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
@@ -45,7 +45,7 @@ void mc_gk20a_isr_stall(struct gk20a *g)
 	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) {
 		active_engine_id = g->fifo.active_engines_list[engine_id_idx];
 
-		if (mc_intr_0 & g->fifo.engine_info[active_engine_id].intr_mask) {
+		if ((mc_intr_0 & g->fifo.engine_info[active_engine_id].intr_mask) != 0U) {
 			engine_enum = g->fifo.engine_info[active_engine_id].engine_enum;
 			/* GR Engine */
 			if (engine_enum == ENGINE_GR_GK20A) {
@@ -55,28 +55,33 @@ void mc_gk20a_isr_stall(struct gk20a *g)
 			/* CE Engine */
 			if (((engine_enum == ENGINE_GRCE_GK20A) ||
 				(engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
-				g->ops.ce2.isr_stall){
+				(g->ops.ce2.isr_stall != NULL)) {
 				g->ops.ce2.isr_stall(g,
 					g->fifo.engine_info[active_engine_id].inst_id,
 					g->fifo.engine_info[active_engine_id].pri_base);
 			}
 		}
 	}
-	if (mc_intr_0 & mc_intr_0_pfifo_pending_f())
+	if ((mc_intr_0 & mc_intr_0_pfifo_pending_f()) != 0U) {
 		gk20a_fifo_isr(g);
-	if (mc_intr_0 & mc_intr_0_pmu_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_0_pmu_pending_f()) != 0U) {
 		gk20a_pmu_isr(g);
-	if (mc_intr_0 & mc_intr_0_priv_ring_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_0_priv_ring_pending_f()) != 0U) {
 		g->ops.priv_ring.isr(g);
-	if (mc_intr_0 & mc_intr_0_ltc_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_0_ltc_pending_f()) != 0U) {
 		g->ops.ltc.isr(g);
-	if (mc_intr_0 & mc_intr_0_pbus_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_0_pbus_pending_f()) != 0U) {
 		g->ops.bus.isr(g);
+	}
 }
 
-int mc_gk20a_isr_nonstall(struct gk20a *g)
+u32 mc_gk20a_isr_nonstall(struct gk20a *g)
 {
-	int ops = 0;
+	u32 ops = 0;
 	u32 mc_intr_1;
 	u32 engine_id_idx;
 	u32 active_engine_id = 0;
@@ -84,8 +89,9 @@ int mc_gk20a_isr_nonstall(struct gk20a *g)
 
 	mc_intr_1 = g->ops.mc.intr_nonstall(g);
 
-	if (g->ops.mc.is_intr1_pending(g, NVGPU_UNIT_FIFO, mc_intr_1))
+	if (g->ops.mc.is_intr1_pending(g, NVGPU_UNIT_FIFO, mc_intr_1) != 0U) {
 		ops |= gk20a_fifo_nonstall_isr(g);
+	}
 
 	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines;
 			engine_id_idx++) {
@@ -94,19 +100,20 @@ int mc_gk20a_isr_nonstall(struct gk20a *g)
 		active_engine_id = g->fifo.active_engines_list[engine_id_idx];
 		engine_info = &g->fifo.engine_info[active_engine_id];
 
-		if (mc_intr_1 & engine_info->intr_mask) {
+		if ((mc_intr_1 & engine_info->intr_mask) != 0U) {
 			engine_enum = engine_info->engine_enum;
 			/* GR Engine */
-			if (engine_enum == ENGINE_GR_GK20A)
+			if (engine_enum == ENGINE_GR_GK20A) {
 				ops |= gk20a_gr_nonstall_isr(g);
-
+			}
 			/* CE Engine */
 			if (((engine_enum == ENGINE_GRCE_GK20A) ||
 				(engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
-				g->ops.ce2.isr_nonstall)
+				(g->ops.ce2.isr_nonstall != NULL)) {
 				ops |= g->ops.ce2.isr_nonstall(g,
 					engine_info->inst_id,
 					engine_info->pri_base);
+			}
 		}
 	}
 
@@ -219,7 +226,7 @@ void gk20a_mc_enable(struct gk20a *g, u32 units)
 	pmc = gk20a_readl(g, mc_enable_r());
 	pmc |= units;
 	gk20a_writel(g, mc_enable_r(), pmc);
-	gk20a_readl(g, mc_enable_r());
+	pmc = gk20a_readl(g, mc_enable_r());
 	nvgpu_spinlock_release(&g->mc_enable_lock);
 
 	nvgpu_udelay(20);
@@ -228,10 +235,11 @@ void gk20a_mc_enable(struct gk20a *g, u32 units)
 void gk20a_mc_reset(struct gk20a *g, u32 units)
 {
 	g->ops.mc.disable(g, units);
-	if (units & gk20a_fifo_get_all_ce_engine_reset_mask(g))
+	if ((units & gk20a_fifo_get_all_ce_engine_reset_mask(g)) != 0U) {
 		nvgpu_udelay(500);
-	else
+	} else {
 		nvgpu_udelay(20);
+	}
 	g->ops.mc.enable(g, units);
 }
 
@@ -239,19 +247,22 @@ u32 gk20a_mc_boot_0(struct gk20a *g, u32 *arch, u32 *impl, u32 *rev)
 {
 	u32 val = __nvgpu_readl(g, mc_boot_0_r());
 
-	if (val == 0xffffffff)
-		return val;
+	if (val != 0xffffffffU) {
 
-	if (arch)
-		*arch = mc_boot_0_architecture_v(val) <<
-			NVGPU_GPU_ARCHITECTURE_SHIFT;
+		if (arch != NULL) {
+			*arch = mc_boot_0_architecture_v(val) <<
+				NVGPU_GPU_ARCHITECTURE_SHIFT;
+		}
 
-	if (impl)
-		*impl = mc_boot_0_implementation_v(val);
+		if (impl != NULL) {
+			*impl = mc_boot_0_implementation_v(val);
+		}
 
-	if (rev)
-		*rev = (mc_boot_0_major_revision_v(val) << 4) |
-			mc_boot_0_minor_revision_v(val);
+		if (rev != NULL) {
+			*rev = (mc_boot_0_major_revision_v(val) << 4) |
+				mc_boot_0_minor_revision_v(val);
+		}
+	}
 
 	return val;
 }
@@ -259,7 +270,7 @@ u32 gk20a_mc_boot_0(struct gk20a *g, u32 *arch, u32 *impl, u32 *rev)
 bool mc_gk20a_is_intr1_pending(struct gk20a *g,
 			enum nvgpu_unit unit, u32 mc_intr_1)
 {
-	u32 mask = 0;
+	u32 mask = 0U;
 	bool is_pending;
 
 	switch (unit) {
@@ -270,11 +281,11 @@ bool mc_gk20a_is_intr1_pending(struct gk20a *g,
 		break;
 	}
 
-	if (mask == 0) {
+	if (mask == 0U) {
 		nvgpu_err(g, "unknown unit %d", unit);
 		is_pending = false;
 	} else {
-		is_pending = (mc_intr_1 & mask) ? true : false;
+		is_pending = ((mc_intr_1 & mask) != 0U) ? true : false;
 	}
 
 	return is_pending;
@@ -284,9 +295,12 @@ void mc_gk20a_handle_intr_nonstall(struct gk20a *g, u32 ops)
 {
 	bool semaphore_wakeup, post_events;
 
-	semaphore_wakeup = ops & gk20a_nonstall_ops_wakeup_semaphore;
-	post_events = ops & gk20a_nonstall_ops_post_events;
+	semaphore_wakeup = (((ops & (u32)gk20a_nonstall_ops_wakeup_semaphore) != 0U) ?
+			true : false);
+	post_events = (((ops & (u32)gk20a_nonstall_ops_post_events) != 0U) ?
+			true: false);
 
-	if (semaphore_wakeup)
+	if (semaphore_wakeup) {
 		g->ops.semaphore_wakeup(g, post_events);
+	}
 }
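
Note: the mc_gk20a_isr_nonstall() and mc_gk20a_handle_intr_nonstall() hunks above show the
whole life of the "ops" value: ISRs OR together u32 flags, and a thread-context handler
decodes them into booleans. A self-contained sketch of that producer/consumer pattern
follows; the NONSTALL_OP_* names and both functions are illustrative, not the nvgpu enum.

#include <stdint.h>
#include <stdio.h>

#define NONSTALL_OP_WAKEUP_SEMAPHORE	(1U << 0)
#define NONSTALL_OP_POST_EVENTS		(1U << 1)

/* ISR-style producer: inspect a (simulated) interrupt status value and
 * return the deferred work it wants done, as an unsigned bitmask. */
static uint32_t example_nonstall_isr(uint32_t intr_status)
{
	uint32_t ops = 0U;

	if ((intr_status & 0x1U) != 0U) {
		/* the interrupt would be cleared here, then deferred work requested */
		ops |= (NONSTALL_OP_WAKEUP_SEMAPHORE | NONSTALL_OP_POST_EVENTS);
	}
	return ops;
}

/* Thread-context consumer: decode the bitmask into booleans, as
 * mc_gk20a_handle_intr_nonstall() does in the hunk above. */
static void example_handle_nonstall(uint32_t ops)
{
	int wakeup = ((ops & NONSTALL_OP_WAKEUP_SEMAPHORE) != 0U);
	int post_events = ((ops & NONSTALL_OP_POST_EVENTS) != 0U);

	if (wakeup) {
		printf("wake semaphores, post_events=%d\n", post_events);
	}
}

int main(void)
{
	example_handle_nonstall(example_nonstall_isr(0x1U));
	return 0;
}
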
diff --git a/drivers/gpu/nvgpu/gk20a/mc_gk20a.h b/drivers/gpu/nvgpu/gk20a/mc_gk20a.h
index 1ce308b8..1b59d634 100644
--- a/drivers/gpu/nvgpu/gk20a/mc_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mc_gk20a.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -32,7 +32,7 @@ u32 mc_gk20a_intr_stall(struct gk20a *g);
 void mc_gk20a_intr_stall_pause(struct gk20a *g);
 void mc_gk20a_intr_stall_resume(struct gk20a *g);
 u32 mc_gk20a_intr_nonstall(struct gk20a *g);
-int mc_gk20a_isr_nonstall(struct gk20a *g);
+u32 mc_gk20a_isr_nonstall(struct gk20a *g);
 void mc_gk20a_intr_nonstall_pause(struct gk20a *g);
 void mc_gk20a_intr_nonstall_resume(struct gk20a *g);
 void gk20a_mc_enable(struct gk20a *g, u32 units);
@@ -42,4 +42,4 @@ u32 gk20a_mc_boot_0(struct gk20a *g, u32 *arch, u32 *impl, u32 *rev);
 bool mc_gk20a_is_intr1_pending(struct gk20a *g,
 		enum nvgpu_unit unit, u32 mc_intr_1);
 void mc_gk20a_handle_intr_nonstall(struct gk20a *g, u32 ops);
-#endif
+#endif /* MC_GK20A_H */
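
Note: the gk20a_mc_boot_0() hunk folds the early return into a single "val != 0xffffffff"
branch with NULL-guarded out-parameters. A standalone sketch of that decode shape follows;
the BOOT0_* bit positions are illustrative assumptions, the real layout comes from the
generated mc_boot_0_*_v() accessors.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative field extractors; not the real NV_PMC_BOOT_0 layout. */
#define BOOT0_MINOR(v)	(((v) >> 0) & 0xfU)
#define BOOT0_MAJOR(v)	(((v) >> 4) & 0xfU)
#define BOOT0_IMPL(v)	(((v) >> 20) & 0xfU)
#define BOOT0_ARCH(v)	(((v) >> 24) & 0x1fU)

/* One exit point, optional out-parameters, 0xffffffff treated as a failed
 * register read -- the same structure as the reworked gk20a_mc_boot_0(). */
static uint32_t example_boot_0(uint32_t val, uint32_t *arch, uint32_t *impl,
			       uint32_t *rev)
{
	if (val != 0xffffffffU) {
		if (arch != NULL) {
			*arch = BOOT0_ARCH(val);
		}
		if (impl != NULL) {
			*impl = BOOT0_IMPL(val);
		}
		if (rev != NULL) {
			*rev = (BOOT0_MAJOR(val) << 4) | BOOT0_MINOR(val);
		}
	}
	return val;
}

int main(void)
{
	uint32_t arch = 0U, impl = 0U, rev = 0U;

	(void)example_boot_0(0x12A104A1U, &arch, &impl, &rev);
	printf("arch=0x%" PRIx32 " impl=0x%" PRIx32 " rev=0x%" PRIx32 "\n",
	       arch, impl, rev);
	return 0;
}
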