Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/mc_gp10b.c')
-rw-r--r--  drivers/gpu/nvgpu/gp10b/mc_gp10b.c  202
1 file changed, 202 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c
new file mode 100644
index 00000000..eda961b6
--- /dev/null
+++ b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c
@@ -0,0 +1,202 @@
/*
 * GP10B master
 *
 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/types.h>

#include "gk20a/gk20a.h"
#include "mc_gp10b.h"
#include "hw_mc_gp10b.h"

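/*
 * Clear both interrupt trees, then enable the units this chip cares about.
 * The enabled mask for each tree is cached in g->ops.mc.intr_mask_restore
 * so the threaded ISRs can re-arm the same set of units after servicing.
 */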
void mc_gp10b_intr_enable(struct gk20a *g)
{
	u32 eng_intr_mask = gk20a_fifo_engine_interrupt_mask(g);

	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
		0xffffffff);
	g->ops.mc.intr_mask_restore[NVGPU_MC_INTR_STALLING] =
		mc_intr_pfifo_pending_f()
		| mc_intr_replayable_fault_pending_f()
		| eng_intr_mask;
	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING),
		g->ops.mc.intr_mask_restore[NVGPU_MC_INTR_STALLING]);

	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
		0xffffffff);
	g->ops.mc.intr_mask_restore[NVGPU_MC_INTR_NONSTALLING] =
		mc_intr_pfifo_pending_f()
		| mc_intr_priv_ring_pending_f()
		| mc_intr_ltc_pending_f()
		| mc_intr_pbus_pending_f()
		| eng_intr_mask;
	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_NONSTALLING),
		g->ops.mc.intr_mask_restore[NVGPU_MC_INTR_NONSTALLING]);
}

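/*
 * Enable or disable the interrupt bits in @mask on either the stalling or
 * the non-stalling tree, and keep intr_mask_restore in sync so the threaded
 * ISRs re-arm the updated set.
 */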
void mc_gp10b_intr_unit_config(struct gk20a *g, bool enable,
		bool is_stalling, u32 mask)
{
	u32 intr_index = 0;
	u32 reg = 0;

	intr_index = (is_stalling ? NVGPU_MC_INTR_STALLING :
			NVGPU_MC_INTR_NONSTALLING);
	if (enable) {
		reg = mc_intr_en_set_r(intr_index);
		g->ops.mc.intr_mask_restore[intr_index] |= mask;
	} else {
		reg = mc_intr_en_clear_r(intr_index);
		g->ops.mc.intr_mask_restore[intr_index] &= ~mask;
	}

	gk20a_writel(g, reg, mask);
}

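/*
 * Top half for the stalling interrupt line: bail out if the GPU is powered
 * down or the interrupt is not ours (shared IRQ); otherwise mask all
 * stalling interrupts and defer to the threaded handler.
 */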
irqreturn_t mc_gp10b_isr_stall(struct gk20a *g)
{
	u32 mc_intr_0;

	if (!g->power_on)
		return IRQ_NONE;

	/* not from gpu when sharing irq with others */
	mc_intr_0 = gk20a_readl(g, mc_intr_r(0));
	if (unlikely(!mc_intr_0))
		return IRQ_NONE;

	gk20a_writel(g, mc_intr_en_clear_r(0), 0xffffffff);

	return IRQ_WAKE_THREAD;
}

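/*
 * Top half for the non-stalling interrupt line; same pattern as the
 * stalling ISR, but on the mc_intr_r(1) tree.
 */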
irqreturn_t mc_gp10b_isr_nonstall(struct gk20a *g)
{
	u32 mc_intr_1;

	if (!g->power_on)
		return IRQ_NONE;

	/* not from gpu when sharing irq with others */
	mc_intr_1 = gk20a_readl(g, mc_intr_r(1));
	if (unlikely(!mc_intr_1))
		return IRQ_NONE;

	gk20a_writel(g, mc_intr_en_clear_r(1), 0xffffffff);

	return IRQ_WAKE_THREAD;
}

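/*
 * Threaded (bottom half) handler for stalling interrupts: dispatch to the
 * engine, FIFO, PMU, priv ring, LTC and PBUS handlers as indicated by
 * mc_intr_r(0), then re-enable the stalling tree from the cached mask.
 */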
irqreturn_t mc_gp10b_intr_thread_stall(struct gk20a *g)
{
	u32 mc_intr_0;
	u32 engine_id_idx;
	u32 active_engine_id = 0;
	u32 engine_enum = ENGINE_INVAL_GK20A;

	gk20a_dbg(gpu_dbg_intr, "interrupt thread launched");

	mc_intr_0 = gk20a_readl(g, mc_intr_r(0));

	gk20a_dbg(gpu_dbg_intr, "stall intr %08x\n", mc_intr_0);

	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) {
		active_engine_id = g->fifo.active_engines_list[engine_id_idx];

		if (mc_intr_0 & g->fifo.engine_info[active_engine_id].intr_mask) {
			engine_enum = g->fifo.engine_info[active_engine_id].engine_enum;
			/* GR Engine */
			if (engine_enum == ENGINE_GR_GK20A) {
				gr_gk20a_elpg_protected_call(g, gk20a_gr_isr(g));
			}

			/* CE Engine */
			if (((engine_enum == ENGINE_GRCE_GK20A) ||
			     (engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
			    g->ops.ce2.isr_stall) {
				g->ops.ce2.isr_stall(g,
					g->fifo.engine_info[active_engine_id].inst_id,
					g->fifo.engine_info[active_engine_id].pri_base);
			}
		}
	}
	if (mc_intr_0 & mc_intr_pfifo_pending_f())
		gk20a_fifo_isr(g);
	if (mc_intr_0 & mc_intr_pmu_pending_f())
		gk20a_pmu_isr(g);
	if (mc_intr_0 & mc_intr_priv_ring_pending_f())
		gk20a_priv_ring_isr(g);
	if (mc_intr_0 & mc_intr_ltc_pending_f())
		g->ops.ltc.isr(g);
	if (mc_intr_0 & mc_intr_pbus_pending_f())
		gk20a_pbus_isr(g);

	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING),
		g->ops.mc.intr_mask_restore[NVGPU_MC_INTR_STALLING]);

	return IRQ_HANDLED;
}

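/*
 * Threaded handler for non-stalling interrupts: dispatch the FIFO and
 * per-engine non-stall handlers as indicated by mc_intr_r(1), then
 * re-enable the non-stalling tree from the cached mask.
 */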
irqreturn_t mc_gp10b_intr_thread_nonstall(struct gk20a *g)
{
	u32 mc_intr_1;
	u32 engine_id_idx;
	u32 active_engine_id = 0;
	u32 engine_enum = ENGINE_INVAL_GK20A;

	gk20a_dbg(gpu_dbg_intr, "interrupt thread launched");

	mc_intr_1 = gk20a_readl(g, mc_intr_r(1));

	gk20a_dbg(gpu_dbg_intr, "non-stall intr %08x\n", mc_intr_1);

	if (mc_intr_1 & mc_intr_pfifo_pending_f())
		gk20a_fifo_nonstall_isr(g);

	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) {
		active_engine_id = g->fifo.active_engines_list[engine_id_idx];

		if (mc_intr_1 & g->fifo.engine_info[active_engine_id].intr_mask) {
			engine_enum = g->fifo.engine_info[active_engine_id].engine_enum;
			/* GR Engine */
			if (engine_enum == ENGINE_GR_GK20A) {
				gk20a_gr_nonstall_isr(g);
			}

			/* CE Engine */
			if (((engine_enum == ENGINE_GRCE_GK20A) ||
			     (engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
			    g->ops.ce2.isr_nonstall) {
				g->ops.ce2.isr_nonstall(g,
					g->fifo.engine_info[active_engine_id].inst_id,
					g->fifo.engine_info[active_engine_id].pri_base);
			}
		}
	}

	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_NONSTALLING),
		g->ops.mc.intr_mask_restore[NVGPU_MC_INTR_NONSTALLING]);

	return IRQ_HANDLED;
}

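/*
 * Hook the gp10b master (MC) interrupt handlers into the per-chip gpu_ops.
 */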
void gp10b_init_mc(struct gpu_ops *gops)
{
	gops->mc.intr_enable = mc_gp10b_intr_enable;
	gops->mc.intr_unit_config = mc_gp10b_intr_unit_config;
	gops->mc.isr_stall = mc_gp10b_isr_stall;
	gops->mc.isr_nonstall = mc_gp10b_isr_nonstall;
	gops->mc.isr_thread_stall = mc_gp10b_intr_thread_stall;
	gops->mc.isr_thread_nonstall = mc_gp10b_intr_thread_nonstall;
}