path: root/drivers/gpu/nvgpu/common/mc/mc_gp10b.c
Diffstat (limited to 'drivers/gpu/nvgpu/common/mc/mc_gp10b.c')
-rw-r--r--  drivers/gpu/nvgpu/common/mc/mc_gp10b.c  224
1 file changed, 224 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/mc/mc_gp10b.c b/drivers/gpu/nvgpu/common/mc/mc_gp10b.c
new file mode 100644
index 00000000..a0f26dd3
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/mc/mc_gp10b.c
@@ -0,0 +1,224 @@
/*
 * GP10B master
 *
 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "gk20a/gk20a.h"
#include <nvgpu/io.h>
#include <nvgpu/mc.h>

#include "mc_gp10b.h"

#include <nvgpu/atomic.h>
#include <nvgpu/unit.h>

#include <nvgpu/hw/gp10b/hw_mc_gp10b.h>

#define MAX_MC_INTR_REGS 2U

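/*
 * Mask all interrupts: clear every enable bit in both the stalling
 * and non-stalling interrupt trees.
 */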
void mc_gp10b_intr_mask(struct gk20a *g)
{
	nvgpu_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
			0xffffffffU);

	nvgpu_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
			0xffffffffU);
}

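/*
 * Enable the default stalling and non-stalling interrupt sources and
 * record each enabled mask in mc_intr_mask_restore[] so the pause/resume
 * helpers below can restore it.
 */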
void mc_gp10b_intr_enable(struct gk20a *g)
{
	u32 eng_intr_mask = gk20a_fifo_engine_interrupt_mask(g);

	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
			0xffffffffU);
	g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] =
			mc_intr_pfifo_pending_f() |
			mc_intr_priv_ring_pending_f() |
			mc_intr_pbus_pending_f() |
			mc_intr_ltc_pending_f() |
			mc_intr_replayable_fault_pending_f() |
			eng_intr_mask;
	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING),
			g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING]);

	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
			0xffffffffU);
	g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING] =
			mc_intr_pfifo_pending_f() |
			eng_intr_mask;
	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_NONSTALLING),
			g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING]);
}

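/*
 * Enable (set) or disable (clear) the interrupt bits in @mask for one
 * interrupt tree, selected by @is_stalling, keeping the saved restore
 * mask in sync.
 */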
void mc_gp10b_intr_unit_config(struct gk20a *g, bool enable,
		bool is_stalling, u32 mask)
{
	u32 intr_index = 0;
	u32 reg = 0;

	intr_index = (is_stalling ? NVGPU_MC_INTR_STALLING :
			NVGPU_MC_INTR_NONSTALLING);
	if (enable) {
		reg = mc_intr_en_set_r(intr_index);
		g->mc_intr_mask_restore[intr_index] |= mask;
	} else {
		reg = mc_intr_en_clear_r(intr_index);
		g->mc_intr_mask_restore[intr_index] &= ~mask;
	}

	gk20a_writel(g, reg, mask);
}

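/*
 * Stalling interrupt service routine: read the pending bits from
 * mc_intr_r(0), run the per-engine (GR/CE) handlers for any engine
 * whose intr_mask matches, then dispatch to the HUB, FIFO, PMU,
 * priv ring, LTC, bus, NVLINK and FBPA handlers as needed.
 */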
void mc_gp10b_isr_stall(struct gk20a *g)
{
	u32 mc_intr_0;
	u32 engine_id_idx;
	u32 active_engine_id = 0;
	u32 engine_enum = ENGINE_INVAL_GK20A;

	mc_intr_0 = gk20a_readl(g, mc_intr_r(0));

	nvgpu_log(g, gpu_dbg_intr, "stall intr 0x%08x\n", mc_intr_0);

	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines;
			engine_id_idx++) {
		active_engine_id = g->fifo.active_engines_list[engine_id_idx];

		if ((mc_intr_0 &
		     g->fifo.engine_info[active_engine_id].intr_mask) != 0U) {
			engine_enum =
				g->fifo.engine_info[active_engine_id].engine_enum;
			/* GR Engine */
			if (engine_enum == ENGINE_GR_GK20A) {
				gr_gk20a_elpg_protected_call(g, gk20a_gr_isr(g));
			}

			/* CE Engine */
			if (((engine_enum == ENGINE_GRCE_GK20A) ||
			     (engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
			    (g->ops.ce2.isr_stall != NULL)) {
				g->ops.ce2.isr_stall(g,
					g->fifo.engine_info[active_engine_id].inst_id,
					g->fifo.engine_info[active_engine_id].pri_base);
			}
		}
	}
	if ((g->ops.mc.is_intr_hub_pending != NULL) &&
			g->ops.mc.is_intr_hub_pending(g, mc_intr_0)) {
		g->ops.fb.hub_isr(g);
	}
	if ((mc_intr_0 & mc_intr_pfifo_pending_f()) != 0U) {
		gk20a_fifo_isr(g);
	}
	if ((mc_intr_0 & mc_intr_pmu_pending_f()) != 0U) {
		g->ops.pmu.pmu_isr(g);
	}
	if ((mc_intr_0 & mc_intr_priv_ring_pending_f()) != 0U) {
		g->ops.priv_ring.isr(g);
	}
	if ((mc_intr_0 & mc_intr_ltc_pending_f()) != 0U) {
		g->ops.ltc.isr(g);
	}
	if ((mc_intr_0 & mc_intr_pbus_pending_f()) != 0U) {
		g->ops.bus.isr(g);
	}
	if ((g->ops.mc.is_intr_nvlink_pending != NULL) &&
			g->ops.mc.is_intr_nvlink_pending(g, mc_intr_0)) {
		g->ops.nvlink.isr(g);
	}
	if (((mc_intr_0 & mc_intr_pfb_pending_f()) != 0U) &&
			(g->ops.fb.fbpa_isr != NULL)) {
		g->ops.fb.fbpa_isr(g);
	}

	nvgpu_log(g, gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0);
}

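/* Return the pending stalling interrupts. */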
u32 mc_gp10b_intr_stall(struct gk20a *g)
{
	return gk20a_readl(g, mc_intr_r(NVGPU_MC_INTR_STALLING));
}

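/* Disable all stalling interrupts; the saved restore mask is untouched. */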
void mc_gp10b_intr_stall_pause(struct gk20a *g)
{
	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
			0xffffffffU);
}

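/* Re-enable the stalling interrupts recorded in mc_intr_mask_restore[]. */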
void mc_gp10b_intr_stall_resume(struct gk20a *g)
{
	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING),
			g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING]);
}

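/* Return the pending non-stalling interrupts. */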
u32 mc_gp10b_intr_nonstall(struct gk20a *g)
{
	return gk20a_readl(g, mc_intr_r(NVGPU_MC_INTR_NONSTALLING));
}

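/* Disable all non-stalling interrupts; the saved restore mask is untouched. */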
void mc_gp10b_intr_nonstall_pause(struct gk20a *g)
{
	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
			0xffffffffU);
}

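/* Re-enable the non-stalling interrupts recorded in mc_intr_mask_restore[]. */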
void mc_gp10b_intr_nonstall_resume(struct gk20a *g)
{
	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_NONSTALLING),
			g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING]);
}

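/*
 * Report whether @unit has a pending bit in @mc_intr_1. Only
 * NVGPU_UNIT_FIFO is decoded; any other unit logs an error and is
 * treated as not pending.
 */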
bool mc_gp10b_is_intr1_pending(struct gk20a *g,
		enum nvgpu_unit unit, u32 mc_intr_1)
{
	u32 mask = 0;
	bool is_pending;

	switch (unit) {
	case NVGPU_UNIT_FIFO:
		mask = mc_intr_pfifo_pending_f();
		break;
	default:
		break;
	}

	if (mask == 0U) {
		nvgpu_err(g, "unknown unit %d", unit);
		is_pending = false;
	} else {
		is_pending = (mc_intr_1 & mask) != 0U;
	}

	return is_pending;
}

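/* Log any interrupt registers that still have pending bits set. */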
void mc_gp10b_log_pending_intrs(struct gk20a *g)
{
	u32 i, intr;

	for (i = 0; i < MAX_MC_INTR_REGS; i++) {
		intr = nvgpu_readl(g, mc_intr_r(i));
		if (intr == 0U) {
			continue;
		}
		nvgpu_info(g, "Pending intr%d=0x%08x", i, intr);
	}
}