Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/mc_gp10b.c')
-rw-r--r--  drivers/gpu/nvgpu/gp10b/mc_gp10b.c  |  223
1 file changed, 0 insertions(+), 223 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c
deleted file mode 100644
index 033d02c5..00000000
--- a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c
+++ /dev/null
@@ -1,223 +0,0 @@
/*
 * GP10B master
 *
 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "gk20a/gk20a.h"
#include <nvgpu/io.h>

#include "mc_gp10b.h"

#include <nvgpu/atomic.h>
#include <nvgpu/unit.h>

#include <nvgpu/hw/gp10b/hw_mc_gp10b.h>

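/*
 * GP10B exposes two top-level interrupt register instances: one for the
 * stalling tree (NVGPU_MC_INTR_STALLING) and one for the nonstalling tree
 * (NVGPU_MC_INTR_NONSTALLING).
 */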
#define MAX_MC_INTR_REGS 2U

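/* Mask (disable) every interrupt source on both trees via the
 * enable-clear registers. */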
void mc_gp10b_intr_mask(struct gk20a *g)
{
	nvgpu_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
		0xffffffffU);

	nvgpu_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
		0xffffffffU);
}

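/*
 * Enable the default interrupt sources on both trees: clear all enables
 * first, record the desired masks in mc_intr_mask_restore so that the
 * *_resume() paths can re-apply them, then write the masks to the
 * enable-set registers.
 */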
void mc_gp10b_intr_enable(struct gk20a *g)
{
	u32 eng_intr_mask = gk20a_fifo_engine_interrupt_mask(g);

	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
		0xffffffffU);
	g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] =
		mc_intr_pfifo_pending_f() |
		mc_intr_priv_ring_pending_f() |
		mc_intr_pbus_pending_f() |
		mc_intr_ltc_pending_f() |
		mc_intr_replayable_fault_pending_f() |
		eng_intr_mask;
	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING),
		g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING]);

	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
		0xffffffffU);
	g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING] =
		mc_intr_pfifo_pending_f() |
		eng_intr_mask;
	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_NONSTALLING),
		g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING]);
}

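/*
 * Enable or disable the given interrupt sources on the stalling or
 * nonstalling tree, keeping mc_intr_mask_restore in sync so later
 * pause/resume cycles preserve the change.
 */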
void mc_gp10b_intr_unit_config(struct gk20a *g, bool enable,
		bool is_stalling, u32 mask)
{
	u32 intr_index = (is_stalling ? NVGPU_MC_INTR_STALLING :
				NVGPU_MC_INTR_NONSTALLING);
	u32 reg;

	if (enable) {
		reg = mc_intr_en_set_r(intr_index);
		g->mc_intr_mask_restore[intr_index] |= mask;
	} else {
		reg = mc_intr_en_clear_r(intr_index);
		g->mc_intr_mask_restore[intr_index] &= ~mask;
	}

	gk20a_writel(g, reg, mask);
}

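/*
 * Top-half handler for the stalling interrupt tree: read mc_intr_0 once,
 * dispatch to each active engine whose intr_mask bits are set, then walk
 * the remaining units (HUB, FIFO, PMU, priv ring, LTC, bus, NVLINK, FBPA).
 */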
void mc_gp10b_isr_stall(struct gk20a *g)
{
	u32 mc_intr_0;

	u32 engine_id_idx;
	u32 active_engine_id = 0;
	u32 engine_enum = ENGINE_INVAL_GK20A;

	mc_intr_0 = gk20a_readl(g, mc_intr_r(0));

	nvgpu_log(g, gpu_dbg_intr, "stall intr 0x%08x\n", mc_intr_0);

	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) {
		active_engine_id = g->fifo.active_engines_list[engine_id_idx];

		if ((mc_intr_0 & g->fifo.engine_info[active_engine_id].intr_mask) != 0U) {
			engine_enum = g->fifo.engine_info[active_engine_id].engine_enum;
			/* GR Engine */
			if (engine_enum == ENGINE_GR_GK20A) {
				gr_gk20a_elpg_protected_call(g, gk20a_gr_isr(g));
			}

			/* CE Engine */
			if (((engine_enum == ENGINE_GRCE_GK20A) ||
				(engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
				(g->ops.ce2.isr_stall != NULL)) {
				g->ops.ce2.isr_stall(g,
					g->fifo.engine_info[active_engine_id].inst_id,
					g->fifo.engine_info[active_engine_id].pri_base);
			}
		}
	}
	if ((g->ops.mc.is_intr_hub_pending != NULL) &&
			g->ops.mc.is_intr_hub_pending(g, mc_intr_0)) {
		g->ops.fb.hub_isr(g);
	}
	if ((mc_intr_0 & mc_intr_pfifo_pending_f()) != 0U) {
		gk20a_fifo_isr(g);
	}
	if ((mc_intr_0 & mc_intr_pmu_pending_f()) != 0U) {
		g->ops.pmu.pmu_isr(g);
	}
	if ((mc_intr_0 & mc_intr_priv_ring_pending_f()) != 0U) {
		g->ops.priv_ring.isr(g);
	}
	if ((mc_intr_0 & mc_intr_ltc_pending_f()) != 0U) {
		g->ops.ltc.isr(g);
	}
	if ((mc_intr_0 & mc_intr_pbus_pending_f()) != 0U) {
		g->ops.bus.isr(g);
	}
	if ((g->ops.mc.is_intr_nvlink_pending != NULL) &&
			g->ops.mc.is_intr_nvlink_pending(g, mc_intr_0)) {
		g->ops.nvlink.isr(g);
	}
	if (((mc_intr_0 & mc_intr_pfb_pending_f()) != 0U) &&
			(g->ops.fb.fbpa_isr != NULL)) {
		g->ops.fb.fbpa_isr(g);
	}

	nvgpu_log(g, gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0);
}

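/* Return the pending interrupt bits of the stalling tree. */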
u32 mc_gp10b_intr_stall(struct gk20a *g)
{
	return gk20a_readl(g, mc_intr_r(NVGPU_MC_INTR_STALLING));
}

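/*
 * Pause/resume for the stalling tree: pause clears every enable bit,
 * resume restores the mask saved in mc_intr_mask_restore.
 */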
void mc_gp10b_intr_stall_pause(struct gk20a *g)
{
	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING), 0xffffffffU);
}

void mc_gp10b_intr_stall_resume(struct gk20a *g)
{
	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING),
		g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING]);
}

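/* Nonstalling-tree counterparts of the read/pause/resume helpers above. */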
u32 mc_gp10b_intr_nonstall(struct gk20a *g)
{
	return gk20a_readl(g, mc_intr_r(NVGPU_MC_INTR_NONSTALLING));
}

void mc_gp10b_intr_nonstall_pause(struct gk20a *g)
{
	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
		0xffffffffU);
}

void mc_gp10b_intr_nonstall_resume(struct gk20a *g)
{
	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_NONSTALLING),
		g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING]);
}

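/*
 * Check whether the given unit has a pending bit in mc_intr_1; only
 * NVGPU_UNIT_FIFO is mapped here, unknown units log an error and
 * report not-pending.
 */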
bool mc_gp10b_is_intr1_pending(struct gk20a *g,
		enum nvgpu_unit unit, u32 mc_intr_1)
{
	u32 mask = 0;
	bool is_pending;

	switch (unit) {
	case NVGPU_UNIT_FIFO:
		mask = mc_intr_pfifo_pending_f();
		break;
	default:
		break;
	}

	if (mask == 0U) {
		nvgpu_err(g, "unknown unit %d", unit);
		is_pending = false;
	} else {
		is_pending = ((mc_intr_1 & mask) != 0U);
	}

	return is_pending;
}

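/* Log any interrupt bits still pending in either tree. */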
void mc_gp10b_log_pending_intrs(struct gk20a *g)
{
	u32 i, intr;

	for (i = 0; i < MAX_MC_INTR_REGS; i++) {
		intr = nvgpu_readl(g, mc_intr_r(i));
		if (intr == 0U) {
			continue;
		}
		nvgpu_info(g, "Pending intr%u=0x%08x", i, intr);
	}
}
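For reference (not part of this diff): these handlers were consumed through
the g->ops.mc HAL table, as the file's own calls to
g->ops.mc.is_intr_hub_pending and g->ops.mc.is_intr_nvlink_pending show. A
minimal sketch of the wiring, with the field names assumed from the function
names above rather than taken from this diff:

	/* Hypothetical excerpt of gp10b HAL init; field names are assumed. */
	gops->mc.intr_mask = mc_gp10b_intr_mask;
	gops->mc.intr_enable = mc_gp10b_intr_enable;
	gops->mc.intr_unit_config = mc_gp10b_intr_unit_config;
	gops->mc.isr_stall = mc_gp10b_isr_stall;
	gops->mc.intr_stall = mc_gp10b_intr_stall;
	gops->mc.intr_stall_pause = mc_gp10b_intr_stall_pause;
	gops->mc.intr_stall_resume = mc_gp10b_intr_stall_resume;
	gops->mc.intr_nonstall = mc_gp10b_intr_nonstall;
	gops->mc.intr_nonstall_pause = mc_gp10b_intr_nonstall_pause;
	gops->mc.intr_nonstall_resume = mc_gp10b_intr_nonstall_resume;
	gops->mc.is_intr1_pending = mc_gp10b_is_intr1_pending;
	gops->mc.log_pending_intrs = mc_gp10b_log_pending_intrs;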