Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/mc_gp10b.c')
-rw-r--r-- drivers/gpu/nvgpu/gp10b/mc_gp10b.c | 185
1 file changed, 185 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c
new file mode 100644
index 00000000..9aea76f9
--- /dev/null
+++ b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c
@@ -0,0 +1,185 @@
/*
 * GP10B master
 *
 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "gk20a/gk20a.h"
#include "gk20a/mc_gk20a.h"

#include "mc_gp10b.h"

#include <nvgpu/atomic.h>
#include <nvgpu/unit.h>

#include <nvgpu/hw/gp10b/hw_mc_gp10b.h>

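/*
 * Enable the interrupts the driver services on both trees: clear the
 * stalling (tree 0) and non-stalling (tree 1) enable masks, then turn on
 * PFIFO, PRIV ring, PBUS, LTC, replayable-fault and engine interrupts on
 * the stalling tree, and PFIFO plus engine interrupts on the non-stalling
 * tree. Each enabled mask is cached in mc_intr_mask_restore so the
 * pause/resume helpers below can restore it.
 */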
void mc_gp10b_intr_enable(struct gk20a *g)
{
	u32 eng_intr_mask = gk20a_fifo_engine_interrupt_mask(g);

	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
		     0xffffffff);
	g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] =
		mc_intr_pfifo_pending_f() |
		mc_intr_priv_ring_pending_f() |
		mc_intr_pbus_pending_f() |
		mc_intr_ltc_pending_f() |
		mc_intr_replayable_fault_pending_f() |
		eng_intr_mask;
	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING),
		     g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING]);

	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
		     0xffffffff);
	g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING] =
		mc_intr_pfifo_pending_f() |
		eng_intr_mask;
	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_NONSTALLING),
		     g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING]);
}

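/*
 * Enable or disable the given interrupt mask on the stalling or
 * non-stalling tree, keeping the cached restore mask for that tree in
 * sync so a later resume re-enables exactly what is configured here.
 */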
void mc_gp10b_intr_unit_config(struct gk20a *g, bool enable,
			       bool is_stalling, u32 mask)
{
	u32 intr_index;
	u32 reg;

	intr_index = (is_stalling ? NVGPU_MC_INTR_STALLING :
				    NVGPU_MC_INTR_NONSTALLING);
	if (enable) {
		reg = mc_intr_en_set_r(intr_index);
		g->mc_intr_mask_restore[intr_index] |= mask;
	} else {
		reg = mc_intr_en_clear_r(intr_index);
		g->mc_intr_mask_restore[intr_index] &= ~mask;
	}

	gk20a_writel(g, reg, mask);
}

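/*
 * Stalling interrupt service routine: read the pending mask once, hand
 * engine interrupts to every active engine whose intr_mask matches (GR
 * through the ELPG-protected call, CE through the ce2 ISR), then service
 * the HUB, FIFO, PMU, PRIV ring, LTC and PBUS units.
 */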
void mc_gp10b_isr_stall(struct gk20a *g)
{
	u32 mc_intr_0;
	u32 engine_id_idx;
	u32 active_engine_id = 0;
	u32 engine_enum = ENGINE_INVAL_GK20A;

	mc_intr_0 = gk20a_readl(g, mc_intr_r(NVGPU_MC_INTR_STALLING));

	gk20a_dbg(gpu_dbg_intr, "stall intr 0x%08x\n", mc_intr_0);

	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines;
	     engine_id_idx++) {
		active_engine_id = g->fifo.active_engines_list[engine_id_idx];

		if (mc_intr_0 &
		    g->fifo.engine_info[active_engine_id].intr_mask) {
			engine_enum =
				g->fifo.engine_info[active_engine_id].engine_enum;

			/* GR engine */
			if (engine_enum == ENGINE_GR_GK20A) {
				gr_gk20a_elpg_protected_call(g,
							     gk20a_gr_isr(g));
			}

			/* CE engines */
			if (((engine_enum == ENGINE_GRCE_GK20A) ||
			     (engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
			    g->ops.ce2.isr_stall) {
				g->ops.ce2.isr_stall(g,
					g->fifo.engine_info[active_engine_id].inst_id,
					g->fifo.engine_info[active_engine_id].pri_base);
			}
		}
	}

	if (g->ops.mc.is_intr_hub_pending &&
	    g->ops.mc.is_intr_hub_pending(g, mc_intr_0))
		g->ops.fb.hub_isr(g);
	if (mc_intr_0 & mc_intr_pfifo_pending_f())
		gk20a_fifo_isr(g);
	if (mc_intr_0 & mc_intr_pmu_pending_f())
		gk20a_pmu_isr(g);
	if (mc_intr_0 & mc_intr_priv_ring_pending_f())
		g->ops.priv_ring.isr(g);
	if (mc_intr_0 & mc_intr_ltc_pending_f())
		g->ops.ltc.isr(g);
	if (mc_intr_0 & mc_intr_pbus_pending_f())
		g->ops.bus.isr(g);

	gk20a_dbg(gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0);
}

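/* Read the pending interrupt mask of the stalling tree. */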
u32 mc_gp10b_intr_stall(struct gk20a *g)
{
	return gk20a_readl(g, mc_intr_r(NVGPU_MC_INTR_STALLING));
}

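/* Disable all stalling interrupts; the cached restore mask is untouched. */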
void mc_gp10b_intr_stall_pause(struct gk20a *g)
{
	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
		     0xffffffff);
}

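/* Re-enable the stalling interrupts recorded in the cached restore mask. */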
void mc_gp10b_intr_stall_resume(struct gk20a *g)
{
	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING),
		     g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING]);
}

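/* Read the pending interrupt mask of the non-stalling tree. */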
u32 mc_gp10b_intr_nonstall(struct gk20a *g)
{
	return gk20a_readl(g, mc_intr_r(NVGPU_MC_INTR_NONSTALLING));
}

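/* Disable all non-stalling interrupts; the cached restore mask is untouched. */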
void mc_gp10b_intr_nonstall_pause(struct gk20a *g)
{
	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
		     0xffffffff);
}

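/* Re-enable the non-stalling interrupts recorded in the cached restore mask. */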
void mc_gp10b_intr_nonstall_resume(struct gk20a *g)
{
	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_NONSTALLING),
		     g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING]);
}

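/*
 * Report whether the given unit has a bit pending in the non-stalling
 * (intr_1) mask. Only NVGPU_UNIT_FIFO is decoded; any other unit is
 * logged as an error and reported as not pending.
 */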
bool mc_gp10b_is_intr1_pending(struct gk20a *g,
			       enum nvgpu_unit unit, u32 mc_intr_1)
{
	u32 mask = 0;
	bool is_pending;

	switch (unit) {
	case NVGPU_UNIT_FIFO:
		mask = mc_intr_pfifo_pending_f();
		break;
	default:
		break;
	}

	if (mask == 0) {
		nvgpu_err(g, "unknown unit %d", unit);
		is_pending = false;
	} else {
		is_pending = (mc_intr_1 & mask) ? true : false;
	}

	return is_pending;
}
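
For context, these handlers are not called directly; they are meant to be
plugged into the per-chip HAL as gops->mc callbacks. A minimal sketch of that
wiring, assuming the usual gpu_ops layout (the helper name gp10b_init_mc_ops
and the exact field set are illustrative assumptions, not part of this diff):

	/* hypothetical HAL wiring, for illustration only */
	static void gp10b_init_mc_ops(struct gpu_ops *gops)
	{
		gops->mc.intr_enable = mc_gp10b_intr_enable;
		gops->mc.intr_unit_config = mc_gp10b_intr_unit_config;
		gops->mc.isr_stall = mc_gp10b_isr_stall;
		gops->mc.intr_stall = mc_gp10b_intr_stall;
		gops->mc.intr_stall_pause = mc_gp10b_intr_stall_pause;
		gops->mc.intr_stall_resume = mc_gp10b_intr_stall_resume;
		gops->mc.intr_nonstall = mc_gp10b_intr_nonstall;
		gops->mc.intr_nonstall_pause = mc_gp10b_intr_nonstall_pause;
		gops->mc.intr_nonstall_resume = mc_gp10b_intr_nonstall_resume;
		gops->mc.is_intr1_pending = mc_gp10b_is_intr1_pending;
	}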