Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mc_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mc_gk20a.c | 241
1 file changed, 241 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
new file mode 100644
index 00000000..9d9256bd
--- /dev/null
+++ b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
@@ -0,0 +1,241 @@
/*
 * GK20A Master Control
 *
 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <trace/events/gk20a.h>

#include "gk20a.h"
#include "mc_gk20a.h"

#include <nvgpu/timers.h>
#include <nvgpu/atomic.h>
#include <nvgpu/unit.h>

#include <nvgpu/hw/gk20a/hw_mc_gk20a.h>

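/*
 * Service the stalling interrupt tree: snapshot the pending mask from
 * mc_intr_0, dispatch engine interrupts (GR, CE) to their ISRs, then
 * handle the per-unit bits (FIFO, PMU, priv ring, LTC, bus).
 */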
void mc_gk20a_isr_stall(struct gk20a *g)
{
	u32 mc_intr_0;
	u32 engine_id_idx;
	u32 active_engine_id = 0;
	u32 engine_enum = ENGINE_INVAL_GK20A;

	mc_intr_0 = g->ops.mc.intr_stall(g);

	gk20a_dbg(gpu_dbg_intr, "stall intr %08x\n", mc_intr_0);

	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) {
		active_engine_id = g->fifo.active_engines_list[engine_id_idx];

		if (mc_intr_0 & g->fifo.engine_info[active_engine_id].intr_mask) {
			engine_enum = g->fifo.engine_info[active_engine_id].engine_enum;
			/* GR Engine */
			if (engine_enum == ENGINE_GR_GK20A) {
				gr_gk20a_elpg_protected_call(g, gk20a_gr_isr(g));
			}

			/* CE Engine */
			if (((engine_enum == ENGINE_GRCE_GK20A) ||
			     (engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
			    g->ops.ce2.isr_stall) {
				g->ops.ce2.isr_stall(g,
					g->fifo.engine_info[active_engine_id].inst_id,
					g->fifo.engine_info[active_engine_id].pri_base);
			}
		}
	}
	if (mc_intr_0 & mc_intr_0_pfifo_pending_f())
		gk20a_fifo_isr(g);
	if (mc_intr_0 & mc_intr_0_pmu_pending_f())
		gk20a_pmu_isr(g);
	if (mc_intr_0 & mc_intr_0_priv_ring_pending_f())
		g->ops.priv_ring.isr(g);
	if (mc_intr_0 & mc_intr_0_ltc_pending_f())
		g->ops.ltc.isr(g);
	if (mc_intr_0 & mc_intr_0_pbus_pending_f())
		g->ops.bus.isr(g);
}

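/*
 * Enable interrupt delivery: route FIFO and engine interrupts onto
 * both the stalling (intr_0) and non-stalling (intr_1) lines, and add
 * the priv ring, LTC and bus interrupts to the stalling line only.
 */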
void mc_gk20a_intr_enable(struct gk20a *g)
{
	u32 eng_intr_mask = gk20a_fifo_engine_interrupt_mask(g);

	gk20a_writel(g, mc_intr_mask_1_r(),
		     mc_intr_0_pfifo_pending_f()
		     | eng_intr_mask);
	gk20a_writel(g, mc_intr_en_1_r(),
		     mc_intr_en_1_inta_hardware_f());

	gk20a_writel(g, mc_intr_mask_0_r(),
		     mc_intr_0_pfifo_pending_f()
		     | mc_intr_0_priv_ring_pending_f()
		     | mc_intr_0_ltc_pending_f()
		     | mc_intr_0_pbus_pending_f()
		     | eng_intr_mask);
	gk20a_writel(g, mc_intr_en_0_r(),
		     mc_intr_en_0_inta_hardware_f());
}

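/*
 * Set or clear a unit's bits in the stalling or non-stalling interrupt
 * mask with a read-modify-write of the selected mask register.
 */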
void mc_gk20a_intr_unit_config(struct gk20a *g, bool enable,
			bool is_stalling, u32 mask)
{
	u32 mask_reg = (is_stalling ? mc_intr_mask_0_r() :
				      mc_intr_mask_1_r());

	if (enable) {
		gk20a_writel(g, mask_reg,
			     gk20a_readl(g, mask_reg) | mask);
	} else {
		gk20a_writel(g, mask_reg,
			     gk20a_readl(g, mask_reg) & ~mask);
	}
}

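/*
 * Pause/resume delivery of stalling interrupts by toggling the master
 * enable; the read-back flushes the posted write to the GPU before the
 * caller proceeds.
 */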
void mc_gk20a_intr_stall_pause(struct gk20a *g)
{
	gk20a_writel(g, mc_intr_en_0_r(),
		     mc_intr_en_0_inta_disabled_f());

	/* flush previous write */
	gk20a_readl(g, mc_intr_en_0_r());
}

void mc_gk20a_intr_stall_resume(struct gk20a *g)
{
	gk20a_writel(g, mc_intr_en_0_r(),
		     mc_intr_en_0_inta_hardware_f());

	/* flush previous write */
	gk20a_readl(g, mc_intr_en_0_r());
}

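/*
 * The same pause/resume sequence for the non-stalling line. The
 * intr_en_1 register appears to share the intr_en_0 field layout,
 * which is why the mc_intr_en_0_inta_*_f() helpers are reused below.
 */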
void mc_gk20a_intr_nonstall_pause(struct gk20a *g)
{
	gk20a_writel(g, mc_intr_en_1_r(),
		     mc_intr_en_0_inta_disabled_f());

	/* flush previous write */
	gk20a_readl(g, mc_intr_en_1_r());
}

void mc_gk20a_intr_nonstall_resume(struct gk20a *g)
{
	gk20a_writel(g, mc_intr_en_1_r(),
		     mc_intr_en_0_inta_hardware_f());

	/* flush previous write */
	gk20a_readl(g, mc_intr_en_1_r());
}

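/* Raw reads of the pending stalling/non-stalling interrupt status. */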
u32 mc_gk20a_intr_stall(struct gk20a *g)
{
	return gk20a_readl(g, mc_intr_0_r());
}

u32 mc_gk20a_intr_nonstall(struct gk20a *g)
{
	return gk20a_readl(g, mc_intr_1_r());
}

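/*
 * Gate and ungate units through the mc_enable register under the
 * mc_enable_lock spinlock; a set bit in 'units' enables the
 * corresponding unit, a cleared bit holds it in reset.
 */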
void gk20a_mc_disable(struct gk20a *g, u32 units)
{
	u32 pmc;

	gk20a_dbg(gpu_dbg_info, "pmc disable: %08x\n", units);

	nvgpu_spinlock_acquire(&g->mc_enable_lock);
	pmc = gk20a_readl(g, mc_enable_r());
	pmc &= ~units;
	gk20a_writel(g, mc_enable_r(), pmc);
	nvgpu_spinlock_release(&g->mc_enable_lock);
}

void gk20a_mc_enable(struct gk20a *g, u32 units)
{
	u32 pmc;

	gk20a_dbg(gpu_dbg_info, "pmc enable: %08x\n", units);

	nvgpu_spinlock_acquire(&g->mc_enable_lock);
	pmc = gk20a_readl(g, mc_enable_r());
	pmc |= units;
	gk20a_writel(g, mc_enable_r(), pmc);
	gk20a_readl(g, mc_enable_r());
	nvgpu_spinlock_release(&g->mc_enable_lock);

	nvgpu_udelay(20);
}

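/*
 * Reset units by disabling them, waiting for the reset to settle
 * (CE engines need a longer delay), then re-enabling them.
 */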
void gk20a_mc_reset(struct gk20a *g, u32 units)
{
	g->ops.mc.disable(g, units);
	if (units & gk20a_fifo_get_all_ce_engine_reset_mask(g))
		nvgpu_udelay(500);
	else
		nvgpu_udelay(20);
	g->ops.mc.enable(g, units);
}

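/*
 * Read mc_boot_0 and optionally decode the architecture,
 * implementation and chip revision fields for the caller.
 */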
u32 gk20a_mc_boot_0(struct gk20a *g, u32 *arch, u32 *impl, u32 *rev)
{
	u32 val = gk20a_readl(g, mc_boot_0_r());

	if (arch)
		*arch = mc_boot_0_architecture_v(val) <<
			NVGPU_GPU_ARCHITECTURE_SHIFT;

	if (impl)
		*impl = mc_boot_0_implementation_v(val);

	if (rev)
		*rev = (mc_boot_0_major_revision_v(val) << 4) |
			mc_boot_0_minor_revision_v(val);

	return val;
}

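/*
 * Check whether a given unit has a non-stalling interrupt pending in
 * the passed-in mc_intr_1 snapshot; only FIFO is mapped on gk20a.
 */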
bool mc_gk20a_is_intr1_pending(struct gk20a *g,
			enum nvgpu_unit unit, u32 mc_intr_1)
{
	u32 mask = 0;
	bool is_pending;

	switch (unit) {
	case NVGPU_UNIT_FIFO:
		mask = mc_intr_0_pfifo_pending_f();
		break;
	default:
		break;
	}

	if (mask == 0) {
		nvgpu_err(g, "unknown unit %d", unit);
		is_pending = false;
	} else {
		is_pending = (mc_intr_1 & mask) ? true : false;
	}

	return is_pending;
}