path: root/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/fifo_gm20b.c')
-rw-r--r--  drivers/gpu/nvgpu/gm20b/fifo_gm20b.c  223
1 file changed, 223 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
new file mode 100644
index 00000000..0762e8bd
--- /dev/null
+++ b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
@@ -0,0 +1,223 @@
/*
 * GM20B Fifo
 *
 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "gk20a/gk20a.h"
#include "gk20a/fifo_gk20a.h"

#include "fifo_gm20b.h"

#include <nvgpu/timers.h>
#include <nvgpu/log.h>
#include <nvgpu/atomic.h>
#include <nvgpu/barrier.h>
#include <nvgpu/mm.h>

#include <nvgpu/hw/gm20b/hw_ccsr_gm20b.h>
#include <nvgpu/hw/gm20b/hw_ram_gm20b.h>
#include <nvgpu/hw/gm20b/hw_fifo_gm20b.h>
#include <nvgpu/hw/gm20b/hw_top_gm20b.h>
#include <nvgpu/hw/gm20b/hw_pbdma_gm20b.h>

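/*
 * Bind a channel to its instance block: program the channel's CCSR slot
 * with the instance block address (shifted by ram_in_base_shift), the
 * aperture (vidmem or non-coherent sysmem) and the bind bit, then enable
 * the channel and mark it bound.
 */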
void channel_gm20b_bind(struct channel_gk20a *c)
{
	struct gk20a *g = c->g;

	u32 inst_ptr = nvgpu_inst_block_addr(g, &c->inst_block)
		>> ram_in_base_shift_v();

	gk20a_dbg_info("bind channel %d inst ptr 0x%08x",
		c->chid, inst_ptr);

	gk20a_writel(g, ccsr_channel_inst_r(c->chid),
		ccsr_channel_inst_ptr_f(inst_ptr) |
		nvgpu_aperture_mask(g, &c->inst_block,
			ccsr_channel_inst_target_sys_mem_ncoh_f(),
			ccsr_channel_inst_target_vid_mem_f()) |
		ccsr_channel_inst_bind_true_f());

	gk20a_writel(g, ccsr_channel_r(c->chid),
		(gk20a_readl(g, ccsr_channel_r(c->chid)) &
		 ~ccsr_channel_enable_set_f(~0)) |
		ccsr_channel_enable_set_true_f());

	/* Ensure the register writes above land before marking the channel bound. */
	nvgpu_smp_wmb();
	nvgpu_atomic_set(&c->bound, true);
}

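/*
 * Translate a host engine id into the MMU fault id used by the fault
 * trigger registers; returns ~0 if the engine is not in the active list.
 */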
static inline u32 gm20b_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id)
{
	u32 fault_id = ~0;
	struct fifo_engine_info_gk20a *engine_info;

	engine_info = gk20a_fifo_get_engine_info(g, engine_id);

	if (engine_info) {
		fault_id = engine_info->fault_id;
	} else {
		nvgpu_err(g, "invalid engine_id %d (not in active list)",
			  engine_id);
	}
	return fault_id;
}

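/*
 * Trigger an MMU fault on each engine in @engine_ids, wait (bounded by the
 * GR idle timeout) for the fault interrupt to become pending, then release
 * the fault trigger registers again.
 */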
void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
		unsigned long engine_ids)
{
	unsigned long delay = GR_IDLE_CHECK_DEFAULT;
	unsigned long engine_id;
	int ret = -EBUSY;
	struct nvgpu_timeout timeout;

	/* trigger faults for all bad engines */
	for_each_set_bit(engine_id, &engine_ids, 32) {
		if (!gk20a_fifo_is_valid_engine_id(g, engine_id)) {
			nvgpu_err(g, "faulting unknown engine %ld", engine_id);
		} else {
			u32 mmu_id = gm20b_engine_id_to_mmu_id(g,
							       engine_id);
			if (mmu_id != (u32)~0)
				gk20a_writel(g, fifo_trigger_mmu_fault_r(mmu_id),
					     fifo_trigger_mmu_fault_enable_f(1));
		}
	}

	nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
			   NVGPU_TIMER_CPU_TIMER);

	/* Wait for MMU fault to trigger */
	do {
		if (gk20a_readl(g, fifo_intr_0_r()) &
		    fifo_intr_0_mmu_fault_pending_f()) {
			ret = 0;
			break;
		}

		nvgpu_usleep_range(delay, delay * 2);
		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
	} while (!nvgpu_timeout_expired(&timeout));

	if (ret)
		nvgpu_err(g, "mmu fault timeout");

	/* release mmu fault trigger */
	for_each_set_bit(engine_id, &engine_ids, 32)
		gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id), 0);
}

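/* Number of channels supported by the CCSR unit on GM20B. */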
u32 gm20b_fifo_get_num_fifos(struct gk20a *g)
{
	return ccsr_channel__size_1_v();
}

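/*
 * Parse an enum2-format data entry from the top-level device info table,
 * extracting the engine's pri base and, when the entry marks it valid,
 * its fault id.
 */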
void gm20b_device_info_data_parse(struct gk20a *g,
		u32 table_entry, u32 *inst_id,
		u32 *pri_base, u32 *fault_id)
{
	if (top_device_info_data_type_v(table_entry) ==
	    top_device_info_data_type_enum2_v()) {
		if (pri_base) {
			*pri_base =
				(top_device_info_data_pri_base_v(table_entry)
				 << top_device_info_data_pri_base_align_v());
		}
		if (fault_id && (top_device_info_data_fault_id_v(table_entry) ==
				 top_device_info_data_fault_id_valid_v())) {
			*fault_id =
				top_device_info_data_fault_id_enum_v(table_entry);
		}
	} else {
		nvgpu_err(g, "unknown device_info_data %d",
			  top_device_info_data_type_v(table_entry));
	}
}

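/*
 * Split the PBDMA interrupt bits into device-fatal, channel-fatal and
 * restartable groups, used when deciding how far recovery has to go for a
 * pending PBDMA interrupt.
 */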
void gm20b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f)
{
	/*
	 * These are all errors which indicate something really wrong is
	 * going on in the device.
	 */
	f->intr.pbdma.device_fatal_0 =
		pbdma_intr_0_memreq_pending_f() |
		pbdma_intr_0_memack_timeout_pending_f() |
		pbdma_intr_0_memack_extra_pending_f() |
		pbdma_intr_0_memdat_timeout_pending_f() |
		pbdma_intr_0_memdat_extra_pending_f() |
		pbdma_intr_0_memflush_pending_f() |
		pbdma_intr_0_memop_pending_f() |
		pbdma_intr_0_lbconnect_pending_f() |
		pbdma_intr_0_lback_timeout_pending_f() |
		pbdma_intr_0_lback_extra_pending_f() |
		pbdma_intr_0_lbdat_timeout_pending_f() |
		pbdma_intr_0_lbdat_extra_pending_f() |
		pbdma_intr_0_pri_pending_f();

	/*
	 * These are data parsing or framing errors, or other conditions that
	 * can be recovered from by software intervention or by resetting the
	 * channel.
	 */
	f->intr.pbdma.channel_fatal_0 =
		pbdma_intr_0_gpfifo_pending_f() |
		pbdma_intr_0_gpptr_pending_f() |
		pbdma_intr_0_gpentry_pending_f() |
		pbdma_intr_0_gpcrc_pending_f() |
		pbdma_intr_0_pbptr_pending_f() |
		pbdma_intr_0_pbentry_pending_f() |
		pbdma_intr_0_pbcrc_pending_f() |
		pbdma_intr_0_method_pending_f() |
		pbdma_intr_0_methodcrc_pending_f() |
		pbdma_intr_0_pbseg_pending_f() |
		pbdma_intr_0_signature_pending_f();

	/* Can be used for sw-methods, or represents a recoverable timeout. */
	f->intr.pbdma.restartable_0 =
		pbdma_intr_0_device_pending_f();
}

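/* Force a context reload on the given channel via its CCSR register. */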
static void gm20b_fifo_set_ctx_reload(struct channel_gk20a *ch)
{
	struct gk20a *g = ch->g;
	u32 channel = gk20a_readl(g, ccsr_channel_r(ch->chid));

	gk20a_writel(g, ccsr_channel_r(ch->chid),
		     channel | ccsr_channel_force_ctx_reload_true_f());
}

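/*
 * If CTX_RELOAD is set on @ch, transfer it to another channel in the same
 * TSG (picked under the TSG's channel list read lock) so the reload
 * request is not lost along with @ch.
 */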
void gm20b_fifo_tsg_verify_status_ctx_reload(struct channel_gk20a *ch)
{
	struct gk20a *g = ch->g;
	struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
	struct channel_gk20a *temp_ch;

	/* If CTX_RELOAD is set on a channel, move it to some other channel */
	if (gk20a_fifo_channel_status_is_ctx_reload(ch->g, ch->chid)) {
		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
		nvgpu_list_for_each_entry(temp_ch, &tsg->ch_list,
					  channel_gk20a, ch_entry) {
			if (temp_ch->chid != ch->chid) {
				gm20b_fifo_set_ctx_reload(temp_ch);
				break;
			}
		}
		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
	}
}
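
/*
 * A minimal sketch (not part of this file) of how a per-chip HAL init
 * routine such as hal_gm20b.c typically hooks up these helpers; the exact
 * gpu_ops field names below are assumptions for illustration only:
 *
 *	gops->fifo.bind_channel = channel_gm20b_bind;
 *	gops->fifo.trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault;
 *	gops->fifo.get_num_fifos = gm20b_fifo_get_num_fifos;
 *	gops->fifo.device_info_data_parse = gm20b_device_info_data_parse;
 *	gops->fifo.init_pbdma_intr_descs = gm20b_fifo_init_pbdma_intr_descs;
 *	gops->fifo.tsg_verify_status_ctx_reload =
 *		gm20b_fifo_tsg_verify_status_ctx_reload;
 */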