Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/fifo_gp10b.c')
-rw-r--r--  drivers/gpu/nvgpu/gp10b/fifo_gp10b.c  238
1 file changed, 238 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
new file mode 100644
index 00000000..40bfa2a5
--- /dev/null
+++ b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
@@ -0,0 +1,238 @@
/*
 * GP10B fifo
 *
 * Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/delay.h>
#include <linux/types.h>

#include "gk20a/gk20a.h"
#include "gm20b/fifo_gm20b.h"
#include "hw_pbdma_gp10b.h"
#include "fifo_gp10b.h"
#include "hw_ccsr_gp10b.h"
#include "hw_fifo_gp10b.h"
#include "hw_ram_gp10b.h"
#include "hw_top_gp10b.h"

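/*
 * Enable the replayable-fault bits for the TEX and GCC units in the
 * page directory base words of the given instance block.
 */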
static void gp10b_set_pdb_fault_replay_flags(struct gk20a *g,
				struct mem_desc *mem)
{
	u32 val;

	gk20a_dbg_fn("");

	val = gk20a_mem_rd32(g, mem,
			ram_in_page_dir_base_fault_replay_tex_w());
	val &= ~ram_in_page_dir_base_fault_replay_tex_m();
	val |= ram_in_page_dir_base_fault_replay_tex_true_f();
	gk20a_mem_wr32(g, mem,
			ram_in_page_dir_base_fault_replay_tex_w(), val);

	val = gk20a_mem_rd32(g, mem,
			ram_in_page_dir_base_fault_replay_gcc_w());
	val &= ~ram_in_page_dir_base_fault_replay_gcc_m();
	val |= ram_in_page_dir_base_fault_replay_gcc_true_f();
	gk20a_mem_wr32(g, mem,
			ram_in_page_dir_base_fault_replay_gcc_w(), val);

	gk20a_dbg_fn("done");
}

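/*
 * Write the channel's USERD address into the RAMFC area of its
 * instance block, split into low and high words. The target aperture
 * follows g->mm.vidmem_is_vidmem: non-coherent sysmem when set,
 * vidmem otherwise.
 */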
int channel_gp10b_commit_userd(struct channel_gk20a *c)
{
	u32 addr_lo;
	u32 addr_hi;
	struct gk20a *g = c->g;

	gk20a_dbg_fn("");

	addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v());
	addr_hi = u64_hi32(c->userd_iova);

	gk20a_dbg_info("channel %d : set ramfc userd 0x%016llx",
		c->hw_chid, (u64)c->userd_iova);

	gk20a_mem_wr32(g, &c->inst_block,
		ram_in_ramfc_w() + ram_fc_userd_w(),
		(g->mm.vidmem_is_vidmem ?
		 pbdma_userd_target_sys_mem_ncoh_f() :
		 pbdma_userd_target_vid_mem_f()) |
		pbdma_userd_addr_f(addr_lo));

	gk20a_mem_wr32(g, &c->inst_block,
		ram_in_ramfc_w() + ram_fc_userd_hi_w(),
		pbdma_userd_hi_addr_f(addr_hi));

	return 0;
}

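/*
 * Program the channel's RAMFC: clear it, then set up the GPFIFO base
 * and size, PBDMA signature, formats, PB header, subdevice, acquire
 * timeout, and runlist timeslice. Replayable faults and privileged
 * access are enabled when requested, and USERD is committed last.
 */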
static int channel_gp10b_setup_ramfc(struct channel_gk20a *c,
			u64 gpfifo_base, u32 gpfifo_entries, u32 flags)
{
	struct gk20a *g = c->g;
	struct mem_desc *mem = &c->inst_block;

	gk20a_dbg_fn("");

	gk20a_memset(g, mem, 0, 0, ram_fc_size_val_v());

	gk20a_mem_wr32(g, mem, ram_fc_gp_base_w(),
		pbdma_gp_base_offset_f(
		u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s())));

	gk20a_mem_wr32(g, mem, ram_fc_gp_base_hi_w(),
		pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) |
		pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries)));

	gk20a_mem_wr32(g, mem, ram_fc_signature_w(),
		c->g->ops.fifo.get_pbdma_signature(c->g));

	gk20a_mem_wr32(g, mem, ram_fc_formats_w(),
		pbdma_formats_gp_fermi0_f() |
		pbdma_formats_pb_fermi1_f() |
		pbdma_formats_mp_fermi0_f());

	gk20a_mem_wr32(g, mem, ram_fc_pb_header_w(),
		pbdma_pb_header_priv_user_f() |
		pbdma_pb_header_method_zero_f() |
		pbdma_pb_header_subchannel_zero_f() |
		pbdma_pb_header_level_main_f() |
		pbdma_pb_header_first_true_f() |
		pbdma_pb_header_type_inc_f());

	gk20a_mem_wr32(g, mem, ram_fc_subdevice_w(),
		pbdma_subdevice_id_f(1) |
		pbdma_subdevice_status_active_f() |
		pbdma_subdevice_channel_dma_enable_f());

	gk20a_mem_wr32(g, mem, ram_fc_target_w(), pbdma_target_engine_sw_f());

	gk20a_mem_wr32(g, mem, ram_fc_acquire_w(),
		channel_gk20a_pbdma_acquire_val(c));

	gk20a_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(),
		pbdma_runlist_timeslice_timeout_128_f() |
		pbdma_runlist_timeslice_timescale_3_f() |
		pbdma_runlist_timeslice_enable_true_f());

	if (flags & NVGPU_ALLOC_GPFIFO_FLAGS_REPLAYABLE_FAULTS_ENABLE)
		gp10b_set_pdb_fault_replay_flags(c->g, mem);

	gk20a_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));

	if (c->is_privileged_channel) {
		/* Set privilege level for channel */
		gk20a_mem_wr32(g, mem, ram_fc_config_w(),
			pbdma_config_auth_level_privileged_f());

		gk20a_channel_setup_ramfc_for_privileged_channel(c);
	}

	return channel_gp10b_commit_userd(c);
}

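/*
 * The PBDMA signature on gp10b is the GPFIFO class from the GPU
 * characteristics combined with a zeroed software field.
 */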
static u32 gp10b_fifo_get_pbdma_signature(struct gk20a *g)
{
	return g->gpu_characteristics.gpfifo_class
		| pbdma_signature_sw_zero_f();
}

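/*
 * Rebind the channel's allowed syncpoint when it has changed: disable
 * the channel (or its TSG), preempt it, write the new syncpoint index
 * into RAMFC, then re-enable the channel.
 */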
static int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
{
	u32 new_syncpt = 0, old_syncpt;
	u32 v;

	gk20a_dbg_fn("");

	v = gk20a_mem_rd32(c->g, &c->inst_block,
			ram_fc_allowed_syncpoints_w());
	old_syncpt = pbdma_allowed_syncpoints_0_index_v(v);
	if (c->sync)
		new_syncpt = c->sync->syncpt_id(c->sync);

	if (new_syncpt && new_syncpt != old_syncpt) {
		/* disable channel */
		gk20a_disable_channel_tsg(c->g, c);

		/* preempt the channel */
		WARN_ON(gk20a_fifo_preempt(c->g, c));

		v = pbdma_allowed_syncpoints_0_valid_f(1);

		gk20a_dbg_info("Channel %d, syncpt id %d\n",
			c->hw_chid, new_syncpt);

		v |= pbdma_allowed_syncpoints_0_index_f(new_syncpt);

		gk20a_mem_wr32(c->g, &c->inst_block,
				ram_fc_allowed_syncpoints_w(), v);
	}

	/* enable channel */
	gk20a_enable_channel_tsg(c->g, c);

	gk20a_dbg_fn("done");

	return 0;
}

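/*
 * Translate a device-info engine type into the driver's engine enum.
 * Graphics maps to ENGINE_GR_GK20A; LCE maps to ENGINE_ASYNC_CE_GK20A.
 */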
static int gp10b_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type,
					u32 *inst_id)
{
	int ret = ENGINE_INVAL_GK20A;

	gk20a_dbg_info("engine type %d", engine_type);
	if (engine_type == top_device_info_type_enum_graphics_v())
		ret = ENGINE_GR_GK20A;
	else if (engine_type == top_device_info_type_enum_lce_v()) {
		/* Default assumption: each CE engine has its own runlist */
		ret = ENGINE_ASYNC_CE_GK20A;
	}

	return ret;
}

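/*
 * Parse an enum2-type data entry from the device info table, extracting
 * the instance id, the aligned pri base, and, when marked valid, the
 * fault id. Other data types are reported as errors.
 */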
static void gp10b_device_info_data_parse(struct gk20a *g, u32 table_entry,
				u32 *inst_id, u32 *pri_base, u32 *fault_id)
{
	if (top_device_info_data_type_v(table_entry) ==
	    top_device_info_data_type_enum2_v()) {
		if (inst_id)
			*inst_id = top_device_info_data_inst_id_v(table_entry);
		if (pri_base) {
			*pri_base =
				(top_device_info_data_pri_base_v(table_entry)
				<< top_device_info_data_pri_base_align_v());
		}
		if (fault_id && (top_device_info_data_fault_id_v(table_entry) ==
			top_device_info_data_fault_id_valid_v())) {
			*fault_id =
				top_device_info_data_fault_id_enum_v(table_entry);
		}
	} else {
		gk20a_err(g->dev, "unknown device_info_data %d",
			top_device_info_data_type_v(table_entry));
	}
}

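/*
 * Install the gp10b fifo HALs: inherit the gm20b set, then override
 * the entries that differ on this chip.
 */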
void gp10b_init_fifo(struct gpu_ops *gops)
{
	gm20b_init_fifo(gops);
	gops->fifo.setup_ramfc = channel_gp10b_setup_ramfc;
	gops->fifo.get_pbdma_signature = gp10b_fifo_get_pbdma_signature;
	gops->fifo.resetup_ramfc = gp10b_fifo_resetup_ramfc;
	gops->fifo.engine_enum_from_type = gp10b_fifo_engine_enum_from_type;
	gops->fifo.device_info_data_parse = gp10b_device_info_data_parse;
	gops->fifo.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v;
}