Diffstat (limited to 'drivers/gpu/nvgpu/gv11b/fifo_gv11b.c')
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fifo_gv11b.c  32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index 57fd24de..dd56a6f9 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -117,20 +117,20 @@ static int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
 
 	gk20a_dbg_fn("");
 
-	gk20a_memset(g, mem, 0, 0, ram_fc_size_val_v());
+	nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
 
-	gk20a_mem_wr32(g, mem, ram_fc_gp_base_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_gp_base_w(),
 		pbdma_gp_base_offset_f(
 		u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s())));
 
-	gk20a_mem_wr32(g, mem, ram_fc_gp_base_hi_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_gp_base_hi_w(),
 		pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) |
 		pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries)));
 
-	gk20a_mem_wr32(g, mem, ram_fc_signature_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_signature_w(),
 		c->g->ops.fifo.get_pbdma_signature(c->g));
 
-	gk20a_mem_wr32(g, mem, ram_fc_pb_header_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_pb_header_w(),
 		pbdma_pb_header_priv_user_f() |
 		pbdma_pb_header_method_zero_f() |
 		pbdma_pb_header_subchannel_zero_f() |
@@ -138,44 +138,44 @@ static int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
 		pbdma_pb_header_first_true_f() |
 		pbdma_pb_header_type_inc_f());
 
-	gk20a_mem_wr32(g, mem, ram_fc_subdevice_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_subdevice_w(),
 		pbdma_subdevice_id_f(PBDMA_SUBDEVICE_ID) |
 		pbdma_subdevice_status_active_f() |
 		pbdma_subdevice_channel_dma_enable_f());
 
-	gk20a_mem_wr32(g, mem, ram_fc_target_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_target_w(),
 		pbdma_target_eng_ctx_valid_true_f() |
 		pbdma_target_ce_ctx_valid_true_f() |
 		pbdma_target_engine_sw_f());
 
-	gk20a_mem_wr32(g, mem, ram_fc_acquire_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_acquire_w(),
 		g->ops.fifo.pbdma_acquire_val(acquire_timeout));
 
-	gk20a_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(),
 		pbdma_runlist_timeslice_timeout_128_f() |
 		pbdma_runlist_timeslice_timescale_3_f() |
 		pbdma_runlist_timeslice_enable_true_f());
 
 
-	gk20a_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
+	nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
 
 	/* Until full subcontext is supported, always use VEID0 */
-	gk20a_mem_wr32(g, mem, ram_fc_set_channel_info_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_set_channel_info_w(),
 		pbdma_set_channel_info_scg_type_graphics_compute0_f() |
 		pbdma_set_channel_info_veid_f(CHANNEL_INFO_VEID0));
 
 	if (c->is_privileged_channel) {
 		/* Set privilege level for channel */
-		gk20a_mem_wr32(g, mem, ram_fc_config_w(),
+		nvgpu_mem_wr32(g, mem, ram_fc_config_w(),
 			pbdma_config_auth_level_privileged_f());
 
 		gk20a_fifo_setup_ramfc_for_privileged_channel(c);
 	}
 
 	/* Enable userd writeback */
-	data = gk20a_mem_rd32(g, mem, ram_fc_config_w());
+	data = nvgpu_mem_rd32(g, mem, ram_fc_config_w());
 	data = data | pbdma_config_userd_writeback_enable_f();
-	gk20a_mem_wr32(g, mem, ram_fc_config_w(),data);
+	nvgpu_mem_wr32(g, mem, ram_fc_config_w(),data);
 
 	gv11b_userd_writeback_config(g);
 
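The hunk above ends with a read-modify-write of the RAMFC config word through the renamed nvgpu_mem_rd32()/nvgpu_mem_wr32() accessors. Below is a minimal standalone sketch of that accessor pattern, assuming stubbed stand-ins: the struct layouts, the ram_fc_config_w() word index, and the writeback-enable field value are hypothetical placeholders, not the driver's real definitions.

#include <stdint.h>
#include <stdio.h>

struct gk20a { int unused; };                  /* stand-in for the driver's device struct */
struct nvgpu_mem { uint32_t words[256]; };     /* stand-in for the memory descriptor */

static uint32_t nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, uint32_t w)
{
	(void)g;                               /* the real accessor routes through g's aperture */
	return mem->words[w];
}

static void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, uint32_t w, uint32_t data)
{
	(void)g;
	mem->words[w] = data;
}

/* Hypothetical word index and field value, for illustration only. */
static uint32_t ram_fc_config_w(void) { return 61; }
static uint32_t pbdma_config_userd_writeback_enable_f(void) { return 1u << 12; }

int main(void)
{
	struct gk20a g = { 0 };
	struct nvgpu_mem fc = { { 0 } };
	uint32_t data;

	/* Read-modify-write of the config word, as in the hunk above. */
	data = nvgpu_mem_rd32(&g, &fc, ram_fc_config_w());
	data = data | pbdma_config_userd_writeback_enable_f();
	nvgpu_mem_wr32(&g, &fc, ram_fc_config_w(), data);

	printf("ram_fc config: 0x%08x\n", fc.words[ram_fc_config_w()]);
	return 0;
}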
@@ -196,7 +196,7 @@ static u32 gv11b_userd_gp_get(struct gk20a *g, struct channel_gk20a *c)
 	struct mem_desc *userd_mem = &g->fifo.userd;
 	u32 offset = c->hw_chid * (g->fifo.userd_entry_size / sizeof(u32));
 
-	return gk20a_mem_rd32(g, userd_mem,
+	return nvgpu_mem_rd32(g, userd_mem,
 			offset + ram_userd_gp_get_w());
 }
 
@@ -205,7 +205,7 @@ static void gv11b_userd_gp_put(struct gk20a *g, struct channel_gk20a *c)
 	struct mem_desc *userd_mem = &g->fifo.userd;
 	u32 offset = c->hw_chid * (g->fifo.userd_entry_size / sizeof(u32));
 
-	gk20a_mem_wr32(g, userd_mem, offset + ram_userd_gp_put_w(),
+	nvgpu_mem_wr32(g, userd_mem, offset + ram_userd_gp_put_w(),
 			c->gpfifo.put);
 	/* commit everything to cpu */
 	smp_mb();
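The gv11b_userd_gp_put() hunk pairs the GP_PUT store with smp_mb() so the pointer update is globally visible before anything that depends on it. A minimal userspace sketch of that publish-then-fence pattern follows; C11 atomic_thread_fence() stands in for the kernel's smp_mb(), and the USERD stride and word index are hypothetical values, not the hardware layout.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define USERD_ENTRY_WORDS 128u                 /* hypothetical per-channel USERD stride */

struct nvgpu_mem { uint32_t words[USERD_ENTRY_WORDS * 8]; };

static uint32_t ram_userd_gp_put_w(void) { return 35; }   /* hypothetical word index */

static void nvgpu_mem_wr32(struct nvgpu_mem *mem, uint32_t w, uint32_t data)
{
	mem->words[w] = data;                  /* the real accessor handles apertures */
}

/* Publish a new GP_PUT for a channel, then fence; the fence plays the
 * role of smp_mb() in gv11b_userd_gp_put() above. */
static void userd_gp_put(struct nvgpu_mem *userd, uint32_t hw_chid, uint32_t put)
{
	uint32_t offset = hw_chid * USERD_ENTRY_WORDS;

	nvgpu_mem_wr32(userd, offset + ram_userd_gp_put_w(), put);
	atomic_thread_fence(memory_order_seq_cst); /* commit everything to cpu */
}

int main(void)
{
	static struct nvgpu_mem userd = { { 0 } };

	userd_gp_put(&userd, 3, 42);           /* channel 3 now advertises put == 42 */
	printf("gp_put[chid 3] = %u\n",
		userd.words[3 * USERD_ENTRY_WORDS + ram_userd_gp_put_w()]);
	return 0;
}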