author    Terje Bergstrom <tbergstrom@nvidia.com>  2014-05-08 08:13:32 -0400
committer Dan Willemsen <dwillemsen@nvidia.com>    2015-03-18 15:09:49 -0400
commit    48239f5f8cb5763cf4b6dc5db4668257da153cf9 (patch)
tree      b12e13c981efe9db4e22cfe696bbd3c62ab77089 /drivers/gpu/nvgpu/gk20a/channel_gk20a.c
parent    3e5c123862c87e22311c21558178f287f85ecb5d (diff)
gpu: nvgpu: Prune redundant cache maintenance
Remove redundant cache maintenance operations. Instance blocks and
graphics context buffers are uncached, so they do not need any cache
maintenance.

Bug 1421824

Change-Id: Ie0be67bf0be493d9ec9e6f8226f2f9359cba9f54
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/406948
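For illustration only (not part of the commit): a minimal sketch of the pattern this patch prunes, assuming the instance block behind inst_ptr is mapped uncached/DMA-coherent so CPU writes are visible to the GPU without explicit L2 maintenance. The helper name commit_va_sketch is hypothetical; gk20a_mem_wr32() and gk20a_mm_l2_invalidate() are the driver calls that appear in the diff below.

/* Illustrative sketch, not driver code: inst_ptr is assumed to map an
 * uncached instance block, so the write below reaches memory directly.
 * The trailing L2 invalidate (the call this patch removes) added cost
 * without adding correctness.
 */
static int commit_va_sketch(struct channel_gk20a *c, void *inst_ptr)
{
	gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
		ram_in_adr_limit_hi_f(u64_hi32(c->vm->va_limit)));

	/* gk20a_mm_l2_invalidate(c->g);  -- redundant for uncached memory */

	return 0;
}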
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c | 17 -----------------
1 file changed, 0 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 00f8ac94..61938f8e 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -128,8 +128,6 @@ int channel_gk20a_commit_va(struct channel_gk20a *c)
 	gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
 		ram_in_adr_limit_hi_f(u64_hi32(c->vm->va_limit)));
 
-	gk20a_mm_l2_invalidate(c->g);
-
 	return 0;
 }
 
@@ -159,8 +157,6 @@ static int channel_gk20a_commit_userd(struct channel_gk20a *c)
 		pbdma_userd_target_vid_mem_f() |
 		pbdma_userd_hi_addr_f(addr_hi));
 
-	gk20a_mm_l2_invalidate(c->g);
-
 	return 0;
 }
 
@@ -183,9 +179,6 @@ static int channel_gk20a_set_schedule_params(struct channel_gk20a *c,
 	/* preempt the channel */
 	WARN_ON(gk20a_fifo_preempt_channel(c->g, c->hw_chid));
 
-	/* flush GPU cache */
-	gk20a_mm_l2_flush(c->g, true);
-
 	/* value field is 8 bits long */
 	while (value >= 1 << 8) {
 		value >>= 1;
@@ -209,8 +202,6 @@ static int channel_gk20a_set_schedule_params(struct channel_gk20a *c,
 		gk20a_readl(c->g, ccsr_channel_r(c->hw_chid)) |
 		ccsr_channel_enable_set_true_f());
 
-	gk20a_mm_l2_invalidate(c->g);
-
 	return 0;
 }
 
@@ -277,8 +268,6 @@ static int channel_gk20a_setup_ramfc(struct channel_gk20a *c,
 
 	gk20a_mem_wr32(inst_ptr, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
 
-	gk20a_mm_l2_invalidate(c->g);
-
 	return 0;
 }
 
@@ -299,8 +288,6 @@ static int channel_gk20a_setup_userd(struct channel_gk20a *c)
 	gk20a_mem_wr32(c->userd_cpu_va, ram_userd_gp_get_w(), 0);
 	gk20a_mem_wr32(c->userd_cpu_va, ram_userd_gp_put_w(), 0);
 
-	gk20a_mm_l2_invalidate(c->g);
-
 	return 0;
 }
 
@@ -649,8 +636,6 @@ void gk20a_free_channel(struct channel_gk20a *ch, bool finish)
 	ch->gpfifo.cpu_va = NULL;
 	ch->gpfifo.iova = 0;
 
-	gk20a_mm_l2_invalidate(ch->g);
-
 	memset(&ch->gpfifo, 0, sizeof(struct gpfifo_desc));
 
 #if defined(CONFIG_GK20A_CYCLE_STATS)
@@ -1155,8 +1140,6 @@ static int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
 	channel_gk20a_setup_userd(c);
 	channel_gk20a_commit_userd(c);
 
-	gk20a_mm_l2_invalidate(c->g);
-
 	/* TBD: setup engine contexts */
 
 	err = channel_gk20a_alloc_priv_cmdbuf(c);