author    sujeet baranwal <sbaranwal@nvidia.com>    2015-02-19 13:34:51 -0500
committer Dan Willemsen <dwillemsen@nvidia.com>    2015-04-04 21:08:16 -0400
commit    8d1ab756ed8a7f4d3138dc5da9d2de9f52915261 (patch)
tree      969a5a5aead991570d8c8c56acd41adb2103b8f5 /drivers/gpu/nvgpu/gk20a/gr_gk20a.c
parent    ac205be1d31b00c5641df81d53f2da5f143d3354 (diff)
gpu: nvgpu: ioctl for flushing GPU L2
CUDA devtools need to be able to flush the GPU's cache in a sideband
fashion and so cannot use methods. This change implements an
nvgpu_gpu_ioctl to flush and optionally invalidate the GPU's L2 cache
and to flush the FB.

Change-Id: Ib06a0bc8d8880ffbfe4b056518cc3c3df0cc4988
Signed-off-by: sujeet baranwal <sbaranwal@nvidia.com>
Signed-off-by: Mayank Kaushik <mkaushik@nvidia.com>
Reviewed-on: http://git-master/r/671809
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
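The commit message describes a userspace-visible ioctl on top of the in-kernel flush hook. Below is a minimal sketch of how a devtool might drive it; the ioctl number, argument struct layout, and device node are assumptions for illustration (the real definitions live in the nvgpu uapi header, not in this diff):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/ioctl.h>
    #include <unistd.h>

    /* Hypothetical argument struct: one flag per requested operation. */
    struct nvgpu_gpu_l2_fb_args {
            unsigned int l2_flush;       /* flush GPU L2 */
            unsigned int l2_invalidate;  /* also invalidate L2 lines */
            unsigned int fb_flush;       /* flush FB */
    };

    /* Hypothetical ioctl number; placeholder magic/sequence values. */
    #define NVGPU_GPU_IOCTL_L2_FB_OPS \
            _IOWR('G', 16, struct nvgpu_gpu_l2_fb_args)

    int main(void)
    {
            struct nvgpu_gpu_l2_fb_args args = {
                    .l2_flush = 1,
                    .l2_invalidate = 1,  /* optional per the commit message */
                    .fb_flush = 1,
            };
            /* Assumed control node for the gk20a GPU. */
            int fd = open("/dev/nvhost-ctrl-gpu", O_RDWR);

            if (fd < 0 || ioctl(fd, NVGPU_GPU_IOCTL_L2_FB_OPS, &args) < 0) {
                    perror("l2/fb flush ioctl");
                    return 1;
            }
            close(fd);
            return 0;
    }

Keeping one flag per operation matches the "flush and optionally invalidate" wording above: a caller can flush L2 without invalidating it, or request the FB flush alone.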
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 3115b5c3..ab3f18ba 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -1502,7 +1502,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 	ctx_header_words = roundup(ctx_header_bytes, sizeof(u32));
 	ctx_header_words >>= 2;
 
-	gk20a_mm_l2_flush(g, true);
+	g->ops.mm.l2_flush(g, true);
 
 	for (i = 0; i < ctx_header_words; i++) {
 		data = gk20a_mem_rd32(ctx_ptr, i);
@@ -1565,7 +1565,7 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
 
 	/* Channel gr_ctx buffer is gpu cacheable.
 	   Flush and invalidate before cpu update. */
-	gk20a_mm_l2_flush(g, true);
+	g->ops.mm.l2_flush(g, true);
 
 	ctx_ptr = vmap(ch_ctx->gr_ctx->pages,
 			PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT,
@@ -1605,7 +1605,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 
 	/* Channel gr_ctx buffer is gpu cacheable.
 	   Flush and invalidate before cpu update. */
-	gk20a_mm_l2_flush(g, true);
+	g->ops.mm.l2_flush(g, true);
 
 	ctx_ptr = vmap(ch_ctx->gr_ctx->pages,
 			PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT,
@@ -7003,7 +7003,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 		goto cleanup;
 	}
 
-	gk20a_mm_l2_flush(g, true);
+	g->ops.mm.l2_flush(g, true);
 
 	/* write to appropriate place in context image,
 	 * first have to figure out where that really is */
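Every hunk makes the same substitution: the direct call gk20a_mm_l2_flush() becomes a dispatch through the per-GPU ops table, so these call sites and the new ioctl path share one chip-selectable entry point. A standalone sketch of that ops-table pattern follows; the struct layouts, the init helper, and the l2_flush signature are assumptions for illustration (only g->ops.mm.l2_flush and gk20a_mm_l2_flush appear in the diff):

    #include <stdbool.h>
    #include <stdio.h>

    struct gk20a;  /* forward declaration so the op can take the device */

    /* Per-chip memory-management ops; hypothetical layout. */
    struct gpu_mm_ops {
            void (*l2_flush)(struct gk20a *g, bool invalidate);
    };

    struct gpu_ops {
            struct gpu_mm_ops mm;
    };

    struct gk20a {
            struct gpu_ops ops;
    };

    /* gk20a-specific implementation; signature assumed from the call sites. */
    static void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
    {
            (void)g;
            printf("L2 flush%s\n", invalidate ? " + invalidate" : "");
    }

    int main(void)
    {
            struct gk20a g = { 0 };

            /* HAL init wires the chip-specific op into the table... */
            g.ops.mm.l2_flush = gk20a_mm_l2_flush;

            /* ...and common code dispatches through it, as in the hunks above. */
            g.ops.mm.l2_flush(&g, true);
            return 0;
    }

Routing the flush through the table means a later chip can install its own l2_flush implementation without touching gr_gk20a.c again.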