author     Sourab Gupta <sourabg@nvidia.com>                      2018-03-28 02:44:12 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>   2018-04-05 08:22:58 -0400
commit     abd5f68eef7a1b26f95cc9dd07515b49e29219de (patch)
tree       94bc998200b1a53ee6e1608d2e8fb4a2bce263f7 /drivers/gpu/nvgpu/gk20a/channel_gk20a.c
parent     03b87689025b86b145236a9c707e31a3d3214eb0 (diff)
gpu: nvgpu: add usermode submission interface HAL
The patch adds the HAL interfaces for handling usermode submission, in particular allocating a channel-specific usermode userd. These interfaces are currently implemented only on QNX and are shaped accordingly. As and when Linux adds usermode submission support, we can revisit them if further changes are needed.

Change-Id: I790e0ebdfaedcdc5f6bb624652b1af4549b7b062
Signed-off-by: Sourab Gupta <sourabg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1683392
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
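For orientation, the call sites in the diff below imply a fifo HAL hook of roughly the following shape. This is a sketch reconstructed from those call sites only; the struct name fifo_hal_sketch and the nvgpu_gpfifo_args type name are assumptions, not taken from the patch.

/* Sketch of the fifo HAL hook implied by the call sites below. */
struct channel_gk20a;      /* from the driver */
struct nvgpu_gpfifo_args;  /* assumed name of the gpfifo_args type */

struct fifo_hal_sketch {
	/* Returns 0 on success. A NULL pointer means the OS/chip
	 * combination (e.g. Linux at the time of this patch) has no
	 * usermode submission support, and alloc_gpfifo fails with
	 * -EINVAL when usermode is requested anyway. */
	int (*alloc_usermode_buffers)(struct channel_gk20a *c,
				      struct nvgpu_gpfifo_args *gpfifo_args);
};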
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
-rw-r--r--   drivers/gpu/nvgpu/gk20a/channel_gk20a.c   40
1 file changed, 39 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 83c49d52..a68968fe 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -376,6 +376,13 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	if (g->ops.fifo.free_channel_ctx_header)
 		g->ops.fifo.free_channel_ctx_header(ch);
 
+	if (ch->usermode_submit_enabled) {
+		gk20a_channel_free_usermode_buffers(ch);
+		ch->userd_iova = nvgpu_mem_get_addr(g, &f->userd) +
+			ch->chid * f->userd_entry_size;
+		ch->usermode_submit_enabled = false;
+	}
+
 	gk20a_gr_flush_channel_tlb(gr);
 
 	nvgpu_dma_unmap_free(ch_vm, &ch->gpfifo.mem);
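The restore of ch->userd_iova in this hunk recomputes the channel's slot in the fifo-wide userd buffer once the per-channel usermode userd has been freed. As an illustrative sketch of that arithmetic (the helper name is mine, not part of the patch; u64/u32 are the kernel integer types):

/* Illustrative only: the default USERD address for a channel is the
 * base of the fifo-wide userd buffer plus chid * entry size. */
static u64 default_userd_iova(u64 userd_base, u32 userd_entry_size, u32 chid)
{
	return userd_base + (u64)chid * userd_entry_size;
}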
@@ -1086,12 +1093,30 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 		goto clean_up_idle;
 	}
 
+	if (gpfifo_args->flags & NVGPU_GPFIFO_FLAGS_USERMODE_SUPPORT) {
+		if (g->ops.fifo.alloc_usermode_buffers) {
+			err = g->ops.fifo.alloc_usermode_buffers(c,
+					gpfifo_args);
+			if (err) {
+				nvgpu_err(g, "Usermode buffer alloc failed");
+				goto clean_up;
+			}
+			c->userd_iova = nvgpu_mem_get_addr(g,
+					&c->usermode_userd);
+			c->usermode_submit_enabled = true;
+		} else {
+			nvgpu_err(g, "Usermode submit not supported");
+			err = -EINVAL;
+			goto clean_up;
+		}
+	}
+
 	err = nvgpu_dma_alloc_map_sys(ch_vm,
 			gpfifo_size * gpfifo_entry_size,
 			&c->gpfifo.mem);
 	if (err) {
 		nvgpu_err(g, "%s: memory allocation failed", __func__);
-		goto clean_up;
+		goto clean_up_usermode;
 	}
 
 	if (c->gpfifo.mem.aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
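The label change at the bottom of this hunk is the key error-handling detail: usermode buffers are now set up before the gpfifo memory, so a gpfifo allocation failure must unwind through clean_up_usermode rather than jump straight to clean_up. A minimal standalone sketch of that goto-unwind ordering, with hypothetical stubs standing in for the real allocators:

#include <errno.h>

static int alloc_usermode(void)   { return 0; }        /* hypothetical stub */
static void free_usermode(void)   { }                  /* hypothetical stub */
static int alloc_gpfifo_mem(void) { return -ENOMEM; }  /* stub: force unwind */

/* Each step that succeeds before a later failure is undone by falling
 * through the labels in reverse order of setup. */
static int alloc_gpfifo_sketch(void)
{
	int err;

	err = alloc_usermode();
	if (err)
		goto clean_up;

	err = alloc_gpfifo_mem();
	if (err)
		goto clean_up_usermode;	/* was "goto clean_up" before this patch */

	return 0;

clean_up_usermode:
	free_usermode();
clean_up:
	return err;
}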
@@ -1174,6 +1199,13 @@ clean_up_sync:
 clean_up_unmap:
 	nvgpu_big_free(g, c->gpfifo.pipe);
 	nvgpu_dma_unmap_free(ch_vm, &c->gpfifo.mem);
+clean_up_usermode:
+	if (c->usermode_submit_enabled) {
+		gk20a_channel_free_usermode_buffers(c);
+		c->userd_iova = nvgpu_mem_get_addr(g, &g->fifo.userd) +
+			c->chid * g->fifo.userd_entry_size;
+		c->usermode_submit_enabled = false;
+	}
 clean_up:
 	memset(&c->gpfifo, 0, sizeof(struct gpfifo_desc));
 clean_up_idle:
@@ -1187,6 +1219,12 @@ clean_up_idle:
 	return err;
 }
 
+void gk20a_channel_free_usermode_buffers(struct channel_gk20a *c)
+{
+	if (nvgpu_mem_is_valid(&c->usermode_userd))
+		nvgpu_dma_free(c->g, &c->usermode_userd);
+}
+
 /* Update with this periodically to determine how the gpfifo is draining. */
 static inline u32 update_gp_get(struct gk20a *g,
 		struct channel_gk20a *c)
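Note the nvgpu_mem_is_valid() check in the new helper: it makes gk20a_channel_free_usermode_buffers() safe to call even if alloc_usermode_buffers() failed partway through, so both the clean_up_usermode error path and gk20a_free_channel() only need to consult the single usermode_submit_enabled flag rather than track the allocation state of each buffer.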