diff options
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/nvgpu/common/fifo/channel.c     | 28
-rw-r--r--  drivers/gpu/nvgpu/gk20a/ce2_gk20a.c         | 12
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fifo_gv11b.c        |  2
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/channel.h   | 18
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/gk20a.h     |  4
-rw-r--r--  drivers/gpu/nvgpu/os/linux/cde.c            | 19
-rw-r--r--  drivers/gpu/nvgpu/os/linux/ioctl_channel.c  | 86
7 files changed, 103 insertions(+), 66 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/fifo/channel.c b/drivers/gpu/nvgpu/common/fifo/channel.c index fefd90d8..adb59ac4 100644 --- a/drivers/gpu/nvgpu/common/fifo/channel.c +++ b/drivers/gpu/nvgpu/common/fifo/channel.c | |||
@@ -1131,8 +1131,8 @@ static void channel_gk20a_free_prealloc_resources(struct channel_gk20a *c) | |||
1131 | c->joblist.pre_alloc.enabled = false; | 1131 | c->joblist.pre_alloc.enabled = false; |
1132 | } | 1132 | } |
1133 | 1133 | ||
1134 | int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c, | 1134 | int nvgpu_channel_setup_bind(struct channel_gk20a *c, |
1135 | struct nvgpu_gpfifo_args *gpfifo_args) | 1135 | struct nvgpu_setup_bind_args *args) |
1136 | { | 1136 | { |
1137 | struct gk20a *g = c->g; | 1137 | struct gk20a *g = c->g; |
1138 | struct vm_gk20a *ch_vm; | 1138 | struct vm_gk20a *ch_vm; |
@@ -1141,14 +1141,14 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c, | |||
1141 | int err = 0; | 1141 | int err = 0; |
1142 | unsigned long acquire_timeout; | 1142 | unsigned long acquire_timeout; |
1143 | 1143 | ||
1144 | gpfifo_size = gpfifo_args->num_entries; | 1144 | gpfifo_size = args->num_gpfifo_entries; |
1145 | gpfifo_entry_size = nvgpu_get_gpfifo_entry_size(); | 1145 | gpfifo_entry_size = nvgpu_get_gpfifo_entry_size(); |
1146 | 1146 | ||
1147 | if (gpfifo_args->flags & NVGPU_GPFIFO_FLAGS_SUPPORT_VPR) { | 1147 | if (args->flags & NVGPU_SETUP_BIND_FLAGS_SUPPORT_VPR) { |
1148 | c->vpr = true; | 1148 | c->vpr = true; |
1149 | } | 1149 | } |
1150 | 1150 | ||
1151 | if (gpfifo_args->flags & NVGPU_GPFIFO_FLAGS_SUPPORT_DETERMINISTIC) { | 1151 | if (args->flags & NVGPU_SETUP_BIND_FLAGS_SUPPORT_DETERMINISTIC) { |
1152 | nvgpu_rwsem_down_read(&g->deterministic_busy); | 1152 | nvgpu_rwsem_down_read(&g->deterministic_busy); |
1153 | /* | 1153 | /* |
1154 | * Railgating isn't deterministic; instead of disallowing | 1154 | * Railgating isn't deterministic; instead of disallowing |
@@ -1172,8 +1172,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c, | |||
1172 | /* an address space needs to have been bound at this point. */ | 1172 | /* an address space needs to have been bound at this point. */ |
1173 | if (!gk20a_channel_as_bound(c)) { | 1173 | if (!gk20a_channel_as_bound(c)) { |
1174 | nvgpu_err(g, | 1174 | nvgpu_err(g, |
1175 | "not bound to an address space at time of gpfifo" | 1175 | "not bound to an address space at time of setup_bind"); |
1176 | " allocation."); | ||
1177 | err = -EINVAL; | 1176 | err = -EINVAL; |
1178 | goto clean_up_idle; | 1177 | goto clean_up_idle; |
1179 | } | 1178 | } |
@@ -1187,10 +1186,9 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c, | |||
1187 | goto clean_up_idle; | 1186 | goto clean_up_idle; |
1188 | } | 1187 | } |
1189 | 1188 | ||
1190 | if (gpfifo_args->flags & NVGPU_GPFIFO_FLAGS_USERMODE_SUPPORT) { | 1189 | if (args->flags & NVGPU_SETUP_BIND_FLAGS_USERMODE_SUPPORT) { |
1191 | if (g->os_channel.alloc_usermode_buffers) { | 1190 | if (g->os_channel.alloc_usermode_buffers) { |
1192 | err = g->os_channel.alloc_usermode_buffers(c, | 1191 | err = g->os_channel.alloc_usermode_buffers(c, args); |
1193 | gpfifo_args); | ||
1194 | if (err) { | 1192 | if (err) { |
1195 | nvgpu_err(g, "Usermode buffer alloc failed"); | 1193 | nvgpu_err(g, "Usermode buffer alloc failed"); |
1196 | goto clean_up; | 1194 | goto clean_up; |
@@ -1258,23 +1256,23 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c, | |||
1258 | 1256 | ||
1259 | err = g->ops.fifo.setup_ramfc(c, gpfifo_gpu_va, | 1257 | err = g->ops.fifo.setup_ramfc(c, gpfifo_gpu_va, |
1260 | c->gpfifo.entry_num, | 1258 | c->gpfifo.entry_num, |
1261 | acquire_timeout, gpfifo_args->flags); | 1259 | acquire_timeout, args->flags); |
1262 | if (err) { | 1260 | if (err) { |
1263 | goto clean_up_sync; | 1261 | goto clean_up_sync; |
1264 | } | 1262 | } |
1265 | 1263 | ||
1266 | /* TBD: setup engine contexts */ | 1264 | /* TBD: setup engine contexts */ |
1267 | 1265 | ||
1268 | if (c->deterministic && gpfifo_args->num_inflight_jobs != 0U) { | 1266 | if (c->deterministic && args->num_inflight_jobs != 0U) { |
1269 | err = channel_gk20a_prealloc_resources(c, | 1267 | err = channel_gk20a_prealloc_resources(c, |
1270 | gpfifo_args->num_inflight_jobs); | 1268 | args->num_inflight_jobs); |
1271 | if (err) { | 1269 | if (err) { |
1272 | goto clean_up_sync; | 1270 | goto clean_up_sync; |
1273 | } | 1271 | } |
1274 | } | 1272 | } |
1275 | 1273 | ||
1276 | err = channel_gk20a_alloc_priv_cmdbuf(c, | 1274 | err = channel_gk20a_alloc_priv_cmdbuf(c, |
1277 | gpfifo_args->num_inflight_jobs); | 1275 | args->num_inflight_jobs); |
1278 | if (err) { | 1276 | if (err) { |
1279 | goto clean_up_prealloc; | 1277 | goto clean_up_prealloc; |
1280 | } | 1278 | } |
@@ -1292,7 +1290,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c, | |||
1292 | clean_up_priv_cmd: | 1290 | clean_up_priv_cmd: |
1293 | channel_gk20a_free_priv_cmdbuf(c); | 1291 | channel_gk20a_free_priv_cmdbuf(c); |
1294 | clean_up_prealloc: | 1292 | clean_up_prealloc: |
1295 | if (c->deterministic && gpfifo_args->num_inflight_jobs != 0U) { | 1293 | if (c->deterministic && args->num_inflight_jobs != 0U) { |
1296 | channel_gk20a_free_prealloc_resources(c); | 1294 | channel_gk20a_free_prealloc_resources(c); |
1297 | } | 1295 | } |
1298 | clean_up_sync: | 1296 | clean_up_sync: |
diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c index 5052fc35..9dcba25a 100644 --- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c | |||
@@ -420,7 +420,7 @@ u32 gk20a_ce_create_context(struct gk20a *g, | |||
420 | { | 420 | { |
421 | struct gk20a_gpu_ctx *ce_ctx; | 421 | struct gk20a_gpu_ctx *ce_ctx; |
422 | struct gk20a_ce_app *ce_app = &g->ce_app; | 422 | struct gk20a_ce_app *ce_app = &g->ce_app; |
423 | struct nvgpu_gpfifo_args gpfifo_args; | 423 | struct nvgpu_setup_bind_args setup_bind_args; |
424 | u32 ctx_id = ~0; | 424 | u32 ctx_id = ~0; |
425 | int err = 0; | 425 | int err = 0; |
426 | 426 | ||
@@ -476,13 +476,13 @@ u32 gk20a_ce_create_context(struct gk20a *g, | |||
476 | goto end; | 476 | goto end; |
477 | } | 477 | } |
478 | 478 | ||
479 | gpfifo_args.num_entries = 1024; | 479 | setup_bind_args.num_gpfifo_entries = 1024; |
480 | gpfifo_args.num_inflight_jobs = 0; | 480 | setup_bind_args.num_inflight_jobs = 0; |
481 | gpfifo_args.flags = 0; | 481 | setup_bind_args.flags = 0; |
482 | /* allocate gpfifo (1024 should be more than enough) */ | 482 | /* allocate gpfifo (1024 should be more than enough) */ |
483 | err = gk20a_channel_alloc_gpfifo(ce_ctx->ch, &gpfifo_args); | 483 | err = nvgpu_channel_setup_bind(ce_ctx->ch, &setup_bind_args); |
484 | if (err) { | 484 | if (err) { |
485 | nvgpu_err(g, "ce: unable to allocate gpfifo"); | 485 | nvgpu_err(g, "ce: unable to setup and bind channel"); |
486 | goto end; | 486 | goto end; |
487 | } | 487 | } |
488 | 488 | ||
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c index 3c2de4f2..36cb5306 100644 --- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c | |||
@@ -150,7 +150,7 @@ int channel_gv11b_setup_ramfc(struct channel_gk20a *c, | |||
150 | 150 | ||
151 | nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); | 151 | nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); |
152 | 152 | ||
153 | if ((flags & NVGPU_GPFIFO_FLAGS_REPLAYABLE_FAULTS_ENABLE) != 0) { | 153 | if ((flags & NVGPU_SETUP_BIND_FLAGS_REPLAYABLE_FAULTS_ENABLE) != 0) { |
154 | replayable = true; | 154 | replayable = true; |
155 | } | 155 | } |
156 | gv11b_init_subcontext_pdb(c->vm, mem, replayable); | 156 | gv11b_init_subcontext_pdb(c->vm, mem, replayable); |
diff --git a/drivers/gpu/nvgpu/include/nvgpu/channel.h b/drivers/gpu/nvgpu/include/nvgpu/channel.h index ba3d548e..d7bf7816 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/channel.h +++ b/drivers/gpu/nvgpu/include/nvgpu/channel.h | |||
@@ -38,11 +38,11 @@ struct fifo_profile_gk20a; | |||
38 | struct nvgpu_channel_sync; | 38 | struct nvgpu_channel_sync; |
39 | struct nvgpu_gpfifo_userdata; | 39 | struct nvgpu_gpfifo_userdata; |
40 | 40 | ||
41 | /* Flags to be passed to gk20a_channel_alloc_gpfifo() */ | 41 | /* Flags to be passed to nvgpu_channel_setup_bind() */ |
42 | #define NVGPU_GPFIFO_FLAGS_SUPPORT_VPR (1U << 0U) | 42 | #define NVGPU_SETUP_BIND_FLAGS_SUPPORT_VPR (1U << 0U) |
43 | #define NVGPU_GPFIFO_FLAGS_SUPPORT_DETERMINISTIC (1U << 1U) | 43 | #define NVGPU_SETUP_BIND_FLAGS_SUPPORT_DETERMINISTIC (1U << 1U) |
44 | #define NVGPU_GPFIFO_FLAGS_REPLAYABLE_FAULTS_ENABLE (1U << 2U) | 44 | #define NVGPU_SETUP_BIND_FLAGS_REPLAYABLE_FAULTS_ENABLE (1U << 2U) |
45 | #define NVGPU_GPFIFO_FLAGS_USERMODE_SUPPORT (1U << 3U) | 45 | #define NVGPU_SETUP_BIND_FLAGS_USERMODE_SUPPORT (1U << 3U) |
46 | 46 | ||
47 | /* Flags to be passed to nvgpu_submit_channel_gpfifo() */ | 47 | /* Flags to be passed to nvgpu_submit_channel_gpfifo() */ |
48 | #define NVGPU_SUBMIT_FLAGS_FENCE_WAIT (1U << 0U) | 48 | #define NVGPU_SUBMIT_FLAGS_FENCE_WAIT (1U << 0U) |
@@ -91,8 +91,8 @@ struct gpfifo_desc { | |||
91 | void *pipe; | 91 | void *pipe; |
92 | }; | 92 | }; |
93 | 93 | ||
94 | struct nvgpu_gpfifo_args { | 94 | struct nvgpu_setup_bind_args { |
95 | u32 num_entries; | 95 | u32 num_gpfifo_entries; |
96 | u32 num_inflight_jobs; | 96 | u32 num_inflight_jobs; |
97 | u32 userd_dmabuf_fd; | 97 | u32 userd_dmabuf_fd; |
98 | u32 gpfifo_dmabuf_fd; | 98 | u32 gpfifo_dmabuf_fd; |
@@ -407,8 +407,8 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g, | |||
407 | bool is_privileged_channel, | 407 | bool is_privileged_channel, |
408 | pid_t pid, pid_t tid); | 408 | pid_t pid, pid_t tid); |
409 | 409 | ||
410 | int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c, | 410 | int nvgpu_channel_setup_bind(struct channel_gk20a *c, |
411 | struct nvgpu_gpfifo_args *gpfifo_args); | 411 | struct nvgpu_setup_bind_args *args); |
412 | 412 | ||
413 | void gk20a_channel_timeout_restart_all_channels(struct gk20a *g); | 413 | void gk20a_channel_timeout_restart_all_channels(struct gk20a *g); |
414 | 414 | ||
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h index c9002f47..d523cf5f 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h +++ b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h | |||
@@ -47,7 +47,7 @@ struct nvgpu_clk_arb; | |||
47 | struct nvgpu_gpu_ctxsw_trace_filter; | 47 | struct nvgpu_gpu_ctxsw_trace_filter; |
48 | #endif | 48 | #endif |
49 | struct priv_cmd_entry; | 49 | struct priv_cmd_entry; |
50 | struct nvgpu_gpfifo_args; | 50 | struct nvgpu_setup_bind_args; |
51 | 51 | ||
52 | #ifdef __KERNEL__ | 52 | #ifdef __KERNEL__ |
53 | #include <linux/notifier.h> | 53 | #include <linux/notifier.h> |
@@ -1604,7 +1604,7 @@ struct gk20a { | |||
1604 | struct nvgpu_gpfifo_userdata userdata, | 1604 | struct nvgpu_gpfifo_userdata userdata, |
1605 | u32 start, u32 length); | 1605 | u32 start, u32 length); |
1606 | int (*alloc_usermode_buffers)(struct channel_gk20a *c, | 1606 | int (*alloc_usermode_buffers)(struct channel_gk20a *c, |
1607 | struct nvgpu_gpfifo_args *gpfifo_args); | 1607 | struct nvgpu_setup_bind_args *gpfifo_args); |
1608 | } os_channel; | 1608 | } os_channel; |
1609 | 1609 | ||
1610 | struct gk20a_scale_profile *scale_profile; | 1610 | struct gk20a_scale_profile *scale_profile; |
diff --git a/drivers/gpu/nvgpu/os/linux/cde.c b/drivers/gpu/nvgpu/os/linux/cde.c index 7b2cba7d..715513c9 100644 --- a/drivers/gpu/nvgpu/os/linux/cde.c +++ b/drivers/gpu/nvgpu/os/linux/cde.c | |||
@@ -1312,7 +1312,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx) | |||
1312 | struct channel_gk20a *ch; | 1312 | struct channel_gk20a *ch; |
1313 | struct tsg_gk20a *tsg; | 1313 | struct tsg_gk20a *tsg; |
1314 | struct gr_gk20a *gr = &g->gr; | 1314 | struct gr_gk20a *gr = &g->gr; |
1315 | struct nvgpu_gpfifo_args gpfifo_args; | 1315 | struct nvgpu_setup_bind_args setup_bind_args; |
1316 | int err = 0; | 1316 | int err = 0; |
1317 | u64 vaddr; | 1317 | u64 vaddr; |
1318 | 1318 | ||
@@ -1351,17 +1351,16 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx) | |||
1351 | err = gk20a_tsg_bind_channel(tsg, ch); | 1351 | err = gk20a_tsg_bind_channel(tsg, ch); |
1352 | if (err) { | 1352 | if (err) { |
1353 | nvgpu_err(g, "cde: unable to bind to tsg"); | 1353 | nvgpu_err(g, "cde: unable to bind to tsg"); |
1354 | goto err_alloc_gpfifo; | 1354 | goto err_setup_bind; |
1355 | } | 1355 | } |
1356 | 1356 | ||
1357 | gpfifo_args.num_entries = 1024; | 1357 | setup_bind_args.num_gpfifo_entries = 1024; |
1358 | gpfifo_args.num_inflight_jobs = 0; | 1358 | setup_bind_args.num_inflight_jobs = 0; |
1359 | gpfifo_args.flags = 0; | 1359 | setup_bind_args.flags = 0; |
1360 | /* allocate gpfifo (1024 should be more than enough) */ | 1360 | err = nvgpu_channel_setup_bind(ch, &setup_bind_args); |
1361 | err = gk20a_channel_alloc_gpfifo(ch, &gpfifo_args); | ||
1362 | if (err) { | 1361 | if (err) { |
1363 | nvgpu_warn(g, "cde: unable to allocate gpfifo"); | 1362 | nvgpu_warn(g, "cde: unable to setup channel"); |
1364 | goto err_alloc_gpfifo; | 1363 | goto err_setup_bind; |
1365 | } | 1364 | } |
1366 | 1365 | ||
1367 | /* map backing store to gpu virtual space */ | 1366 | /* map backing store to gpu virtual space */ |
@@ -1399,7 +1398,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx) | |||
1399 | err_init_cde_img: | 1398 | err_init_cde_img: |
1400 | nvgpu_gmmu_unmap(ch->vm, &g->gr.compbit_store.mem, vaddr); | 1399 | nvgpu_gmmu_unmap(ch->vm, &g->gr.compbit_store.mem, vaddr); |
1401 | err_map_backingstore: | 1400 | err_map_backingstore: |
1402 | err_alloc_gpfifo: | 1401 | err_setup_bind: |
1403 | nvgpu_vm_put(ch->vm); | 1402 | nvgpu_vm_put(ch->vm); |
1404 | err_commit_va: | 1403 | err_commit_va: |
1405 | err_get_gk20a_channel: | 1404 | err_get_gk20a_channel: |
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_channel.c b/drivers/gpu/nvgpu/os/linux/ioctl_channel.c index d0d4b1af..d243c425 100644 --- a/drivers/gpu/nvgpu/os/linux/ioctl_channel.c +++ b/drivers/gpu/nvgpu/os/linux/ioctl_channel.c | |||
@@ -577,45 +577,59 @@ clean_up: | |||
577 | return err; | 577 | return err; |
578 | } | 578 | } |
579 | 579 | ||
580 | static u32 nvgpu_gpfifo_user_flags_to_common_flags(u32 user_flags) | 580 | static u32 nvgpu_setup_bind_user_flags_to_common_flags(u32 user_flags) |
581 | { | 581 | { |
582 | u32 flags = 0; | 582 | u32 flags = 0; |
583 | 583 | ||
584 | if (user_flags & NVGPU_ALLOC_GPFIFO_EX_FLAGS_VPR_ENABLED) | 584 | if (user_flags & NVGPU_CHANNEL_SETUP_BIND_FLAGS_VPR_ENABLED) |
585 | flags |= NVGPU_GPFIFO_FLAGS_SUPPORT_VPR; | 585 | flags |= NVGPU_SETUP_BIND_FLAGS_SUPPORT_VPR; |
586 | 586 | ||
587 | if (user_flags & NVGPU_ALLOC_GPFIFO_EX_FLAGS_DETERMINISTIC) | 587 | if (user_flags & NVGPU_CHANNEL_SETUP_BIND_FLAGS_DETERMINISTIC) |
588 | flags |= NVGPU_GPFIFO_FLAGS_SUPPORT_DETERMINISTIC; | 588 | flags |= NVGPU_SETUP_BIND_FLAGS_SUPPORT_DETERMINISTIC; |
589 | 589 | ||
590 | if (user_flags & NVGPU_ALLOC_GPFIFO_FLAGS_REPLAYABLE_FAULTS_ENABLE) | 590 | if (user_flags & NVGPU_CHANNEL_SETUP_BIND_FLAGS_REPLAYABLE_FAULTS_ENABLE) |
591 | flags |= NVGPU_GPFIFO_FLAGS_REPLAYABLE_FAULTS_ENABLE; | 591 | flags |= NVGPU_SETUP_BIND_FLAGS_REPLAYABLE_FAULTS_ENABLE; |
592 | 592 | ||
593 | return flags; | 593 | return flags; |
594 | } | 594 | } |
595 | 595 | ||
596 | static void nvgpu_get_setup_bind_args( | ||
597 | struct nvgpu_channel_setup_bind_args *channel_setup_bind_args, | ||
598 | struct nvgpu_setup_bind_args *setup_bind_args) | ||
599 | { | ||
600 | setup_bind_args->num_gpfifo_entries = | ||
601 | channel_setup_bind_args->num_gpfifo_entries; | ||
602 | setup_bind_args->num_inflight_jobs = | ||
603 | channel_setup_bind_args->num_inflight_jobs; | ||
604 | setup_bind_args->flags = nvgpu_setup_bind_user_flags_to_common_flags( | ||
605 | channel_setup_bind_args->flags); | ||
606 | } | ||
607 | |||
596 | static void nvgpu_get_gpfifo_ex_args( | 608 | static void nvgpu_get_gpfifo_ex_args( |
597 | struct nvgpu_alloc_gpfifo_ex_args *alloc_gpfifo_ex_args, | 609 | struct nvgpu_alloc_gpfifo_ex_args *alloc_gpfifo_ex_args, |
598 | struct nvgpu_gpfifo_args *gpfifo_args) | 610 | struct nvgpu_setup_bind_args *setup_bind_args) |
599 | { | 611 | { |
600 | gpfifo_args->num_entries = alloc_gpfifo_ex_args->num_entries; | 612 | setup_bind_args->num_gpfifo_entries = alloc_gpfifo_ex_args->num_entries; |
601 | gpfifo_args->num_inflight_jobs = alloc_gpfifo_ex_args->num_inflight_jobs; | 613 | setup_bind_args->num_inflight_jobs = |
602 | gpfifo_args->flags = nvgpu_gpfifo_user_flags_to_common_flags( | 614 | alloc_gpfifo_ex_args->num_inflight_jobs; |
603 | alloc_gpfifo_ex_args->flags); | 615 | setup_bind_args->flags = nvgpu_setup_bind_user_flags_to_common_flags( |
616 | alloc_gpfifo_ex_args->flags); | ||
604 | } | 617 | } |
605 | 618 | ||
606 | static void nvgpu_get_gpfifo_args( | 619 | static void nvgpu_get_gpfifo_args( |
607 | struct nvgpu_alloc_gpfifo_args *alloc_gpfifo_args, | 620 | struct nvgpu_alloc_gpfifo_args *alloc_gpfifo_args, |
608 | struct nvgpu_gpfifo_args *gpfifo_args) | 621 | struct nvgpu_setup_bind_args *setup_bind_args) |
609 | { | 622 | { |
610 | /* | 623 | /* |
611 | * Kernel can insert one extra gpfifo entry before user | 624 | * Kernel can insert one extra gpfifo entry before user |
612 | * submitted gpfifos and another one after, for internal usage. | 625 | * submitted gpfifos and another one after, for internal usage. |
613 | * Triple the requested size. | 626 | * Triple the requested size. |
614 | */ | 627 | */ |
615 | gpfifo_args->num_entries = alloc_gpfifo_args->num_entries * 3; | 628 | setup_bind_args->num_gpfifo_entries = |
616 | gpfifo_args->num_inflight_jobs = 0; | 629 | alloc_gpfifo_args->num_entries * 3; |
617 | gpfifo_args->flags = nvgpu_gpfifo_user_flags_to_common_flags( | 630 | setup_bind_args->num_inflight_jobs = 0; |
618 | alloc_gpfifo_args->flags); | 631 | setup_bind_args->flags = nvgpu_setup_bind_user_flags_to_common_flags( |
632 | alloc_gpfifo_args->flags); | ||
619 | } | 633 | } |
620 | 634 | ||
621 | static void nvgpu_get_fence_args( | 635 | static void nvgpu_get_fence_args( |
@@ -1119,13 +1133,39 @@ long gk20a_channel_ioctl(struct file *filp, | |||
1119 | gk20a_idle(ch->g); | 1133 | gk20a_idle(ch->g); |
1120 | break; | 1134 | break; |
1121 | } | 1135 | } |
1136 | case NVGPU_IOCTL_CHANNEL_SETUP_BIND: | ||
1137 | { | ||
1138 | struct nvgpu_channel_setup_bind_args *channel_setup_bind_args = | ||
1139 | (struct nvgpu_channel_setup_bind_args *)buf; | ||
1140 | struct nvgpu_setup_bind_args setup_bind_args; | ||
1141 | |||
1142 | nvgpu_get_setup_bind_args(channel_setup_bind_args, | ||
1143 | &setup_bind_args); | ||
1144 | |||
1145 | err = gk20a_busy(ch->g); | ||
1146 | if (err) { | ||
1147 | dev_err(dev, | ||
1148 | "%s: failed to host gk20a for ioctl cmd: 0x%x", | ||
1149 | __func__, cmd); | ||
1150 | break; | ||
1151 | } | ||
1152 | |||
1153 | if (!is_power_of_2(setup_bind_args.num_gpfifo_entries)) { | ||
1154 | err = -EINVAL; | ||
1155 | gk20a_idle(ch->g); | ||
1156 | break; | ||
1157 | } | ||
1158 | err = nvgpu_channel_setup_bind(ch, &setup_bind_args); | ||
1159 | gk20a_idle(ch->g); | ||
1160 | break; | ||
1161 | } | ||
1122 | case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO_EX: | 1162 | case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO_EX: |
1123 | { | 1163 | { |
1124 | struct nvgpu_alloc_gpfifo_ex_args *alloc_gpfifo_ex_args = | 1164 | struct nvgpu_alloc_gpfifo_ex_args *alloc_gpfifo_ex_args = |
1125 | (struct nvgpu_alloc_gpfifo_ex_args *)buf; | 1165 | (struct nvgpu_alloc_gpfifo_ex_args *)buf; |
1126 | struct nvgpu_gpfifo_args gpfifo_args; | 1166 | struct nvgpu_setup_bind_args setup_bind_args; |
1127 | 1167 | ||
1128 | nvgpu_get_gpfifo_ex_args(alloc_gpfifo_ex_args, &gpfifo_args); | 1168 | nvgpu_get_gpfifo_ex_args(alloc_gpfifo_ex_args, &setup_bind_args); |
1129 | 1169 | ||
1130 | err = gk20a_busy(ch->g); | 1170 | err = gk20a_busy(ch->g); |
1131 | if (err) { | 1171 | if (err) { |
@@ -1140,7 +1180,7 @@ long gk20a_channel_ioctl(struct file *filp, | |||
1140 | gk20a_idle(ch->g); | 1180 | gk20a_idle(ch->g); |
1141 | break; | 1181 | break; |
1142 | } | 1182 | } |
1143 | err = gk20a_channel_alloc_gpfifo(ch, &gpfifo_args); | 1183 | err = nvgpu_channel_setup_bind(ch, &setup_bind_args); |
1144 | gk20a_idle(ch->g); | 1184 | gk20a_idle(ch->g); |
1145 | break; | 1185 | break; |
1146 | } | 1186 | } |
@@ -1148,9 +1188,9 @@ long gk20a_channel_ioctl(struct file *filp, | |||
1148 | { | 1188 | { |
1149 | struct nvgpu_alloc_gpfifo_args *alloc_gpfifo_args = | 1189 | struct nvgpu_alloc_gpfifo_args *alloc_gpfifo_args = |
1150 | (struct nvgpu_alloc_gpfifo_args *)buf; | 1190 | (struct nvgpu_alloc_gpfifo_args *)buf; |
1151 | struct nvgpu_gpfifo_args gpfifo_args; | 1191 | struct nvgpu_setup_bind_args setup_bind_args; |
1152 | 1192 | ||
1153 | nvgpu_get_gpfifo_args(alloc_gpfifo_args, &gpfifo_args); | 1193 | nvgpu_get_gpfifo_args(alloc_gpfifo_args, &setup_bind_args); |
1154 | 1194 | ||
1155 | err = gk20a_busy(ch->g); | 1195 | err = gk20a_busy(ch->g); |
1156 | if (err) { | 1196 | if (err) { |
@@ -1160,7 +1200,7 @@ long gk20a_channel_ioctl(struct file *filp, | |||
1160 | break; | 1200 | break; |
1161 | } | 1201 | } |
1162 | 1202 | ||
1163 | err = gk20a_channel_alloc_gpfifo(ch, &gpfifo_args); | 1203 | err = nvgpu_channel_setup_bind(ch, &setup_bind_args); |
1164 | gk20a_idle(ch->g); | 1204 | gk20a_idle(ch->g); |
1165 | break; | 1205 | break; |
1166 | } | 1206 | } |