author	Terje Bergstrom <tbergstrom@nvidia.com>	2017-03-23 15:49:58 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-04-02 11:28:22 -0400
commit	660c9a95104b37c947e0c2f6aeda4c92698b40f4 (patch)
tree	e00489bfb3b4c586015a555e3517e6fa8fc6b3a6 /drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
parent	9adc49f94bd9df94e7d77b08eb2b6e98d7eb5758 (diff)
gpu: nvgpu: Move ctrl IOCTL creation to Linux module
Move all code related to ctrl devnode to under Linux module.

JIRA NVGPU-16

Change-Id: I834b46ec4172076d7bde459168f1e6bc8c5d6c0c
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1330802
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c')
-rw-r--r--	drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c	1521
1 files changed, 1521 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
new file mode 100644
index 00000000..fa05deb9
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
@@ -0,0 +1,1521 @@
1/*
2 * Copyright (c) 2011-2017, NVIDIA Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/uaccess.h>
18#include <linux/cdev.h>
19#include <linux/file.h>
20#include <linux/anon_inodes.h>
21#include <linux/bitops.h>
22#include <uapi/linux/nvgpu.h>
23
24#include <nvgpu/kmem.h>
25
26#include "ioctl_ctrl.h"
27#include "gk20a/gk20a.h"
28#include "gk20a/fence_gk20a.h"
29
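/*
 * Editor's note on HZ_TO_MHZ() below: frequencies that fit in 32 bits are
 * divided by MHZ directly; larger values use a multiply-and-shift by 0x10C8
 * (roughly 2^32 / 10^6) to avoid a 64-bit division, and anything above
 * ~0xF414F9CD7 Hz (about 65,520 MHz) saturates to 0xffff, the largest
 * frequency in MHz that fits in a u16.
 */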
30#define HZ_TO_MHZ(a) ((a > 0xF414F9CD7) ? 0xffff : (a >> 32) ? \
31 (u32) ((a * 0x10C8ULL) >> 32) : (u16) ((u32) a/MHZ))
32#define MHZ_TO_HZ(a) ((u64)a * MHZ)
33
34struct gk20a_ctrl_priv {
35 struct device *dev;
36 struct gk20a *g;
37#ifdef CONFIG_ARCH_TEGRA_18x_SOC
38 struct nvgpu_clk_session *clk_session;
39#endif
40};
41
42int gk20a_ctrl_dev_open(struct inode *inode, struct file *filp)
43{
44 struct gk20a *g;
45 struct gk20a_ctrl_priv *priv;
46 int err = 0;
47
48 gk20a_dbg_fn("");
49
50 g = container_of(inode->i_cdev,
51 struct gk20a, ctrl.cdev);
52 g = gk20a_get(g);
53 if (!g)
54 return -ENODEV;
55
56 priv = nvgpu_kzalloc(g, sizeof(struct gk20a_ctrl_priv));
57 if (!priv) {
58 err = -ENOMEM;
59 goto free_ref;
60 }
61 filp->private_data = priv;
62 priv->dev = g->dev;
63 /*
64 * We don't close the arbiter fds after driver teardown to support
65 * GPU_LOST events, so we store g here, instead of dereferencing the
66 * dev structure on teardown
67 */
68 priv->g = g;
69
70 if (!g->gr.sw_ready) {
71 err = gk20a_busy(g);
72 if (err)
73 goto free_ref;
74 gk20a_idle(g);
75 }
76
77#ifdef CONFIG_ARCH_TEGRA_18x_SOC
78 err = nvgpu_clk_arb_init_session(g, &priv->clk_session);
79#endif
80free_ref:
81 if (err)
82 gk20a_put(g);
83 return err;
84}
85int gk20a_ctrl_dev_release(struct inode *inode, struct file *filp)
86{
87 struct gk20a_ctrl_priv *priv = filp->private_data;
88 struct gk20a *g = priv->g;
89
90 gk20a_dbg_fn("");
91
92#ifdef CONFIG_ARCH_TEGRA_18x_SOC
93 if (priv->clk_session)
94 nvgpu_clk_arb_release_session(g, priv->clk_session);
95#endif
96
97 gk20a_put(g);
98 nvgpu_kfree(g, priv);
99
100 return 0;
101}
102
103static long
104gk20a_ctrl_ioctl_gpu_characteristics(
105 struct gk20a *g,
106 struct nvgpu_gpu_get_characteristics *request)
107{
108 struct nvgpu_gpu_characteristics *pgpu = &g->gpu_characteristics;
109 long err = 0;
110
111 if (request->gpu_characteristics_buf_size > 0) {
112 size_t write_size = sizeof(*pgpu);
113
114 if (write_size > request->gpu_characteristics_buf_size)
115 write_size = request->gpu_characteristics_buf_size;
116
117 err = copy_to_user((void __user *)(uintptr_t)
118 request->gpu_characteristics_buf_addr,
119 pgpu, write_size);
120 }
121
122 if (err == 0)
123 request->gpu_characteristics_buf_size = sizeof(*pgpu);
124
125 return err;
126}
127
128static int gk20a_ctrl_prepare_compressible_read(
129 struct gk20a *g,
130 struct nvgpu_gpu_prepare_compressible_read_args *args)
131{
132 struct nvgpu_fence fence;
133 struct gk20a_fence *fence_out = NULL;
134 int ret = 0;
135 int flags = args->submit_flags;
136
137 fence.id = args->fence.syncpt_id;
138 fence.value = args->fence.syncpt_value;
139
140 ret = gk20a_prepare_compressible_read(g, args->handle,
141 args->request_compbits, args->offset,
142 args->compbits_hoffset, args->compbits_voffset,
143 args->scatterbuffer_offset,
144 args->width, args->height, args->block_height_log2,
145 flags, &fence, &args->valid_compbits,
146 &args->zbc_color, &fence_out);
147
148 if (ret)
149 return ret;
150
151 /* Convert fence_out to something we can pass back to user space. */
152 if (flags & NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET) {
153 if (flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE) {
154 if (fence_out) {
155 int fd = gk20a_fence_install_fd(fence_out);
156 if (fd < 0)
157 ret = fd;
158 else
159 args->fence.fd = fd;
160 } else {
161 args->fence.fd = -1;
162 }
163 } else {
164 if (fence_out) {
165 args->fence.syncpt_id = fence_out->syncpt_id;
166 args->fence.syncpt_value =
167 fence_out->syncpt_value;
168 } else {
169 args->fence.syncpt_id = -1;
170 args->fence.syncpt_value = 0;
171 }
172 }
173 }
174 gk20a_fence_put(fence_out);
175
176 return 0;
177}
178
179static int gk20a_ctrl_mark_compressible_write(
180 struct gk20a *g,
181 struct nvgpu_gpu_mark_compressible_write_args *args)
182{
183 int ret;
184
185 ret = gk20a_mark_compressible_write(g, args->handle,
186 args->valid_compbits, args->offset, args->zbc_color);
187
188 return ret;
189}
190
191static int gk20a_ctrl_alloc_as(
192 struct gk20a *g,
193 struct nvgpu_alloc_as_args *args)
194{
195 struct gk20a_as_share *as_share;
196 int err;
197 int fd;
198 struct file *file;
199 char name[64];
200
201 err = get_unused_fd_flags(O_RDWR);
202 if (err < 0)
203 return err;
204 fd = err;
205
206 snprintf(name, sizeof(name), "nvhost-%s-fd%d", g->name, fd);
207
208 file = anon_inode_getfile(name, g->as.cdev.ops, NULL, O_RDWR);
209 if (IS_ERR(file)) {
210 err = PTR_ERR(file);
211 goto clean_up;
212 }
213
214 err = gk20a_as_alloc_share(&g->as, args->big_page_size, args->flags,
215 &as_share);
216 if (err)
217 goto clean_up_file;
218
219 fd_install(fd, file);
220 file->private_data = as_share;
221
222 args->as_fd = fd;
223 return 0;
224
225clean_up_file:
226 fput(file);
227clean_up:
228 put_unused_fd(fd);
229 return err;
230}
231
232static int gk20a_ctrl_open_tsg(struct gk20a *g,
233 struct nvgpu_gpu_open_tsg_args *args)
234{
235 int err;
236 int fd;
237 struct file *file;
238 char name[64];
239
240 err = get_unused_fd_flags(O_RDWR);
241 if (err < 0)
242 return err;
243 fd = err;
244
245 snprintf(name, sizeof(name), "nvgpu-%s-tsg%d", g->name, fd);
246
247 file = anon_inode_getfile(name, g->tsg.cdev.ops, NULL, O_RDWR);
248 if (IS_ERR(file)) {
249 err = PTR_ERR(file);
250 goto clean_up;
251 }
252
253 err = gk20a_tsg_open(g, file);
254 if (err)
255 goto clean_up_file;
256
257 fd_install(fd, file);
258 args->tsg_fd = fd;
259 return 0;
260
261clean_up_file:
262 fput(file);
263clean_up:
264 put_unused_fd(fd);
265 return err;
266}
267
268static int gk20a_ctrl_get_tpc_masks(struct gk20a *g,
269 struct nvgpu_gpu_get_tpc_masks_args *args)
270{
271 struct gr_gk20a *gr = &g->gr;
272 int err = 0;
273 const u32 gpc_tpc_mask_size = sizeof(u32) * gr->gpc_count;
274
275 if (args->mask_buf_size > 0) {
276 size_t write_size = gpc_tpc_mask_size;
277
278 if (write_size > args->mask_buf_size)
279 write_size = args->mask_buf_size;
280
281 err = copy_to_user((void __user *)(uintptr_t)
282 args->mask_buf_addr,
283 gr->gpc_tpc_mask, write_size);
284 }
285
286 if (err == 0)
287 args->mask_buf_size = gpc_tpc_mask_size;
288
289 return err;
290}
291
292static int gk20a_ctrl_get_fbp_l2_masks(
293 struct gk20a *g, struct nvgpu_gpu_get_fbp_l2_masks_args *args)
294{
295 struct gr_gk20a *gr = &g->gr;
296 int err = 0;
297 const u32 fbp_l2_mask_size = sizeof(u32) * gr->max_fbps_count;
298
299 if (args->mask_buf_size > 0) {
300 size_t write_size = fbp_l2_mask_size;
301
302 if (write_size > args->mask_buf_size)
303 write_size = args->mask_buf_size;
304
305 err = copy_to_user((void __user *)(uintptr_t)
306 args->mask_buf_addr,
307 gr->fbp_rop_l2_en_mask, write_size);
308 }
309
310 if (err == 0)
311 args->mask_buf_size = fbp_l2_mask_size;
312
313 return err;
314}
315
316static int nvgpu_gpu_ioctl_l2_fb_ops(struct gk20a *g,
317 struct nvgpu_gpu_l2_fb_args *args)
318{
319 int err = 0;
320
321 if (args->l2_flush)
322 g->ops.mm.l2_flush(g, args->l2_invalidate ? true : false);
323
324 if (args->fb_flush)
325 g->ops.mm.fb_flush(g);
326
327 return err;
328}
329
330/* Invalidate i-cache for kepler & maxwell */
331static int nvgpu_gpu_ioctl_inval_icache(
332 struct gk20a *g,
333 struct nvgpu_gpu_inval_icache_args *args)
334{
335 struct channel_gk20a *ch;
336 int err;
337
338 ch = gk20a_get_channel_from_file(args->channel_fd);
339 if (!ch)
340 return -EINVAL;
341
342 /* Take the global lock, since we'll be doing global regops */
343 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
344 err = g->ops.gr.inval_icache(g, ch);
345 nvgpu_mutex_release(&g->dbg_sessions_lock);
346 return err;
347}
348
349static int nvgpu_gpu_ioctl_set_mmu_debug_mode(
350 struct gk20a *g,
351 struct nvgpu_gpu_mmu_debug_mode_args *args)
352{
353 if (gk20a_busy(g)) {
354 gk20a_err(dev_from_gk20a(g), "failed to power on gpu\n");
355 return -EINVAL;
356 }
357
358 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
359 g->ops.fb.set_debug_mode(g, args->state == 1);
360 nvgpu_mutex_release(&g->dbg_sessions_lock);
361
362 gk20a_idle(g);
363 return 0;
364}
365
366static int nvgpu_gpu_ioctl_set_debug_mode(
367 struct gk20a *g,
368 struct nvgpu_gpu_sm_debug_mode_args *args)
369{
370 struct channel_gk20a *ch;
371 int err;
372
373 ch = gk20a_get_channel_from_file(args->channel_fd);
374 if (!ch)
375 return -EINVAL;
376
377 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
378 if (g->ops.gr.set_sm_debug_mode)
379 err = g->ops.gr.set_sm_debug_mode(g, ch,
380 args->sms, !!args->enable);
381 else
382 err = -ENOSYS;
383 nvgpu_mutex_release(&g->dbg_sessions_lock);
384
385 return err;
386}
387
388static int nvgpu_gpu_ioctl_trigger_suspend(struct gk20a *g)
389{
390 int err;
391
392 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
393 err = g->ops.gr.trigger_suspend(g);
394 nvgpu_mutex_release(&g->dbg_sessions_lock);
395 return err;
396}
397
398static int nvgpu_gpu_ioctl_wait_for_pause(struct gk20a *g,
399 struct nvgpu_gpu_wait_pause_args *args)
400{
401 int err = 0;
402 struct warpstate *w_state;
403 u32 sm_count, size;
404
405 sm_count = g->gr.gpc_count * g->gr.tpc_count;
406 size = sm_count * sizeof(struct warpstate);
407 w_state = nvgpu_kzalloc(g, size);
408 if (!w_state)
409 return -ENOMEM;
410
411 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
412 g->ops.gr.wait_for_pause(g, w_state);
413
414 /* Copy to the user space buffer pointed to by args->pwarpstate */
415 if (copy_to_user((void __user *)(uintptr_t)args->pwarpstate, w_state, size)) {
416 gk20a_dbg_fn("copy_to_user failed!");
417 err = -EFAULT;
418 }
419
420 nvgpu_mutex_release(&g->dbg_sessions_lock);
421 nvgpu_kfree(g, w_state);
422 return err;
423}
424
425static int nvgpu_gpu_ioctl_resume_from_pause(struct gk20a *g)
426{
427 int err = 0;
428
429 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
430 err = g->ops.gr.resume_from_pause(g);
431 nvgpu_mutex_release(&g->dbg_sessions_lock);
432 return err;
433}
434
435static int nvgpu_gpu_ioctl_clear_sm_errors(struct gk20a *g)
436{
437 return g->ops.gr.clear_sm_errors(g);
438}
439
440static int nvgpu_gpu_ioctl_has_any_exception(
441 struct gk20a *g,
442 struct nvgpu_gpu_tpc_exception_en_status_args *args)
443{
444 u32 tpc_exception_en;
445
446 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
447 tpc_exception_en = g->ops.gr.tpc_enabled_exceptions(g);
448 nvgpu_mutex_release(&g->dbg_sessions_lock);
449
450 args->tpc_exception_en_sm_mask = tpc_exception_en;
451
452 return 0;
453}
454
455static int gk20a_ctrl_get_num_vsms(struct gk20a *g,
456 struct nvgpu_gpu_num_vsms *args)
457{
458 struct gr_gk20a *gr = &g->gr;
459 args->num_vsms = gr->no_of_sm;
460 return 0;
461}
462
463static int gk20a_ctrl_vsm_mapping(struct gk20a *g,
464 struct nvgpu_gpu_vsms_mapping *args)
465{
466 int err = 0;
467 struct gr_gk20a *gr = &g->gr;
468 size_t write_size = gr->no_of_sm *
469 sizeof(struct nvgpu_gpu_vsms_mapping_entry);
470 struct nvgpu_gpu_vsms_mapping_entry *vsms_buf;
471 u32 i;
472
473 vsms_buf = nvgpu_kzalloc(g, write_size);
474 if (vsms_buf == NULL)
475 return -ENOMEM;
476
477 for (i = 0; i < gr->no_of_sm; i++) {
478 vsms_buf[i].gpc_index = gr->sm_to_cluster[i].gpc_index;
479 vsms_buf[i].tpc_index = gr->sm_to_cluster[i].tpc_index;
480 }
481
482 err = copy_to_user((void __user *)(uintptr_t)
483 args->vsms_map_buf_addr,
484 vsms_buf, write_size);
485 nvgpu_kfree(g, vsms_buf);
486
487 return err;
488}
489
490static int gk20a_ctrl_get_buffer_info(
491 struct gk20a *g, struct nvgpu_gpu_get_buffer_info_args *args)
492{
493 return gk20a_mm_get_buffer_info(dev_from_gk20a(g), args->in.dmabuf_fd,
494 &args->out.id, &args->out.length);
495}
496
497static inline u64 get_cpu_timestamp_tsc(void)
498{
499 return ((u64) get_cycles());
500}
501
502static inline u64 get_cpu_timestamp_jiffies(void)
503{
504 return (get_jiffies_64() - INITIAL_JIFFIES);
505}
506
507static inline u64 get_cpu_timestamp_timeofday(void)
508{
509 struct timeval tv;
510
511 do_gettimeofday(&tv);
512 return timeval_to_jiffies(&tv);
513}
514
515static inline int get_timestamps_zipper(struct gk20a *g,
516 u64 (*get_cpu_timestamp)(void),
517 struct nvgpu_gpu_get_cpu_time_correlation_info_args *args)
518{
519 int err = 0;
520 unsigned int i = 0;
521
522 if (gk20a_busy(g)) {
523 gk20a_err(dev_from_gk20a(g), "GPU not powered on\n");
524 err = -EINVAL;
525 return err;
526 }
527
528 for (i = 0; i < args->count; i++) {
529 err = g->ops.bus.read_ptimer(g, &args->samples[i].gpu_timestamp);
530 if (err)
531 goto end;
532
533 args->samples[i].cpu_timestamp = get_cpu_timestamp();
534 }
535
536end:
537 gk20a_idle(g);
538 return err;
539}
540
541static int nvgpu_gpu_get_cpu_time_correlation_info(
542 struct gk20a *g,
543 struct nvgpu_gpu_get_cpu_time_correlation_info_args *args)
544{
545 int err = 0;
546 u64 (*get_cpu_timestamp)(void) = NULL;
547
548 if (args->count > NVGPU_GPU_GET_CPU_TIME_CORRELATION_INFO_MAX_COUNT)
549 return -EINVAL;
550
551 switch (args->source_id) {
552 case NVGPU_GPU_GET_CPU_TIME_CORRELATION_INFO_SRC_ID_TSC:
553 get_cpu_timestamp = get_cpu_timestamp_tsc;
554 break;
555 case NVGPU_GPU_GET_CPU_TIME_CORRELATION_INFO_SRC_ID_JIFFIES:
556 get_cpu_timestamp = get_cpu_timestamp_jiffies;
557 break;
558 case NVGPU_GPU_GET_CPU_TIME_CORRELATION_INFO_SRC_ID_TIMEOFDAY:
559 get_cpu_timestamp = get_cpu_timestamp_timeofday;
560 break;
561 default:
562 gk20a_err(dev_from_gk20a(g), "invalid cpu clock source id\n");
563 return -EINVAL;
564 }
565
566 err = get_timestamps_zipper(g, get_cpu_timestamp, args);
567 return err;
568}
569
570static int nvgpu_gpu_get_gpu_time(
571 struct gk20a *g,
572 struct nvgpu_gpu_get_gpu_time_args *args)
573{
574 u64 time;
575 int err;
576
577 err = gk20a_busy(g);
578 if (err)
579 return err;
580
581 err = g->ops.bus.read_ptimer(g, &time);
582 if (!err)
583 args->gpu_timestamp = time;
584
585 gk20a_idle(g);
586 return err;
587}
588
589static int nvgpu_gpu_get_engine_info(
590 struct gk20a *g,
591 struct nvgpu_gpu_get_engine_info_args *args)
592{
593 int err = 0;
594 u32 engine_enum = ENGINE_INVAL_GK20A;
595 u32 report_index = 0;
596 u32 engine_id_idx;
597 const u32 max_buffer_engines = args->engine_info_buf_size /
598 sizeof(struct nvgpu_gpu_get_engine_info_item);
599 struct nvgpu_gpu_get_engine_info_item __user *dst_item_list =
600 (void __user *)(uintptr_t)args->engine_info_buf_addr;
601
602 for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines;
603 ++engine_id_idx) {
604 u32 active_engine_id = g->fifo.active_engines_list[engine_id_idx];
605 const struct fifo_engine_info_gk20a *src_info =
606 &g->fifo.engine_info[active_engine_id];
607 struct nvgpu_gpu_get_engine_info_item dst_info;
608
609 memset(&dst_info, 0, sizeof(dst_info));
610
611 engine_enum = src_info->engine_enum;
612
613 switch (engine_enum) {
614 case ENGINE_GR_GK20A:
615 dst_info.engine_id = NVGPU_GPU_ENGINE_ID_GR;
616 break;
617
618 case ENGINE_GRCE_GK20A:
619 dst_info.engine_id = NVGPU_GPU_ENGINE_ID_GR_COPY;
620 break;
621
622 case ENGINE_ASYNC_CE_GK20A:
623 dst_info.engine_id = NVGPU_GPU_ENGINE_ID_ASYNC_COPY;
624 break;
625
626 default:
627 gk20a_err(dev_from_gk20a(g), "Unmapped engine enum %u\n",
628 engine_enum);
629 continue;
630 }
631
632 dst_info.engine_instance = src_info->inst_id;
633 dst_info.runlist_id = src_info->runlist_id;
634
635 if (report_index < max_buffer_engines) {
636 err = copy_to_user(&dst_item_list[report_index],
637 &dst_info, sizeof(dst_info));
638 if (err)
639 goto clean_up;
640 }
641
642 ++report_index;
643 }
644
645 args->engine_info_buf_size =
646 report_index * sizeof(struct nvgpu_gpu_get_engine_info_item);
647
648clean_up:
649 return err;
650}
651
652static int nvgpu_gpu_alloc_vidmem(struct gk20a *g,
653 struct nvgpu_gpu_alloc_vidmem_args *args)
654{
655 u32 align = args->in.alignment ? args->in.alignment : SZ_4K;
656 int fd;
657
658 gk20a_dbg_fn("");
659
660 /* not yet supported */
661 if (WARN_ON(args->in.flags & NVGPU_GPU_ALLOC_VIDMEM_FLAG_CPU_MASK))
662 return -EINVAL;
663
664 /* not yet supported */
665 if (WARN_ON(args->in.flags & NVGPU_GPU_ALLOC_VIDMEM_FLAG_VPR))
666 return -EINVAL;
667
668 if (args->in.size & (SZ_4K - 1))
669 return -EINVAL;
670
671 if (!args->in.size)
672 return -EINVAL;
673
674 if (align & (align - 1))
675 return -EINVAL;
676
677 if (align > roundup_pow_of_two(args->in.size)) {
678 /* log this special case, buddy allocator detail */
679 gk20a_warn(dev_from_gk20a(g),
680 "alignment larger than buffer size rounded up to power of 2 is not supported");
681 return -EINVAL;
682 }
683
684 fd = gk20a_vidmem_buf_alloc(g, args->in.size);
685 if (fd < 0)
686 return fd;
687
688 args->out.dmabuf_fd = fd;
689
690 gk20a_dbg_fn("done, fd=%d", fd);
691
692 return 0;
693}
694
695static int nvgpu_gpu_get_memory_state(struct gk20a *g,
696 struct nvgpu_gpu_get_memory_state_args *args)
697{
698 int err;
699
700 gk20a_dbg_fn("");
701
702 if (args->reserved[0] || args->reserved[1] ||
703 args->reserved[2] || args->reserved[3])
704 return -EINVAL;
705
706 err = gk20a_vidmem_get_space(g, &args->total_free_bytes);
707
708 gk20a_dbg_fn("done, err=%d, bytes=%lld", err, args->total_free_bytes);
709
710 return err;
711}
712
713#ifdef CONFIG_ARCH_TEGRA_18x_SOC
714static int nvgpu_gpu_clk_get_vf_points(struct gk20a *g,
715 struct gk20a_ctrl_priv *priv,
716 struct nvgpu_gpu_clk_vf_points_args *args)
717{
718 struct nvgpu_gpu_clk_vf_point clk_point;
719 struct nvgpu_gpu_clk_vf_point __user *entry;
720 struct nvgpu_clk_session *session = priv->clk_session;
721 u32 clk_domains = 0;
722 int err;
723 u16 last_mhz;
724 u16 *fpoints;
725 u32 i;
726 u32 max_points = 0;
727 u32 num_points = 0;
728 u16 min_mhz;
729 u16 max_mhz;
730
731 gk20a_dbg_fn("");
732
733 if (!session || args->flags)
734 return -EINVAL;
735
736 clk_domains = nvgpu_clk_arb_get_arbiter_clk_domains(g);
737 args->num_entries = 0;
738
739 if (!nvgpu_clk_arb_is_valid_domain(g, args->clk_domain))
740 return -EINVAL;
741
742 err = nvgpu_clk_arb_get_arbiter_clk_f_points(g,
743 args->clk_domain, &max_points, NULL);
744 if (err)
745 return err;
746
747 if (!args->max_entries) {
748 args->max_entries = max_points;
749 return 0;
750 }
751
752 if (args->max_entries < max_points)
753 return -EINVAL;
754
755 err = nvgpu_clk_arb_get_arbiter_clk_range(g, args->clk_domain,
756 &min_mhz, &max_mhz);
757 if (err)
758 return err;
759
760 fpoints = nvgpu_kcalloc(g, max_points, sizeof(u16));
761 if (!fpoints)
762 return -ENOMEM;
763
764 err = nvgpu_clk_arb_get_arbiter_clk_f_points(g,
765 args->clk_domain, &max_points, fpoints);
766 if (err)
767 goto fail;
768
769 entry = (struct nvgpu_gpu_clk_vf_point __user *)
770 (uintptr_t)args->clk_vf_point_entries;
771
772 last_mhz = 0;
773 num_points = 0;
774 for (i = 0; (i < max_points) && !err; i++) {
775
776 /* filter out duplicate frequencies */
777 if (fpoints[i] == last_mhz)
778 continue;
779
780 /* filter out out-of-range frequencies */
781 if ((fpoints[i] < min_mhz) || (fpoints[i] > max_mhz))
782 continue;
783
784 last_mhz = fpoints[i];
785 clk_point.freq_hz = MHZ_TO_HZ(fpoints[i]);
786
787 err = copy_to_user((void __user *)entry, &clk_point,
788 sizeof(clk_point));
789
790 num_points++;
791 entry++;
792 }
793
794 args->num_entries = num_points;
795
796fail:
797 nvgpu_kfree(g, fpoints);
798 return err;
799}
800
801static int nvgpu_gpu_clk_get_range(struct gk20a *g,
802 struct gk20a_ctrl_priv *priv,
803 struct nvgpu_gpu_clk_range_args *args)
804{
805 struct nvgpu_gpu_clk_range clk_range;
806 struct nvgpu_gpu_clk_range __user *entry;
807 struct nvgpu_clk_session *session = priv->clk_session;
808
809 u32 clk_domains = 0;
810 u32 num_domains;
811 u32 num_entries;
812 u32 i;
813 int bit;
814 int err;
815 u16 min_mhz, max_mhz;
816
817 gk20a_dbg_fn("");
818
819 if (!session)
820 return -EINVAL;
821
822 clk_domains = nvgpu_clk_arb_get_arbiter_clk_domains(g);
823 num_domains = hweight_long(clk_domains);
824
825 if (!args->flags) {
826 if (!args->num_entries) {
827 args->num_entries = num_domains;
828 return 0;
829 }
830
831 if (args->num_entries < num_domains)
832 return -EINVAL;
833
834 args->num_entries = 0;
835 num_entries = num_domains;
836
837 } else {
838 if (args->flags != NVGPU_GPU_CLK_FLAG_SPECIFIC_DOMAINS)
839 return -EINVAL;
840
841 num_entries = args->num_entries;
842 if (num_entries > num_domains)
843 return -EINVAL;
844 }
845
846 entry = (struct nvgpu_gpu_clk_range __user *)
847 (uintptr_t)args->clk_range_entries;
848
849 for (i = 0; i < num_entries; i++, entry++) {
850
851 if (args->flags == NVGPU_GPU_CLK_FLAG_SPECIFIC_DOMAINS) {
852 if (copy_from_user(&clk_range, (void __user *)entry,
853 sizeof(clk_range)))
854 return -EFAULT;
855 } else {
856 bit = ffs(clk_domains) - 1;
857 clk_range.clk_domain = bit;
858 clk_domains &= ~BIT(bit);
859 }
860
861 clk_range.flags = 0;
862 err = nvgpu_clk_arb_get_arbiter_clk_range(g,
863 clk_range.clk_domain,
864 &min_mhz, &max_mhz);
865 clk_range.min_hz = MHZ_TO_HZ(min_mhz);
866 clk_range.max_hz = MHZ_TO_HZ(max_mhz);
867
868 if (err)
869 return err;
870
871 err = copy_to_user(entry, &clk_range, sizeof(clk_range));
872 if (err)
873 return -EFAULT;
874 }
875
876 args->num_entries = num_entries;
877
878 return 0;
879}
880
881
882static int nvgpu_gpu_clk_set_info(struct gk20a *g,
883 struct gk20a_ctrl_priv *priv,
884 struct nvgpu_gpu_clk_set_info_args *args)
885{
886 struct nvgpu_gpu_clk_info clk_info;
887 struct nvgpu_gpu_clk_info __user *entry;
888 struct nvgpu_clk_session *session = priv->clk_session;
889
890 int fd;
891 u32 clk_domains = 0;
892 u16 freq_mhz;
893 int i;
894 int ret;
895
896 gk20a_dbg_fn("");
897
898 if (!session || args->flags)
899 return -EINVAL;
900
901 gk20a_dbg_info("line=%d", __LINE__);
902
903 clk_domains = nvgpu_clk_arb_get_arbiter_clk_domains(g);
904 if (!clk_domains)
905 return -EINVAL;
906
907 entry = (struct nvgpu_gpu_clk_info __user *)
908 (uintptr_t)args->clk_info_entries;
909
910 gk20a_dbg_info("line=%d", __LINE__);
911
912 for (i = 0; i < args->num_entries; i++, entry++) {
913
914 gk20a_dbg_info("line=%d", __LINE__);
915 if (copy_from_user(&clk_info, entry, sizeof(clk_info)))
916 return -EFAULT;
917
918 gk20a_dbg_info("i=%d domain=0x%08x", i, clk_info.clk_domain);
919
920 if (!nvgpu_clk_arb_is_valid_domain(g, clk_info.clk_domain))
921 return -EINVAL;
922 }
923
924 entry = (struct nvgpu_gpu_clk_info __user *)
925 (uintptr_t)args->clk_info_entries;
926
927 ret = nvgpu_clk_arb_install_request_fd(g, session, &fd);
928 if (ret < 0)
929 return ret;
930
931 for (i = 0; i < args->num_entries; i++, entry++) {
932
933 if (copy_from_user(&clk_info, (void __user *)entry,
934 sizeof(clk_info)))
935 return -EFAULT;
936 freq_mhz = HZ_TO_MHZ(clk_info.freq_hz);
937
938 nvgpu_clk_arb_set_session_target_mhz(session, fd,
939 clk_info.clk_domain, freq_mhz);
940 }
941
942 ret = nvgpu_clk_arb_commit_request_fd(g, session, fd);
943 if (ret < 0)
944 return ret;
945
946 args->completion_fd = fd;
947
948 return ret;
949}
950
951static int nvgpu_gpu_clk_get_info(struct gk20a *g,
952 struct gk20a_ctrl_priv *priv,
953 struct nvgpu_gpu_clk_get_info_args *args)
954{
955 struct nvgpu_gpu_clk_info clk_info;
956 struct nvgpu_gpu_clk_info __user *entry;
957 struct nvgpu_clk_session *session = priv->clk_session;
958 u32 clk_domains = 0;
959 u32 num_domains;
960 u32 num_entries;
961 u32 i;
962 u16 freq_mhz;
963 int err;
964 int bit;
965
966 gk20a_dbg_fn("");
967
968 if (!session)
969 return -EINVAL;
970
971 clk_domains = nvgpu_clk_arb_get_arbiter_clk_domains(g);
972 num_domains = hweight_long(clk_domains);
973
974 if (!args->flags) {
975 if (!args->num_entries) {
976 args->num_entries = num_domains;
977 return 0;
978 }
979
980 if (args->num_entries < num_domains)
981 return -EINVAL;
982
983 args->num_entries = 0;
984 num_entries = num_domains;
985
986 } else {
987 if (args->flags != NVGPU_GPU_CLK_FLAG_SPECIFIC_DOMAINS)
988 return -EINVAL;
989
990 num_entries = args->num_entries;
991 if (num_entries > num_domains * 3)
992 return -EINVAL;
993 }
994
995 entry = (struct nvgpu_gpu_clk_info __user *)
996 (uintptr_t)args->clk_info_entries;
997
998 for (i = 0; i < num_entries; i++, entry++) {
999
1000 if (args->flags == NVGPU_GPU_CLK_FLAG_SPECIFIC_DOMAINS) {
1001 if (copy_from_user(&clk_info, (void __user *)entry,
1002 sizeof(clk_info)))
1003 return -EFAULT;
1004 } else {
1005 bit = ffs(clk_domains) - 1;
1006 clk_info.clk_domain = bit;
1007 clk_domains &= ~BIT(bit);
1008 clk_info.clk_type = args->clk_type;
1009 }
1010
1011 switch (clk_info.clk_type) {
1012 case NVGPU_GPU_CLK_TYPE_TARGET:
1013 err = nvgpu_clk_arb_get_session_target_mhz(session,
1014 clk_info.clk_domain, &freq_mhz);
1015 break;
1016 case NVGPU_GPU_CLK_TYPE_ACTUAL:
1017 err = nvgpu_clk_arb_get_arbiter_actual_mhz(g,
1018 clk_info.clk_domain, &freq_mhz);
1019 break;
1020 case NVGPU_GPU_CLK_TYPE_EFFECTIVE:
1021 err = nvgpu_clk_arb_get_arbiter_effective_mhz(g,
1022 clk_info.clk_domain, &freq_mhz);
1023 break;
1024 default:
1025 freq_mhz = 0;
1026 err = -EINVAL;
1027 break;
1028 }
1029 if (err)
1030 return err;
1031
1032 clk_info.flags = 0;
1033 clk_info.freq_hz = MHZ_TO_HZ(freq_mhz);
1034
1035 err = copy_to_user((void __user *)entry, &clk_info,
1036 sizeof(clk_info));
1037 if (err)
1038 return -EFAULT;
1039 }
1040
1041 args->num_entries = num_entries;
1042
1043 return 0;
1044}
1045
1046static int nvgpu_gpu_get_event_fd(struct gk20a *g,
1047 struct gk20a_ctrl_priv *priv,
1048 struct nvgpu_gpu_get_event_fd_args *args)
1049{
1050 struct nvgpu_clk_session *session = priv->clk_session;
1051
1052 gk20a_dbg_fn("");
1053
1054 if (!session)
1055 return -EINVAL;
1056
1057 return nvgpu_clk_arb_install_event_fd(g, session, &args->event_fd,
1058 args->flags);
1059}
1060
1061static int nvgpu_gpu_get_voltage(struct gk20a *g,
1062 struct nvgpu_gpu_get_voltage_args *args)
1063{
1064 int err = -EINVAL;
1065
1066 gk20a_dbg_fn("");
1067
1068 if (args->reserved)
1069 return -EINVAL;
1070
1071 if (!(g->gpu_characteristics.flags & NVGPU_GPU_FLAGS_SUPPORT_GET_VOLTAGE))
1072 return -EINVAL;
1073
1074 err = gk20a_busy(g);
1075 if (err)
1076 return err;
1077
1078 switch (args->which) {
1079 case NVGPU_GPU_VOLTAGE_CORE:
1080 err = volt_get_voltage(g, CTRL_VOLT_DOMAIN_LOGIC, &args->voltage);
1081 break;
1082 case NVGPU_GPU_VOLTAGE_SRAM:
1083 err = volt_get_voltage(g, CTRL_VOLT_DOMAIN_SRAM, &args->voltage);
1084 break;
1085 case NVGPU_GPU_VOLTAGE_BUS:
1086 err = pmgr_pwr_devices_get_voltage(g, &args->voltage);
1087 break;
1088 default:
1089 err = -EINVAL;
1090 }
1091
1092 gk20a_idle(g);
1093
1094 return err;
1095}
1096
1097static int nvgpu_gpu_get_current(struct gk20a *g,
1098 struct nvgpu_gpu_get_current_args *args)
1099{
1100 int err;
1101
1102 gk20a_dbg_fn("");
1103
1104 if (args->reserved[0] || args->reserved[1] || args->reserved[2])
1105 return -EINVAL;
1106
1107 if (!(g->gpu_characteristics.flags & NVGPU_GPU_FLAGS_SUPPORT_GET_CURRENT))
1108 return -EINVAL;
1109
1110 err = gk20a_busy(g);
1111 if (err)
1112 return err;
1113
1114 err = pmgr_pwr_devices_get_current(g, &args->currnt);
1115
1116 gk20a_idle(g);
1117
1118 return err;
1119}
1120
1121static int nvgpu_gpu_get_power(struct gk20a *g,
1122 struct nvgpu_gpu_get_power_args *args)
1123{
1124 int err;
1125
1126 gk20a_dbg_fn("");
1127
1128 if (args->reserved[0] || args->reserved[1] || args->reserved[2])
1129 return -EINVAL;
1130
1131 if (!(g->gpu_characteristics.flags & NVGPU_GPU_FLAGS_SUPPORT_GET_POWER))
1132 return -EINVAL;
1133
1134 err = gk20a_busy(g);
1135 if (err)
1136 return err;
1137
1138 err = pmgr_pwr_devices_get_power(g, &args->power);
1139
1140 gk20a_idle(g);
1141
1142 return err;
1143}
1144
1145static int nvgpu_gpu_get_temperature(struct gk20a *g,
1146 struct nvgpu_gpu_get_temperature_args *args)
1147{
1148 int err;
1149 u32 temp_f24_8;
1150
1151 gk20a_dbg_fn("");
1152
1153 if (args->reserved[0] || args->reserved[1] || args->reserved[2])
1154 return -EINVAL;
1155
1156 if (!g->ops.therm.get_internal_sensor_curr_temp)
1157 return -EINVAL;
1158
1159 err = gk20a_busy(g);
1160 if (err)
1161 return err;
1162
1163 err = g->ops.therm.get_internal_sensor_curr_temp(g, &temp_f24_8);
1164
1165 gk20a_idle(g);
1166
1167 args->temp_f24_8 = (s32)temp_f24_8;
1168
1169 return err;
1170}
1171#endif
1172
1173static int nvgpu_gpu_set_therm_alert_limit(struct gk20a *g,
1174 struct nvgpu_gpu_set_therm_alert_limit_args *args)
1175{
1176 int err;
1177
1178 gk20a_dbg_fn("");
1179
1180 if (args->reserved[0] || args->reserved[1] || args->reserved[2])
1181 return -EINVAL;
1182
1183 if (!g->ops.therm.configure_therm_alert)
1184 return -EINVAL;
1185
1186 err = gk20a_busy(g);
1187 if (err)
1188 return err;
1189
1190 err = g->ops.therm.configure_therm_alert(g, args->temp_f24_8);
1191
1192 gk20a_idle(g);
1193
1194 return err;
1195}
1196
1197long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1198{
1199 struct gk20a_ctrl_priv *priv = filp->private_data;
1200 struct gk20a *g = priv->g;
1201 struct nvgpu_gpu_zcull_get_ctx_size_args *get_ctx_size_args;
1202 struct nvgpu_gpu_zcull_get_info_args *get_info_args;
1203 struct nvgpu_gpu_zbc_set_table_args *set_table_args;
1204 struct nvgpu_gpu_zbc_query_table_args *query_table_args;
1205 u8 buf[NVGPU_GPU_IOCTL_MAX_ARG_SIZE];
1206 struct gr_zcull_info *zcull_info;
1207 struct zbc_entry *zbc_val;
1208 struct zbc_query_params *zbc_tbl;
1209 int i, err = 0;
1210
1211 gk20a_dbg_fn("");
1212
1213 if ((_IOC_TYPE(cmd) != NVGPU_GPU_IOCTL_MAGIC) ||
1214 (_IOC_NR(cmd) == 0) ||
1215 (_IOC_NR(cmd) > NVGPU_GPU_IOCTL_LAST) ||
1216 (_IOC_SIZE(cmd) > NVGPU_GPU_IOCTL_MAX_ARG_SIZE))
1217 return -EINVAL;
1218
1219 memset(buf, 0, sizeof(buf));
1220 if (_IOC_DIR(cmd) & _IOC_WRITE) {
1221 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
1222 return -EFAULT;
1223 }
1224
1225 if (!g->gr.sw_ready) {
1226 err = gk20a_busy(g);
1227 if (err)
1228 return err;
1229
1230 gk20a_idle(g);
1231 }
1232
1233 switch (cmd) {
1234 case NVGPU_GPU_IOCTL_ZCULL_GET_CTX_SIZE:
1235 get_ctx_size_args = (struct nvgpu_gpu_zcull_get_ctx_size_args *)buf;
1236
1237 get_ctx_size_args->size = gr_gk20a_get_ctxsw_zcull_size(g, &g->gr);
1238
1239 break;
1240 case NVGPU_GPU_IOCTL_ZCULL_GET_INFO:
1241 get_info_args = (struct nvgpu_gpu_zcull_get_info_args *)buf;
1242
1243 memset(get_info_args, 0, sizeof(struct nvgpu_gpu_zcull_get_info_args));
1244
1245 zcull_info = nvgpu_kzalloc(g, sizeof(struct gr_zcull_info));
1246 if (zcull_info == NULL)
1247 return -ENOMEM;
1248
1249 err = g->ops.gr.get_zcull_info(g, &g->gr, zcull_info);
1250 if (err) {
1251 nvgpu_kfree(g, zcull_info);
1252 break;
1253 }
1254
1255 get_info_args->width_align_pixels = zcull_info->width_align_pixels;
1256 get_info_args->height_align_pixels = zcull_info->height_align_pixels;
1257 get_info_args->pixel_squares_by_aliquots = zcull_info->pixel_squares_by_aliquots;
1258 get_info_args->aliquot_total = zcull_info->aliquot_total;
1259 get_info_args->region_byte_multiplier = zcull_info->region_byte_multiplier;
1260 get_info_args->region_header_size = zcull_info->region_header_size;
1261 get_info_args->subregion_header_size = zcull_info->subregion_header_size;
1262 get_info_args->subregion_width_align_pixels = zcull_info->subregion_width_align_pixels;
1263 get_info_args->subregion_height_align_pixels = zcull_info->subregion_height_align_pixels;
1264 get_info_args->subregion_count = zcull_info->subregion_count;
1265
1266 nvgpu_kfree(g, zcull_info);
1267 break;
1268 case NVGPU_GPU_IOCTL_ZBC_SET_TABLE:
1269 set_table_args = (struct nvgpu_gpu_zbc_set_table_args *)buf;
1270
1271 zbc_val = nvgpu_kzalloc(g, sizeof(struct zbc_entry));
1272 if (zbc_val == NULL)
1273 return -ENOMEM;
1274
1275 zbc_val->format = set_table_args->format;
1276 zbc_val->type = set_table_args->type;
1277
1278 switch (zbc_val->type) {
1279 case GK20A_ZBC_TYPE_COLOR:
1280 for (i = 0; i < GK20A_ZBC_COLOR_VALUE_SIZE; i++) {
1281 zbc_val->color_ds[i] = set_table_args->color_ds[i];
1282 zbc_val->color_l2[i] = set_table_args->color_l2[i];
1283 }
1284 break;
1285 case GK20A_ZBC_TYPE_DEPTH:
1286 case T19X_ZBC:
1287 zbc_val->depth = set_table_args->depth;
1288 break;
1289 default:
1290 err = -EINVAL;
1291 }
1292
1293 if (!err) {
1294 err = gk20a_busy(g);
1295 if (!err) {
1296 err = g->ops.gr.zbc_set_table(g, &g->gr,
1297 zbc_val);
1298 gk20a_idle(g);
1299 }
1300 }
1301
1302 if (zbc_val)
1303 nvgpu_kfree(g, zbc_val);
1304 break;
1305 case NVGPU_GPU_IOCTL_ZBC_QUERY_TABLE:
1306 query_table_args = (struct nvgpu_gpu_zbc_query_table_args *)buf;
1307
1308 zbc_tbl = nvgpu_kzalloc(g, sizeof(struct zbc_query_params));
1309 if (zbc_tbl == NULL)
1310 return -ENOMEM;
1311
1312 zbc_tbl->type = query_table_args->type;
1313 zbc_tbl->index_size = query_table_args->index_size;
1314
1315 err = g->ops.gr.zbc_query_table(g, &g->gr, zbc_tbl);
1316
1317 if (!err) {
1318 switch (zbc_tbl->type) {
1319 case GK20A_ZBC_TYPE_COLOR:
1320 for (i = 0; i < GK20A_ZBC_COLOR_VALUE_SIZE; i++) {
1321 query_table_args->color_ds[i] = zbc_tbl->color_ds[i];
1322 query_table_args->color_l2[i] = zbc_tbl->color_l2[i];
1323 }
1324 break;
1325 case GK20A_ZBC_TYPE_DEPTH:
1326 case T19X_ZBC:
1327 query_table_args->depth = zbc_tbl->depth;
1328 break;
1329 case GK20A_ZBC_TYPE_INVALID:
1330 query_table_args->index_size = zbc_tbl->index_size;
1331 break;
1332 default:
1333 err = -EINVAL;
1334 }
1335 if (!err) {
1336 query_table_args->format = zbc_tbl->format;
1337 query_table_args->ref_cnt = zbc_tbl->ref_cnt;
1338 }
1339 }
1340
1341 if (zbc_tbl)
1342 nvgpu_kfree(g, zbc_tbl);
1343 break;
1344
1345 case NVGPU_GPU_IOCTL_GET_CHARACTERISTICS:
1346 err = gk20a_ctrl_ioctl_gpu_characteristics(
1347 g, (struct nvgpu_gpu_get_characteristics *)buf);
1348 break;
1349 case NVGPU_GPU_IOCTL_PREPARE_COMPRESSIBLE_READ:
1350 err = gk20a_ctrl_prepare_compressible_read(g,
1351 (struct nvgpu_gpu_prepare_compressible_read_args *)buf);
1352 break;
1353 case NVGPU_GPU_IOCTL_MARK_COMPRESSIBLE_WRITE:
1354 err = gk20a_ctrl_mark_compressible_write(g,
1355 (struct nvgpu_gpu_mark_compressible_write_args *)buf);
1356 break;
1357 case NVGPU_GPU_IOCTL_ALLOC_AS:
1358 err = gk20a_ctrl_alloc_as(g,
1359 (struct nvgpu_alloc_as_args *)buf);
1360 break;
1361 case NVGPU_GPU_IOCTL_OPEN_TSG:
1362 err = gk20a_ctrl_open_tsg(g,
1363 (struct nvgpu_gpu_open_tsg_args *)buf);
1364 break;
1365 case NVGPU_GPU_IOCTL_GET_TPC_MASKS:
1366 err = gk20a_ctrl_get_tpc_masks(g,
1367 (struct nvgpu_gpu_get_tpc_masks_args *)buf);
1368 break;
1369 case NVGPU_GPU_IOCTL_GET_FBP_L2_MASKS:
1370 err = gk20a_ctrl_get_fbp_l2_masks(g,
1371 (struct nvgpu_gpu_get_fbp_l2_masks_args *)buf);
1372 break;
1373 case NVGPU_GPU_IOCTL_OPEN_CHANNEL:
1374 /* nvgpu_channel_open_args is used here, but nvgpu.h also defines
1375 * nvgpu_gpu_open_channel_args for consistency - the two are the same */
1376 err = gk20a_channel_open_ioctl(g,
1377 (struct nvgpu_channel_open_args *)buf);
1378 break;
1379 case NVGPU_GPU_IOCTL_FLUSH_L2:
1380 err = nvgpu_gpu_ioctl_l2_fb_ops(g,
1381 (struct nvgpu_gpu_l2_fb_args *)buf);
1382 break;
1383 case NVGPU_GPU_IOCTL_INVAL_ICACHE:
1384 err = gr_gk20a_elpg_protected_call(g,
1385 nvgpu_gpu_ioctl_inval_icache(g, (struct nvgpu_gpu_inval_icache_args *)buf));
1386 break;
1387
1388 case NVGPU_GPU_IOCTL_SET_MMUDEBUG_MODE:
1389 err = nvgpu_gpu_ioctl_set_mmu_debug_mode(g,
1390 (struct nvgpu_gpu_mmu_debug_mode_args *)buf);
1391 break;
1392
1393 case NVGPU_GPU_IOCTL_SET_SM_DEBUG_MODE:
1394 err = gr_gk20a_elpg_protected_call(g,
1395 nvgpu_gpu_ioctl_set_debug_mode(g, (struct nvgpu_gpu_sm_debug_mode_args *)buf));
1396 break;
1397
1398 case NVGPU_GPU_IOCTL_TRIGGER_SUSPEND:
1399 err = nvgpu_gpu_ioctl_trigger_suspend(g);
1400 break;
1401
1402 case NVGPU_GPU_IOCTL_WAIT_FOR_PAUSE:
1403 err = nvgpu_gpu_ioctl_wait_for_pause(g,
1404 (struct nvgpu_gpu_wait_pause_args *)buf);
1405 break;
1406
1407 case NVGPU_GPU_IOCTL_RESUME_FROM_PAUSE:
1408 err = nvgpu_gpu_ioctl_resume_from_pause(g);
1409 break;
1410
1411 case NVGPU_GPU_IOCTL_CLEAR_SM_ERRORS:
1412 err = nvgpu_gpu_ioctl_clear_sm_errors(g);
1413 break;
1414
1415 case NVGPU_GPU_IOCTL_GET_TPC_EXCEPTION_EN_STATUS:
1416 err = nvgpu_gpu_ioctl_has_any_exception(g,
1417 (struct nvgpu_gpu_tpc_exception_en_status_args *)buf);
1418 break;
1419
1420 case NVGPU_GPU_IOCTL_NUM_VSMS:
1421 err = gk20a_ctrl_get_num_vsms(g,
1422 (struct nvgpu_gpu_num_vsms *)buf);
1423 break;
1424 case NVGPU_GPU_IOCTL_VSMS_MAPPING:
1425 err = gk20a_ctrl_vsm_mapping(g,
1426 (struct nvgpu_gpu_vsms_mapping *)buf);
1427 break;
1428
1429 case NVGPU_GPU_IOCTL_GET_BUFFER_INFO:
1430 err = gk20a_ctrl_get_buffer_info(g,
1431 (struct nvgpu_gpu_get_buffer_info_args *)buf);
1432 break;
1433
1434 case NVGPU_GPU_IOCTL_GET_CPU_TIME_CORRELATION_INFO:
1435 err = nvgpu_gpu_get_cpu_time_correlation_info(g,
1436 (struct nvgpu_gpu_get_cpu_time_correlation_info_args *)buf);
1437 break;
1438
1439 case NVGPU_GPU_IOCTL_GET_GPU_TIME:
1440 err = nvgpu_gpu_get_gpu_time(g,
1441 (struct nvgpu_gpu_get_gpu_time_args *)buf);
1442 break;
1443
1444 case NVGPU_GPU_IOCTL_GET_ENGINE_INFO:
1445 err = nvgpu_gpu_get_engine_info(g,
1446 (struct nvgpu_gpu_get_engine_info_args *)buf);
1447 break;
1448
1449 case NVGPU_GPU_IOCTL_ALLOC_VIDMEM:
1450 err = nvgpu_gpu_alloc_vidmem(g,
1451 (struct nvgpu_gpu_alloc_vidmem_args *)buf);
1452 break;
1453
1454 case NVGPU_GPU_IOCTL_GET_MEMORY_STATE:
1455 err = nvgpu_gpu_get_memory_state(g,
1456 (struct nvgpu_gpu_get_memory_state_args *)buf);
1457 break;
1458
1459#ifdef CONFIG_ARCH_TEGRA_18x_SOC
1460 case NVGPU_GPU_IOCTL_CLK_GET_RANGE:
1461 err = nvgpu_gpu_clk_get_range(g, priv,
1462 (struct nvgpu_gpu_clk_range_args *)buf);
1463 break;
1464
1465 case NVGPU_GPU_IOCTL_CLK_GET_VF_POINTS:
1466 err = nvgpu_gpu_clk_get_vf_points(g, priv,
1467 (struct nvgpu_gpu_clk_vf_points_args *)buf);
1468 break;
1469
1470 case NVGPU_GPU_IOCTL_CLK_SET_INFO:
1471 err = nvgpu_gpu_clk_set_info(g, priv,
1472 (struct nvgpu_gpu_clk_set_info_args *)buf);
1473 break;
1474
1475 case NVGPU_GPU_IOCTL_CLK_GET_INFO:
1476 err = nvgpu_gpu_clk_get_info(g, priv,
1477 (struct nvgpu_gpu_clk_get_info_args *)buf);
1478 break;
1479
1480 case NVGPU_GPU_IOCTL_GET_EVENT_FD:
1481 err = nvgpu_gpu_get_event_fd(g, priv,
1482 (struct nvgpu_gpu_get_event_fd_args *)buf);
1483 break;
1484
1485 case NVGPU_GPU_IOCTL_GET_VOLTAGE:
1486 err = nvgpu_gpu_get_voltage(g,
1487 (struct nvgpu_gpu_get_voltage_args *)buf);
1488 break;
1489
1490 case NVGPU_GPU_IOCTL_GET_CURRENT:
1491 err = nvgpu_gpu_get_current(g,
1492 (struct nvgpu_gpu_get_current_args *)buf);
1493 break;
1494
1495 case NVGPU_GPU_IOCTL_GET_POWER:
1496 err = nvgpu_gpu_get_power(g,
1497 (struct nvgpu_gpu_get_power_args *)buf);
1498 break;
1499
1500 case NVGPU_GPU_IOCTL_GET_TEMPERATURE:
1501 err = nvgpu_gpu_get_temperature(g,
1502 (struct nvgpu_gpu_get_temperature_args *)buf);
1503 break;
1504#endif
1505
1506 case NVGPU_GPU_IOCTL_SET_THERM_ALERT_LIMIT:
1507 err = nvgpu_gpu_set_therm_alert_limit(g,
1508 (struct nvgpu_gpu_set_therm_alert_limit_args *)buf);
1509 break;
1510
1511 default:
1512 dev_dbg(dev_from_gk20a(g), "unrecognized gpu ioctl cmd: 0x%x", cmd);
1513 err = -ENOTTY;
1514 break;
1515 }
1516
1517 if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
1518 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
1519
1520 return err;
1521}
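
For context on how the dispatcher above is driven from user space, here is a minimal sketch (not part of this commit) that queries the GPU characteristics through the ctrl node. The device path /dev/nvhost-ctrl-gpu, the <linux/nvgpu.h> include path, and the arch/impl/rev fields printed at the end are assumptions about how the nvgpu uapi is installed on a given platform; the request fields match the ones handled by gk20a_ctrl_ioctl_gpu_characteristics() above.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>	/* assumed install path of the nvgpu uapi header */

int main(void)
{
	struct nvgpu_gpu_characteristics chars;
	struct nvgpu_gpu_get_characteristics req;
	int fd = open("/dev/nvhost-ctrl-gpu", O_RDWR);	/* assumed ctrl devnode path */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&chars, 0, sizeof(chars));
	memset(&req, 0, sizeof(req));
	req.gpu_characteristics_buf_size = sizeof(chars);
	req.gpu_characteristics_buf_addr = (uintptr_t)&chars;

	/* The driver copies at most buf_size bytes and writes the full size back. */
	if (ioctl(fd, NVGPU_GPU_IOCTL_GET_CHARACTERISTICS, &req) < 0) {
		perror("ioctl");
		close(fd);
		return 1;
	}

	printf("arch 0x%x impl 0x%x rev 0x%x\n", chars.arch, chars.impl, chars.rev);
	close(fd);
	return 0;
}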