author	Terje Bergstrom <tbergstrom@nvidia.com>	2017-03-23 15:49:58 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-04-02 11:28:22 -0400
commit	660c9a95104b37c947e0c2f6aeda4c92698b40f4 (patch)
tree	e00489bfb3b4c586015a555e3517e6fa8fc6b3a6 /drivers/gpu/nvgpu/gk20a
parent	9adc49f94bd9df94e7d77b08eb2b6e98d7eb5758 (diff)
gpu: nvgpu: Move ctrl IOCTL creation to Linux module
Move all code related to the ctrl devnode under the Linux module.

JIRA NVGPU-16

Change-Id: I834b46ec4172076d7bde459168f1e6bc8c5d6c0c
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1330802
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c	1522
-rw-r--r--	drivers/gpu/nvgpu/gk20a/ctrl_gk20a.h	23
2 files changed, 0 insertions, 1545 deletions
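
For orientation, the ctrl devnode whose IOCTL plumbing this change relocates is the node userspace opens to query GPU state. A minimal sketch of a caller follows; the devnode path /dev/nvhost-ctrl-gpu is an assumption (it varies by platform), and the nvgpu UAPI header is assumed to be on the include path. The size echo at the end relies only on behavior visible in gk20a_ctrl_ioctl_gpu_characteristics() below:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvgpu.h>	/* nvgpu UAPI: ioctl numbers and arg structs */

int main(void)
{
	struct nvgpu_gpu_characteristics chars;
	struct nvgpu_gpu_get_characteristics req;
	/* Devnode path is illustrative; the actual path differs per platform. */
	int fd = open("/dev/nvhost-ctrl-gpu", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.gpu_characteristics_buf_size = sizeof(chars);
	req.gpu_characteristics_buf_addr = (uintptr_t)&chars;

	if (ioctl(fd, NVGPU_GPU_IOCTL_GET_CHARACTERISTICS, &req) < 0) {
		perror("ioctl");
		close(fd);
		return 1;
	}

	/* On success the driver writes back the full struct size. */
	printf("characteristics struct size: %llu bytes\n",
	       (unsigned long long)req.gpu_characteristics_buf_size);
	close(fd);
	return 0;
}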
diff --git a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
deleted file mode 100644
index fba39a50..00000000
--- a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
+++ /dev/null
@@ -1,1522 +0,0 @@
/*
 * Copyright (c) 2011-2017, NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/highmem.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/nvgpu.h>
#include <linux/bitops.h>
#include <uapi/linux/nvgpu.h>
#include <linux/delay.h>

#include <nvgpu/kmem.h>

#include "gk20a.h"
#include "fence_gk20a.h"

#define HZ_TO_MHZ(a) ((a > 0xF414F9CD7) ? 0xffff : (a >> 32) ? \
				(u32) ((a * 0x10C8ULL) >> 32) : (u16) ((u32) a/MHZ))
#define MHZ_TO_HZ(a) ((u64)a * MHZ)

struct gk20a_ctrl_priv {
	struct device *dev;
	struct gk20a *g;
#ifdef CONFIG_ARCH_TEGRA_18x_SOC
	struct nvgpu_clk_session *clk_session;
#endif
};

int gk20a_ctrl_dev_open(struct inode *inode, struct file *filp)
{
	struct gk20a *g;
	struct gk20a_ctrl_priv *priv;
	int err = 0;

	gk20a_dbg_fn("");

	g = container_of(inode->i_cdev,
			 struct gk20a, ctrl.cdev);
	g = gk20a_get(g);
	if (!g)
		return -ENODEV;

	priv = nvgpu_kzalloc(g, sizeof(struct gk20a_ctrl_priv));
	if (!priv) {
		err = -ENOMEM;
		goto free_ref;
	}
	filp->private_data = priv;
	priv->dev = g->dev;
	/*
	 * We don't close the arbiter fds after driver teardown, to support
	 * GPU_LOST events; so we store g here, instead of dereferencing the
	 * dev structure on teardown.
	 */
	priv->g = g;

	if (!g->gr.sw_ready) {
		err = gk20a_busy(g);
		if (err)
			goto free_ref;
		gk20a_idle(g);
	}

#ifdef CONFIG_ARCH_TEGRA_18x_SOC
	err = nvgpu_clk_arb_init_session(g, &priv->clk_session);
#endif
free_ref:
	if (err)
		gk20a_put(g);
	return err;
}
int gk20a_ctrl_dev_release(struct inode *inode, struct file *filp)
{
	struct gk20a_ctrl_priv *priv = filp->private_data;
	struct gk20a *g = priv->g;

	gk20a_dbg_fn("");

#ifdef CONFIG_ARCH_TEGRA_18x_SOC
	if (priv->clk_session)
		nvgpu_clk_arb_release_session(g, priv->clk_session);
#endif

	gk20a_put(g);
	nvgpu_kfree(g, priv);

	return 0;
}

static long
gk20a_ctrl_ioctl_gpu_characteristics(
	struct gk20a *g,
	struct nvgpu_gpu_get_characteristics *request)
{
	struct nvgpu_gpu_characteristics *pgpu = &g->gpu_characteristics;
	long err = 0;

	if (request->gpu_characteristics_buf_size > 0) {
		size_t write_size = sizeof(*pgpu);

		if (write_size > request->gpu_characteristics_buf_size)
			write_size = request->gpu_characteristics_buf_size;

		err = copy_to_user((void __user *)(uintptr_t)
				   request->gpu_characteristics_buf_addr,
				   pgpu, write_size);
	}

	if (err == 0)
		request->gpu_characteristics_buf_size = sizeof(*pgpu);

	return err;
}

static int gk20a_ctrl_prepare_compressible_read(
		struct gk20a *g,
		struct nvgpu_gpu_prepare_compressible_read_args *args)
{
	struct nvgpu_fence fence;
	struct gk20a_fence *fence_out = NULL;
	int ret = 0;
	int flags = args->submit_flags;

	fence.id = args->fence.syncpt_id;
	fence.value = args->fence.syncpt_value;

	ret = gk20a_prepare_compressible_read(g, args->handle,
			args->request_compbits, args->offset,
			args->compbits_hoffset, args->compbits_voffset,
			args->scatterbuffer_offset,
			args->width, args->height, args->block_height_log2,
			flags, &fence, &args->valid_compbits,
			&args->zbc_color, &fence_out);

	if (ret)
		return ret;

	/* Convert fence_out to something we can pass back to user space. */
	if (flags & NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET) {
		if (flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE) {
			if (fence_out) {
				int fd = gk20a_fence_install_fd(fence_out);
				if (fd < 0)
					ret = fd;
				else
					args->fence.fd = fd;
			} else {
				args->fence.fd = -1;
			}
		} else {
			if (fence_out) {
				args->fence.syncpt_id = fence_out->syncpt_id;
				args->fence.syncpt_value =
						fence_out->syncpt_value;
			} else {
				args->fence.syncpt_id = -1;
				args->fence.syncpt_value = 0;
			}
		}
	}
	gk20a_fence_put(fence_out);

	return 0;
}

static int gk20a_ctrl_mark_compressible_write(
		struct gk20a *g,
		struct nvgpu_gpu_mark_compressible_write_args *args)
{
	int ret;

	ret = gk20a_mark_compressible_write(g, args->handle,
			args->valid_compbits, args->offset, args->zbc_color);

	return ret;
}

static int gk20a_ctrl_alloc_as(
		struct gk20a *g,
		struct nvgpu_alloc_as_args *args)
{
	struct gk20a_as_share *as_share;
	int err;
	int fd;
	struct file *file;
	char name[64];

	err = get_unused_fd_flags(O_RDWR);
	if (err < 0)
		return err;
	fd = err;

	snprintf(name, sizeof(name), "nvhost-%s-fd%d", g->name, fd);

	file = anon_inode_getfile(name, g->as.cdev.ops, NULL, O_RDWR);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto clean_up;
	}

	err = gk20a_as_alloc_share(&g->as, args->big_page_size, args->flags,
				   &as_share);
	if (err)
		goto clean_up_file;

	fd_install(fd, file);
	file->private_data = as_share;

	args->as_fd = fd;
	return 0;

clean_up_file:
	fput(file);
clean_up:
	put_unused_fd(fd);
	return err;
}

static int gk20a_ctrl_open_tsg(struct gk20a *g,
			       struct nvgpu_gpu_open_tsg_args *args)
{
	int err;
	int fd;
	struct file *file;
	char name[64];

	err = get_unused_fd_flags(O_RDWR);
	if (err < 0)
		return err;
	fd = err;

	snprintf(name, sizeof(name), "nvgpu-%s-tsg%d", g->name, fd);

	file = anon_inode_getfile(name, g->tsg.cdev.ops, NULL, O_RDWR);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto clean_up;
	}

	err = gk20a_tsg_open(g, file);
	if (err)
		goto clean_up_file;

	fd_install(fd, file);
	args->tsg_fd = fd;
	return 0;

clean_up_file:
	fput(file);
clean_up:
	put_unused_fd(fd);
	return err;
}

static int gk20a_ctrl_get_tpc_masks(struct gk20a *g,
				    struct nvgpu_gpu_get_tpc_masks_args *args)
{
	struct gr_gk20a *gr = &g->gr;
	int err = 0;
	const u32 gpc_tpc_mask_size = sizeof(u32) * gr->gpc_count;

	if (args->mask_buf_size > 0) {
		size_t write_size = gpc_tpc_mask_size;

		if (write_size > args->mask_buf_size)
			write_size = args->mask_buf_size;

		err = copy_to_user((void __user *)(uintptr_t)
				   args->mask_buf_addr,
				   gr->gpc_tpc_mask, write_size);
	}

	if (err == 0)
		args->mask_buf_size = gpc_tpc_mask_size;

	return err;
}

static int gk20a_ctrl_get_fbp_l2_masks(
	struct gk20a *g, struct nvgpu_gpu_get_fbp_l2_masks_args *args)
{
	struct gr_gk20a *gr = &g->gr;
	int err = 0;
	const u32 fbp_l2_mask_size = sizeof(u32) * gr->max_fbps_count;

	if (args->mask_buf_size > 0) {
		size_t write_size = fbp_l2_mask_size;

		if (write_size > args->mask_buf_size)
			write_size = args->mask_buf_size;

		err = copy_to_user((void __user *)(uintptr_t)
				   args->mask_buf_addr,
				   gr->fbp_rop_l2_en_mask, write_size);
	}

	if (err == 0)
		args->mask_buf_size = fbp_l2_mask_size;

	return err;
}

static int nvgpu_gpu_ioctl_l2_fb_ops(struct gk20a *g,
		struct nvgpu_gpu_l2_fb_args *args)
{
	int err = 0;

	if (args->l2_flush)
		g->ops.mm.l2_flush(g, args->l2_invalidate ? true : false);

	if (args->fb_flush)
		g->ops.mm.fb_flush(g);

	return err;
}

/* Invalidate i-cache for kepler & maxwell */
static int nvgpu_gpu_ioctl_inval_icache(
		struct gk20a *g,
		struct nvgpu_gpu_inval_icache_args *args)
{
	struct channel_gk20a *ch;
	int err;

	ch = gk20a_get_channel_from_file(args->channel_fd);
	if (!ch)
		return -EINVAL;

	/* Take the global lock, since we'll be doing global regops */
	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	err = g->ops.gr.inval_icache(g, ch);
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	return err;
}

static int nvgpu_gpu_ioctl_set_mmu_debug_mode(
		struct gk20a *g,
		struct nvgpu_gpu_mmu_debug_mode_args *args)
{
	if (gk20a_busy(g)) {
		gk20a_err(dev_from_gk20a(g), "failed to power on gpu\n");
		return -EINVAL;
	}

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	g->ops.fb.set_debug_mode(g, args->state == 1);
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	gk20a_idle(g);
	return 0;
}

static int nvgpu_gpu_ioctl_set_debug_mode(
		struct gk20a *g,
		struct nvgpu_gpu_sm_debug_mode_args *args)
{
	struct channel_gk20a *ch;
	int err;

	ch = gk20a_get_channel_from_file(args->channel_fd);
	if (!ch)
		return -EINVAL;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	if (g->ops.gr.set_sm_debug_mode)
		err = g->ops.gr.set_sm_debug_mode(g, ch,
				args->sms, !!args->enable);
	else
		err = -ENOSYS;
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	return err;
}

static int nvgpu_gpu_ioctl_trigger_suspend(struct gk20a *g)
{
	int err;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	err = g->ops.gr.trigger_suspend(g);
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	return err;
}

static int nvgpu_gpu_ioctl_wait_for_pause(struct gk20a *g,
		struct nvgpu_gpu_wait_pause_args *args)
{
	int err = 0;
	struct warpstate *w_state;
	u32 sm_count, size;

	sm_count = g->gr.gpc_count * g->gr.tpc_count;
	size = sm_count * sizeof(struct warpstate);
	w_state = nvgpu_kzalloc(g, size);
	if (!w_state)
		return -ENOMEM;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	g->ops.gr.wait_for_pause(g, w_state);

	/* Copy to user space - pointed by "args->pwarpstate" */
	if (copy_to_user((void __user *)(uintptr_t)args->pwarpstate, w_state, size)) {
		gk20a_dbg_fn("copy_to_user failed!");
		err = -EFAULT;
	}

	nvgpu_mutex_release(&g->dbg_sessions_lock);
	nvgpu_kfree(g, w_state);
	return err;
}

static int nvgpu_gpu_ioctl_resume_from_pause(struct gk20a *g)
{
	int err = 0;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	err = g->ops.gr.resume_from_pause(g);
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	return err;
}

static int nvgpu_gpu_ioctl_clear_sm_errors(struct gk20a *g)
{
	return g->ops.gr.clear_sm_errors(g);
}

static int nvgpu_gpu_ioctl_has_any_exception(
		struct gk20a *g,
		struct nvgpu_gpu_tpc_exception_en_status_args *args)
{
	u32 tpc_exception_en;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	tpc_exception_en = g->ops.gr.tpc_enabled_exceptions(g);
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	args->tpc_exception_en_sm_mask = tpc_exception_en;

	return 0;
}

static int gk20a_ctrl_get_num_vsms(struct gk20a *g,
				   struct nvgpu_gpu_num_vsms *args)
{
	struct gr_gk20a *gr = &g->gr;
	args->num_vsms = gr->no_of_sm;
	return 0;
}

static int gk20a_ctrl_vsm_mapping(struct gk20a *g,
				  struct nvgpu_gpu_vsms_mapping *args)
{
	int err = 0;
	struct gr_gk20a *gr = &g->gr;
	size_t write_size = gr->no_of_sm *
		sizeof(struct nvgpu_gpu_vsms_mapping_entry);
	struct nvgpu_gpu_vsms_mapping_entry *vsms_buf;
	u32 i;

	vsms_buf = nvgpu_kzalloc(g, write_size);
	if (vsms_buf == NULL)
		return -ENOMEM;

	for (i = 0; i < gr->no_of_sm; i++) {
		vsms_buf[i].gpc_index = gr->sm_to_cluster[i].gpc_index;
		vsms_buf[i].tpc_index = gr->sm_to_cluster[i].tpc_index;
	}

	err = copy_to_user((void __user *)(uintptr_t)
			   args->vsms_map_buf_addr,
			   vsms_buf, write_size);
	nvgpu_kfree(g, vsms_buf);

	return err;
}

static int gk20a_ctrl_get_buffer_info(
	struct gk20a *g, struct nvgpu_gpu_get_buffer_info_args *args)
{
	return gk20a_mm_get_buffer_info(dev_from_gk20a(g), args->in.dmabuf_fd,
					&args->out.id, &args->out.length);
}

static inline u64 get_cpu_timestamp_tsc(void)
{
	return ((u64) get_cycles());
}

static inline u64 get_cpu_timestamp_jiffies(void)
{
	return (get_jiffies_64() - INITIAL_JIFFIES);
}

static inline u64 get_cpu_timestamp_timeofday(void)
{
	struct timeval tv;

	do_gettimeofday(&tv);
	return timeval_to_jiffies(&tv);
}

static inline int get_timestamps_zipper(struct gk20a *g,
		u64 (*get_cpu_timestamp)(void),
		struct nvgpu_gpu_get_cpu_time_correlation_info_args *args)
{
	int err = 0;
	unsigned int i = 0;

	if (gk20a_busy(g)) {
		gk20a_err(dev_from_gk20a(g), "GPU not powered on\n");
		return -EINVAL;
	}

	for (i = 0; i < args->count; i++) {
		err = g->ops.bus.read_ptimer(g, &args->samples[i].gpu_timestamp);
		if (err)
			goto end;

		args->samples[i].cpu_timestamp = get_cpu_timestamp();
	}

end:
	/* Drop the busy ref taken above, on both the success and error paths. */
	gk20a_idle(g);
	return err;
}

static int nvgpu_gpu_get_cpu_time_correlation_info(
	struct gk20a *g,
	struct nvgpu_gpu_get_cpu_time_correlation_info_args *args)
{
	int err = 0;
	u64 (*get_cpu_timestamp)(void) = NULL;

	if (args->count > NVGPU_GPU_GET_CPU_TIME_CORRELATION_INFO_MAX_COUNT)
		return -EINVAL;

	switch (args->source_id) {
	case NVGPU_GPU_GET_CPU_TIME_CORRELATION_INFO_SRC_ID_TSC:
		get_cpu_timestamp = get_cpu_timestamp_tsc;
		break;
	case NVGPU_GPU_GET_CPU_TIME_CORRELATION_INFO_SRC_ID_JIFFIES:
		get_cpu_timestamp = get_cpu_timestamp_jiffies;
		break;
	case NVGPU_GPU_GET_CPU_TIME_CORRELATION_INFO_SRC_ID_TIMEOFDAY:
		get_cpu_timestamp = get_cpu_timestamp_timeofday;
		break;
	default:
		gk20a_err(dev_from_gk20a(g), "invalid cpu clock source id\n");
		return -EINVAL;
	}

	err = get_timestamps_zipper(g, get_cpu_timestamp, args);
	return err;
}

static int nvgpu_gpu_get_gpu_time(
	struct gk20a *g,
	struct nvgpu_gpu_get_gpu_time_args *args)
{
	u64 time;
	int err;

	err = gk20a_busy(g);
	if (err)
		return err;

	err = g->ops.bus.read_ptimer(g, &time);
	if (!err)
		args->gpu_timestamp = time;

	gk20a_idle(g);
	return err;
}

static int nvgpu_gpu_get_engine_info(
	struct gk20a *g,
	struct nvgpu_gpu_get_engine_info_args *args)
{
	int err = 0;
	u32 engine_enum = ENGINE_INVAL_GK20A;
	u32 report_index = 0;
	u32 engine_id_idx;
	const u32 max_buffer_engines = args->engine_info_buf_size /
		sizeof(struct nvgpu_gpu_get_engine_info_item);
	struct nvgpu_gpu_get_engine_info_item __user *dst_item_list =
		(void __user *)(uintptr_t)args->engine_info_buf_addr;

	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines;
	     ++engine_id_idx) {
		u32 active_engine_id = g->fifo.active_engines_list[engine_id_idx];
		const struct fifo_engine_info_gk20a *src_info =
			&g->fifo.engine_info[active_engine_id];
		struct nvgpu_gpu_get_engine_info_item dst_info;

		memset(&dst_info, 0, sizeof(dst_info));

		engine_enum = src_info->engine_enum;

		switch (engine_enum) {
		case ENGINE_GR_GK20A:
			dst_info.engine_id = NVGPU_GPU_ENGINE_ID_GR;
			break;

		case ENGINE_GRCE_GK20A:
			dst_info.engine_id = NVGPU_GPU_ENGINE_ID_GR_COPY;
			break;

		case ENGINE_ASYNC_CE_GK20A:
			dst_info.engine_id = NVGPU_GPU_ENGINE_ID_ASYNC_COPY;
			break;

		default:
			gk20a_err(dev_from_gk20a(g), "Unmapped engine enum %u\n",
				  engine_enum);
			continue;
		}

		dst_info.engine_instance = src_info->inst_id;
		dst_info.runlist_id = src_info->runlist_id;

		if (report_index < max_buffer_engines) {
			err = copy_to_user(&dst_item_list[report_index],
					   &dst_info, sizeof(dst_info));
			if (err)
				goto clean_up;
		}

		++report_index;
	}

	args->engine_info_buf_size =
		report_index * sizeof(struct nvgpu_gpu_get_engine_info_item);

clean_up:
	return err;
}

static int nvgpu_gpu_alloc_vidmem(struct gk20a *g,
		struct nvgpu_gpu_alloc_vidmem_args *args)
{
	u32 align = args->in.alignment ? args->in.alignment : SZ_4K;
	int fd;

	gk20a_dbg_fn("");

	/* not yet supported */
	if (WARN_ON(args->in.flags & NVGPU_GPU_ALLOC_VIDMEM_FLAG_CPU_MASK))
		return -EINVAL;

	/* not yet supported */
	if (WARN_ON(args->in.flags & NVGPU_GPU_ALLOC_VIDMEM_FLAG_VPR))
		return -EINVAL;

	if (args->in.size & (SZ_4K - 1))
		return -EINVAL;

	if (!args->in.size)
		return -EINVAL;

	if (align & (align - 1))
		return -EINVAL;

	if (align > roundup_pow_of_two(args->in.size)) {
		/* log this special case, buddy allocator detail */
		gk20a_warn(dev_from_gk20a(g),
			"alignment larger than buffer size rounded up to power of 2 is not supported");
		return -EINVAL;
	}

	fd = gk20a_vidmem_buf_alloc(g, args->in.size);
	if (fd < 0)
		return fd;

	args->out.dmabuf_fd = fd;

	gk20a_dbg_fn("done, fd=%d", fd);

	return 0;
}

static int nvgpu_gpu_get_memory_state(struct gk20a *g,
		struct nvgpu_gpu_get_memory_state_args *args)
{
	int err;

	gk20a_dbg_fn("");

	if (args->reserved[0] || args->reserved[1] ||
	    args->reserved[2] || args->reserved[3])
		return -EINVAL;

	err = gk20a_vidmem_get_space(g, &args->total_free_bytes);

	gk20a_dbg_fn("done, err=%d, bytes=%lld", err, args->total_free_bytes);

	return err;
}

#ifdef CONFIG_ARCH_TEGRA_18x_SOC
static int nvgpu_gpu_clk_get_vf_points(struct gk20a *g,
		struct gk20a_ctrl_priv *priv,
		struct nvgpu_gpu_clk_vf_points_args *args)
{
	struct nvgpu_gpu_clk_vf_point clk_point;
	struct nvgpu_gpu_clk_vf_point __user *entry;
	struct nvgpu_clk_session *session = priv->clk_session;
	u32 clk_domains = 0;
	int err;
	u16 last_mhz;
	u16 *fpoints;
	u32 i;
	u32 max_points = 0;
	u32 num_points = 0;
	u16 min_mhz;
	u16 max_mhz;

	gk20a_dbg_fn("");

	if (!session || args->flags)
		return -EINVAL;

	clk_domains = nvgpu_clk_arb_get_arbiter_clk_domains(g);
	args->num_entries = 0;

	if (!nvgpu_clk_arb_is_valid_domain(g, args->clk_domain))
		return -EINVAL;

	err = nvgpu_clk_arb_get_arbiter_clk_f_points(g,
			args->clk_domain, &max_points, NULL);
	if (err)
		return err;

	if (!args->max_entries) {
		args->max_entries = max_points;
		return 0;
	}

	if (args->max_entries < max_points)
		return -EINVAL;

	err = nvgpu_clk_arb_get_arbiter_clk_range(g, args->clk_domain,
			&min_mhz, &max_mhz);
	if (err)
		return err;

	fpoints = nvgpu_kcalloc(g, max_points, sizeof(u16));
	if (!fpoints)
		return -ENOMEM;

	err = nvgpu_clk_arb_get_arbiter_clk_f_points(g,
			args->clk_domain, &max_points, fpoints);
	if (err)
		goto fail;

	entry = (struct nvgpu_gpu_clk_vf_point __user *)
			(uintptr_t)args->clk_vf_point_entries;

	last_mhz = 0;
	num_points = 0;
	for (i = 0; (i < max_points) && !err; i++) {

		/* filter out duplicate frequencies */
		if (fpoints[i] == last_mhz)
			continue;

		/* filter out out-of-range frequencies */
		if ((fpoints[i] < min_mhz) || (fpoints[i] > max_mhz))
			continue;

		last_mhz = fpoints[i];
		clk_point.freq_hz = MHZ_TO_HZ(fpoints[i]);

		err = copy_to_user((void __user *)entry, &clk_point,
				sizeof(clk_point));

		num_points++;
		entry++;
	}

	args->num_entries = num_points;

fail:
	nvgpu_kfree(g, fpoints);
	return err;
}

static int nvgpu_gpu_clk_get_range(struct gk20a *g,
		struct gk20a_ctrl_priv *priv,
		struct nvgpu_gpu_clk_range_args *args)
{
	struct nvgpu_gpu_clk_range clk_range;
	struct nvgpu_gpu_clk_range __user *entry;
	struct nvgpu_clk_session *session = priv->clk_session;

	u32 clk_domains = 0;
	u32 num_domains;
	u32 num_entries;
	u32 i;
	int bit;
	int err;
	u16 min_mhz, max_mhz;

	gk20a_dbg_fn("");

	if (!session)
		return -EINVAL;

	clk_domains = nvgpu_clk_arb_get_arbiter_clk_domains(g);
	num_domains = hweight_long(clk_domains);

	if (!args->flags) {
		if (!args->num_entries) {
			args->num_entries = num_domains;
			return 0;
		}

		if (args->num_entries < num_domains)
			return -EINVAL;

		args->num_entries = 0;
		num_entries = num_domains;

	} else {
		if (args->flags != NVGPU_GPU_CLK_FLAG_SPECIFIC_DOMAINS)
			return -EINVAL;

		num_entries = args->num_entries;
		if (num_entries > num_domains)
			return -EINVAL;
	}

	entry = (struct nvgpu_gpu_clk_range __user *)
			(uintptr_t)args->clk_range_entries;

	for (i = 0; i < num_entries; i++, entry++) {

		if (args->flags == NVGPU_GPU_CLK_FLAG_SPECIFIC_DOMAINS) {
			if (copy_from_user(&clk_range, (void __user *)entry,
					sizeof(clk_range)))
				return -EFAULT;
		} else {
			bit = ffs(clk_domains) - 1;
			clk_range.clk_domain = bit;
			clk_domains &= ~BIT(bit);
		}

		clk_range.flags = 0;
		err = nvgpu_clk_arb_get_arbiter_clk_range(g,
				clk_range.clk_domain,
				&min_mhz, &max_mhz);
		clk_range.min_hz = MHZ_TO_HZ(min_mhz);
		clk_range.max_hz = MHZ_TO_HZ(max_mhz);

		if (err)
			return err;

		err = copy_to_user(entry, &clk_range, sizeof(clk_range));
		if (err)
			return -EFAULT;
	}

	args->num_entries = num_entries;

	return 0;
}


static int nvgpu_gpu_clk_set_info(struct gk20a *g,
		struct gk20a_ctrl_priv *priv,
		struct nvgpu_gpu_clk_set_info_args *args)
{
	struct nvgpu_gpu_clk_info clk_info;
	struct nvgpu_gpu_clk_info __user *entry;
	struct nvgpu_clk_session *session = priv->clk_session;

	int fd;
	u32 clk_domains = 0;
	u16 freq_mhz;
	int i;
	int ret;

	gk20a_dbg_fn("");

	if (!session || args->flags)
		return -EINVAL;

	gk20a_dbg_info("line=%d", __LINE__);

	clk_domains = nvgpu_clk_arb_get_arbiter_clk_domains(g);
	if (!clk_domains)
		return -EINVAL;

	entry = (struct nvgpu_gpu_clk_info __user *)
			(uintptr_t)args->clk_info_entries;

	gk20a_dbg_info("line=%d", __LINE__);

	for (i = 0; i < args->num_entries; i++, entry++) {

		gk20a_dbg_info("line=%d", __LINE__);
		if (copy_from_user(&clk_info, entry, sizeof(clk_info)))
			return -EFAULT;

		gk20a_dbg_info("i=%d domain=0x%08x", i, clk_info.clk_domain);

		if (!nvgpu_clk_arb_is_valid_domain(g, clk_info.clk_domain))
			return -EINVAL;
	}

	entry = (struct nvgpu_gpu_clk_info __user *)
			(uintptr_t)args->clk_info_entries;

	ret = nvgpu_clk_arb_install_request_fd(g, session, &fd);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->num_entries; i++, entry++) {

		if (copy_from_user(&clk_info, (void __user *)entry,
				sizeof(clk_info)))
			return -EFAULT;
		freq_mhz = HZ_TO_MHZ(clk_info.freq_hz);

		nvgpu_clk_arb_set_session_target_mhz(session, fd,
				clk_info.clk_domain, freq_mhz);
	}

	ret = nvgpu_clk_arb_commit_request_fd(g, session, fd);
	if (ret < 0)
		return ret;

	args->completion_fd = fd;

	return ret;
}

static int nvgpu_gpu_clk_get_info(struct gk20a *g,
		struct gk20a_ctrl_priv *priv,
		struct nvgpu_gpu_clk_get_info_args *args)
{
	struct nvgpu_gpu_clk_info clk_info;
	struct nvgpu_gpu_clk_info __user *entry;
	struct nvgpu_clk_session *session = priv->clk_session;
	u32 clk_domains = 0;
	u32 num_domains;
	u32 num_entries;
	u32 i;
	u16 freq_mhz;
	int err;
	int bit;

	gk20a_dbg_fn("");

	if (!session)
		return -EINVAL;

	clk_domains = nvgpu_clk_arb_get_arbiter_clk_domains(g);
	num_domains = hweight_long(clk_domains);

	if (!args->flags) {
		if (!args->num_entries) {
			args->num_entries = num_domains;
			return 0;
		}

		if (args->num_entries < num_domains)
			return -EINVAL;

		args->num_entries = 0;
		num_entries = num_domains;

	} else {
		if (args->flags != NVGPU_GPU_CLK_FLAG_SPECIFIC_DOMAINS)
			return -EINVAL;

		num_entries = args->num_entries;
		if (num_entries > num_domains * 3)
			return -EINVAL;
	}

	entry = (struct nvgpu_gpu_clk_info __user *)
			(uintptr_t)args->clk_info_entries;

	for (i = 0; i < num_entries; i++, entry++) {

		if (args->flags == NVGPU_GPU_CLK_FLAG_SPECIFIC_DOMAINS) {
			if (copy_from_user(&clk_info, (void __user *)entry,
					sizeof(clk_info)))
				return -EFAULT;
		} else {
			bit = ffs(clk_domains) - 1;
			clk_info.clk_domain = bit;
			clk_domains &= ~BIT(bit);
			clk_info.clk_type = args->clk_type;
		}

		switch (clk_info.clk_type) {
		case NVGPU_GPU_CLK_TYPE_TARGET:
			err = nvgpu_clk_arb_get_session_target_mhz(session,
					clk_info.clk_domain, &freq_mhz);
			break;
		case NVGPU_GPU_CLK_TYPE_ACTUAL:
			err = nvgpu_clk_arb_get_arbiter_actual_mhz(g,
					clk_info.clk_domain, &freq_mhz);
			break;
		case NVGPU_GPU_CLK_TYPE_EFFECTIVE:
			err = nvgpu_clk_arb_get_arbiter_effective_mhz(g,
					clk_info.clk_domain, &freq_mhz);
			break;
		default:
			freq_mhz = 0;
			err = -EINVAL;
			break;
		}
		if (err)
			return err;

		clk_info.flags = 0;
		clk_info.freq_hz = MHZ_TO_HZ(freq_mhz);

		err = copy_to_user((void __user *)entry, &clk_info,
				sizeof(clk_info));
		if (err)
			return -EFAULT;
	}

	args->num_entries = num_entries;

	return 0;
}

static int nvgpu_gpu_get_event_fd(struct gk20a *g,
		struct gk20a_ctrl_priv *priv,
		struct nvgpu_gpu_get_event_fd_args *args)
{
	struct nvgpu_clk_session *session = priv->clk_session;

	gk20a_dbg_fn("");

	if (!session)
		return -EINVAL;

	return nvgpu_clk_arb_install_event_fd(g, session, &args->event_fd,
			args->flags);
}

static int nvgpu_gpu_get_voltage(struct gk20a *g,
		struct nvgpu_gpu_get_voltage_args *args)
{
	int err = -EINVAL;

	gk20a_dbg_fn("");

	if (args->reserved)
		return -EINVAL;

	if (!(g->gpu_characteristics.flags & NVGPU_GPU_FLAGS_SUPPORT_GET_VOLTAGE))
		return -EINVAL;

	err = gk20a_busy(g);
	if (err)
		return err;

	switch (args->which) {
	case NVGPU_GPU_VOLTAGE_CORE:
		err = volt_get_voltage(g, CTRL_VOLT_DOMAIN_LOGIC, &args->voltage);
		break;
	case NVGPU_GPU_VOLTAGE_SRAM:
		err = volt_get_voltage(g, CTRL_VOLT_DOMAIN_SRAM, &args->voltage);
		break;
	case NVGPU_GPU_VOLTAGE_BUS:
		err = pmgr_pwr_devices_get_voltage(g, &args->voltage);
		break;
	default:
		err = -EINVAL;
	}

	gk20a_idle(g);

	return err;
}

static int nvgpu_gpu_get_current(struct gk20a *g,
		struct nvgpu_gpu_get_current_args *args)
{
	int err;

	gk20a_dbg_fn("");

	if (args->reserved[0] || args->reserved[1] || args->reserved[2])
		return -EINVAL;

	if (!(g->gpu_characteristics.flags & NVGPU_GPU_FLAGS_SUPPORT_GET_CURRENT))
		return -EINVAL;

	err = gk20a_busy(g);
	if (err)
		return err;

	err = pmgr_pwr_devices_get_current(g, &args->currnt);

	gk20a_idle(g);

	return err;
}

static int nvgpu_gpu_get_power(struct gk20a *g,
		struct nvgpu_gpu_get_power_args *args)
{
	int err;

	gk20a_dbg_fn("");

	if (args->reserved[0] || args->reserved[1] || args->reserved[2])
		return -EINVAL;

	if (!(g->gpu_characteristics.flags & NVGPU_GPU_FLAGS_SUPPORT_GET_POWER))
		return -EINVAL;

	err = gk20a_busy(g);
	if (err)
		return err;

	err = pmgr_pwr_devices_get_power(g, &args->power);

	gk20a_idle(g);

	return err;
}

static int nvgpu_gpu_get_temperature(struct gk20a *g,
		struct nvgpu_gpu_get_temperature_args *args)
{
	int err;
	u32 temp_f24_8;

	gk20a_dbg_fn("");

	if (args->reserved[0] || args->reserved[1] || args->reserved[2])
		return -EINVAL;

	if (!g->ops.therm.get_internal_sensor_curr_temp)
		return -EINVAL;

	err = gk20a_busy(g);
	if (err)
		return err;

	err = g->ops.therm.get_internal_sensor_curr_temp(g, &temp_f24_8);

	gk20a_idle(g);

	args->temp_f24_8 = (s32)temp_f24_8;

	return err;
}
#endif

static int nvgpu_gpu_set_therm_alert_limit(struct gk20a *g,
		struct nvgpu_gpu_set_therm_alert_limit_args *args)
{
	int err;

	gk20a_dbg_fn("");

	if (args->reserved[0] || args->reserved[1] || args->reserved[2])
		return -EINVAL;

	if (!g->ops.therm.configure_therm_alert)
		return -EINVAL;

	err = gk20a_busy(g);
	if (err)
		return err;

	err = g->ops.therm.configure_therm_alert(g, args->temp_f24_8);

	gk20a_idle(g);

	return err;
}

long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct gk20a_ctrl_priv *priv = filp->private_data;
	struct gk20a *g = priv->g;
	struct nvgpu_gpu_zcull_get_ctx_size_args *get_ctx_size_args;
	struct nvgpu_gpu_zcull_get_info_args *get_info_args;
	struct nvgpu_gpu_zbc_set_table_args *set_table_args;
	struct nvgpu_gpu_zbc_query_table_args *query_table_args;
	u8 buf[NVGPU_GPU_IOCTL_MAX_ARG_SIZE];
	struct gr_zcull_info *zcull_info;
	struct zbc_entry *zbc_val;
	struct zbc_query_params *zbc_tbl;
	int i, err = 0;

	gk20a_dbg_fn("");

	if ((_IOC_TYPE(cmd) != NVGPU_GPU_IOCTL_MAGIC) ||
	    (_IOC_NR(cmd) == 0) ||
	    (_IOC_NR(cmd) > NVGPU_GPU_IOCTL_LAST) ||
	    (_IOC_SIZE(cmd) > NVGPU_GPU_IOCTL_MAX_ARG_SIZE))
		return -EINVAL;

	memset(buf, 0, sizeof(buf));
	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	if (!g->gr.sw_ready) {
		err = gk20a_busy(g);
		if (err)
			return err;

		gk20a_idle(g);
	}

	switch (cmd) {
	case NVGPU_GPU_IOCTL_ZCULL_GET_CTX_SIZE:
		get_ctx_size_args = (struct nvgpu_gpu_zcull_get_ctx_size_args *)buf;

		get_ctx_size_args->size = gr_gk20a_get_ctxsw_zcull_size(g, &g->gr);

		break;
	case NVGPU_GPU_IOCTL_ZCULL_GET_INFO:
		get_info_args = (struct nvgpu_gpu_zcull_get_info_args *)buf;

		memset(get_info_args, 0, sizeof(struct nvgpu_gpu_zcull_get_info_args));

		zcull_info = nvgpu_kzalloc(g, sizeof(struct gr_zcull_info));
		if (zcull_info == NULL)
			return -ENOMEM;

		err = g->ops.gr.get_zcull_info(g, &g->gr, zcull_info);
		if (err) {
			nvgpu_kfree(g, zcull_info);
			break;
		}

		get_info_args->width_align_pixels = zcull_info->width_align_pixels;
		get_info_args->height_align_pixels = zcull_info->height_align_pixels;
		get_info_args->pixel_squares_by_aliquots = zcull_info->pixel_squares_by_aliquots;
		get_info_args->aliquot_total = zcull_info->aliquot_total;
		get_info_args->region_byte_multiplier = zcull_info->region_byte_multiplier;
		get_info_args->region_header_size = zcull_info->region_header_size;
		get_info_args->subregion_header_size = zcull_info->subregion_header_size;
		get_info_args->subregion_width_align_pixels = zcull_info->subregion_width_align_pixels;
		get_info_args->subregion_height_align_pixels = zcull_info->subregion_height_align_pixels;
		get_info_args->subregion_count = zcull_info->subregion_count;

		nvgpu_kfree(g, zcull_info);
		break;
	case NVGPU_GPU_IOCTL_ZBC_SET_TABLE:
		set_table_args = (struct nvgpu_gpu_zbc_set_table_args *)buf;

		zbc_val = nvgpu_kzalloc(g, sizeof(struct zbc_entry));
		if (zbc_val == NULL)
			return -ENOMEM;

		zbc_val->format = set_table_args->format;
		zbc_val->type = set_table_args->type;

		switch (zbc_val->type) {
		case GK20A_ZBC_TYPE_COLOR:
			for (i = 0; i < GK20A_ZBC_COLOR_VALUE_SIZE; i++) {
				zbc_val->color_ds[i] = set_table_args->color_ds[i];
				zbc_val->color_l2[i] = set_table_args->color_l2[i];
			}
			break;
		case GK20A_ZBC_TYPE_DEPTH:
		case T19X_ZBC:
			zbc_val->depth = set_table_args->depth;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			err = gk20a_busy(g);
			if (!err) {
				err = g->ops.gr.zbc_set_table(g, &g->gr,
							      zbc_val);
				gk20a_idle(g);
			}
		}

		if (zbc_val)
			nvgpu_kfree(g, zbc_val);
		break;
	case NVGPU_GPU_IOCTL_ZBC_QUERY_TABLE:
		query_table_args = (struct nvgpu_gpu_zbc_query_table_args *)buf;

		zbc_tbl = nvgpu_kzalloc(g, sizeof(struct zbc_query_params));
		if (zbc_tbl == NULL)
			return -ENOMEM;

		zbc_tbl->type = query_table_args->type;
		zbc_tbl->index_size = query_table_args->index_size;

		err = g->ops.gr.zbc_query_table(g, &g->gr, zbc_tbl);

		if (!err) {
			switch (zbc_tbl->type) {
			case GK20A_ZBC_TYPE_COLOR:
				for (i = 0; i < GK20A_ZBC_COLOR_VALUE_SIZE; i++) {
					query_table_args->color_ds[i] = zbc_tbl->color_ds[i];
					query_table_args->color_l2[i] = zbc_tbl->color_l2[i];
				}
				break;
			case GK20A_ZBC_TYPE_DEPTH:
			case T19X_ZBC:
				query_table_args->depth = zbc_tbl->depth;
				break;
			case GK20A_ZBC_TYPE_INVALID:
				query_table_args->index_size = zbc_tbl->index_size;
				break;
			default:
				err = -EINVAL;
			}
			if (!err) {
				query_table_args->format = zbc_tbl->format;
				query_table_args->ref_cnt = zbc_tbl->ref_cnt;
			}
		}

		if (zbc_tbl)
			nvgpu_kfree(g, zbc_tbl);
		break;

	case NVGPU_GPU_IOCTL_GET_CHARACTERISTICS:
		err = gk20a_ctrl_ioctl_gpu_characteristics(
			g, (struct nvgpu_gpu_get_characteristics *)buf);
		break;
	case NVGPU_GPU_IOCTL_PREPARE_COMPRESSIBLE_READ:
		err = gk20a_ctrl_prepare_compressible_read(g,
			(struct nvgpu_gpu_prepare_compressible_read_args *)buf);
		break;
	case NVGPU_GPU_IOCTL_MARK_COMPRESSIBLE_WRITE:
		err = gk20a_ctrl_mark_compressible_write(g,
			(struct nvgpu_gpu_mark_compressible_write_args *)buf);
		break;
	case NVGPU_GPU_IOCTL_ALLOC_AS:
		err = gk20a_ctrl_alloc_as(g,
			(struct nvgpu_alloc_as_args *)buf);
		break;
	case NVGPU_GPU_IOCTL_OPEN_TSG:
		err = gk20a_ctrl_open_tsg(g,
			(struct nvgpu_gpu_open_tsg_args *)buf);
		break;
	case NVGPU_GPU_IOCTL_GET_TPC_MASKS:
		err = gk20a_ctrl_get_tpc_masks(g,
			(struct nvgpu_gpu_get_tpc_masks_args *)buf);
		break;
	case NVGPU_GPU_IOCTL_GET_FBP_L2_MASKS:
		err = gk20a_ctrl_get_fbp_l2_masks(g,
			(struct nvgpu_gpu_get_fbp_l2_masks_args *)buf);
		break;
	case NVGPU_GPU_IOCTL_OPEN_CHANNEL:
		/* this arg type here, but ..gpu_open_channel_args in nvgpu.h
		 * for consistency - they are the same */
		err = gk20a_channel_open_ioctl(g,
			(struct nvgpu_channel_open_args *)buf);
		break;
	case NVGPU_GPU_IOCTL_FLUSH_L2:
		err = nvgpu_gpu_ioctl_l2_fb_ops(g,
			(struct nvgpu_gpu_l2_fb_args *)buf);
		break;
	case NVGPU_GPU_IOCTL_INVAL_ICACHE:
		err = gr_gk20a_elpg_protected_call(g,
			nvgpu_gpu_ioctl_inval_icache(g, (struct nvgpu_gpu_inval_icache_args *)buf));
		break;

	case NVGPU_GPU_IOCTL_SET_MMUDEBUG_MODE:
		err = nvgpu_gpu_ioctl_set_mmu_debug_mode(g,
			(struct nvgpu_gpu_mmu_debug_mode_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_SET_SM_DEBUG_MODE:
		err = gr_gk20a_elpg_protected_call(g,
			nvgpu_gpu_ioctl_set_debug_mode(g, (struct nvgpu_gpu_sm_debug_mode_args *)buf));
		break;

	case NVGPU_GPU_IOCTL_TRIGGER_SUSPEND:
		err = nvgpu_gpu_ioctl_trigger_suspend(g);
		break;

	case NVGPU_GPU_IOCTL_WAIT_FOR_PAUSE:
		err = nvgpu_gpu_ioctl_wait_for_pause(g,
			(struct nvgpu_gpu_wait_pause_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_RESUME_FROM_PAUSE:
		err = nvgpu_gpu_ioctl_resume_from_pause(g);
		break;

	case NVGPU_GPU_IOCTL_CLEAR_SM_ERRORS:
		err = nvgpu_gpu_ioctl_clear_sm_errors(g);
		break;

	case NVGPU_GPU_IOCTL_GET_TPC_EXCEPTION_EN_STATUS:
		err = nvgpu_gpu_ioctl_has_any_exception(g,
			(struct nvgpu_gpu_tpc_exception_en_status_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_NUM_VSMS:
		err = gk20a_ctrl_get_num_vsms(g,
			(struct nvgpu_gpu_num_vsms *)buf);
		break;
	case NVGPU_GPU_IOCTL_VSMS_MAPPING:
		err = gk20a_ctrl_vsm_mapping(g,
			(struct nvgpu_gpu_vsms_mapping *)buf);
		break;

	case NVGPU_GPU_IOCTL_GET_BUFFER_INFO:
		err = gk20a_ctrl_get_buffer_info(g,
			(struct nvgpu_gpu_get_buffer_info_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_GET_CPU_TIME_CORRELATION_INFO:
		err = nvgpu_gpu_get_cpu_time_correlation_info(g,
			(struct nvgpu_gpu_get_cpu_time_correlation_info_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_GET_GPU_TIME:
		err = nvgpu_gpu_get_gpu_time(g,
			(struct nvgpu_gpu_get_gpu_time_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_GET_ENGINE_INFO:
		err = nvgpu_gpu_get_engine_info(g,
			(struct nvgpu_gpu_get_engine_info_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_ALLOC_VIDMEM:
		err = nvgpu_gpu_alloc_vidmem(g,
			(struct nvgpu_gpu_alloc_vidmem_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_GET_MEMORY_STATE:
		err = nvgpu_gpu_get_memory_state(g,
			(struct nvgpu_gpu_get_memory_state_args *)buf);
		break;

#ifdef CONFIG_ARCH_TEGRA_18x_SOC
	case NVGPU_GPU_IOCTL_CLK_GET_RANGE:
		err = nvgpu_gpu_clk_get_range(g, priv,
			(struct nvgpu_gpu_clk_range_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_CLK_GET_VF_POINTS:
		err = nvgpu_gpu_clk_get_vf_points(g, priv,
			(struct nvgpu_gpu_clk_vf_points_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_CLK_SET_INFO:
		err = nvgpu_gpu_clk_set_info(g, priv,
			(struct nvgpu_gpu_clk_set_info_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_CLK_GET_INFO:
		err = nvgpu_gpu_clk_get_info(g, priv,
			(struct nvgpu_gpu_clk_get_info_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_GET_EVENT_FD:
		err = nvgpu_gpu_get_event_fd(g, priv,
			(struct nvgpu_gpu_get_event_fd_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_GET_VOLTAGE:
		err = nvgpu_gpu_get_voltage(g,
			(struct nvgpu_gpu_get_voltage_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_GET_CURRENT:
		err = nvgpu_gpu_get_current(g,
			(struct nvgpu_gpu_get_current_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_GET_POWER:
		err = nvgpu_gpu_get_power(g,
			(struct nvgpu_gpu_get_power_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_GET_TEMPERATURE:
		err = nvgpu_gpu_get_temperature(g,
			(struct nvgpu_gpu_get_temperature_args *)buf);
		break;
#endif

	case NVGPU_GPU_IOCTL_SET_THERM_ALERT_LIMIT:
		err = nvgpu_gpu_set_therm_alert_limit(g,
			(struct nvgpu_gpu_set_therm_alert_limit_args *)buf);
		break;

	default:
		dev_dbg(dev_from_gk20a(g), "unrecognized gpu ioctl cmd: 0x%x", cmd);
		err = -ENOTTY;
		break;
	}

	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
		err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));

	return err;
}
diff --git a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.h b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.h
deleted file mode 100644
index 26ca4e20..00000000
--- a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.h
+++ /dev/null
@@ -1,23 +0,0 @@
/*
 * Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CTRL_GK20A_H
#define CTRL_GK20A_H

int gk20a_ctrl_dev_open(struct inode *inode, struct file *filp);
int gk20a_ctrl_dev_release(struct inode *inode, struct file *filp);
long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);

#endif /* CTRL_GK20A_H */
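
For context on what "ctrl IOCTL creation" entails on the Linux side, the devnode is standard character-device wiring around the three entry points declared above. A minimal sketch follows; the dev_t allocation, device name, and module init/exit structure are illustrative assumptions, not the actual registration code this commit moves into the Linux module:

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/module.h>

#include "ctrl_gk20a.h"

/* Bind the ctrl entry points declared in ctrl_gk20a.h to a chardev. */
static const struct file_operations gk20a_ctrl_fops = {
	.owner		= THIS_MODULE,
	.open		= gk20a_ctrl_dev_open,
	.release	= gk20a_ctrl_dev_release,
	.unlocked_ioctl	= gk20a_ctrl_dev_ioctl,
};

static struct cdev ctrl_cdev;
static dev_t ctrl_devt;

static int __init ctrl_node_init(void)
{
	/* Device name is hypothetical; error handling is simplified. */
	int err = alloc_chrdev_region(&ctrl_devt, 0, 1, "nvgpu-ctrl");

	if (err)
		return err;

	cdev_init(&ctrl_cdev, &gk20a_ctrl_fops);
	ctrl_cdev.owner = THIS_MODULE;
	return cdev_add(&ctrl_cdev, ctrl_devt, 1);
}

static void __exit ctrl_node_exit(void)
{
	cdev_del(&ctrl_cdev);
	unregister_chrdev_region(ctrl_devt, 1);
}

module_init(ctrl_node_init);
module_exit(ctrl_node_exit);
MODULE_LICENSE("GPL v2");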