Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c  1962
1 file changed, 0 insertions, 1962 deletions

diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
deleted file mode 100644
index 73a8131d..00000000
--- a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
+++ /dev/null
@@ -1,1962 +0,0 @@
/*
 * Copyright (c) 2011-2018, NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/uaccess.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/fs.h>
#include <uapi/linux/nvgpu.h>

#include <nvgpu/bitops.h>
#include <nvgpu/kmem.h>
#include <nvgpu/bug.h>
#include <nvgpu/ptimer.h>
#include <nvgpu/vidmem.h>
#include <nvgpu/log.h>
#include <nvgpu/enabled.h>
#include <nvgpu/sizes.h>

#include <nvgpu/linux/vidmem.h>

#include "ioctl_ctrl.h"
#include "ioctl_dbg.h"
#include "ioctl_as.h"
#include "ioctl_tsg.h"
#include "ioctl_channel.h"
#include "gk20a/gk20a.h"
#include "gk20a/fence_gk20a.h"

#include "platform_gk20a.h"
#include "os_linux.h"
#include "dmabuf.h"
#include "channel.h"

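/*
 * Fixed-point Hz <-> MHz helpers. Multiplying by 0x10C8 and shifting
 * right by 32 approximates a division by 1,000,000 (0x10C8 / 2^32 is
 * roughly 1e-6); inputs above the 0xF414F9CD7 threshold (~65.5 GHz)
 * would overflow the 16-bit MHz result, so they saturate to 0xffff.
 */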
#define HZ_TO_MHZ(a) ((a > 0xF414F9CD7ULL) ? 0xffff : (a >> 32) ? \
		(u32) ((a * 0x10C8ULL) >> 32) : (u16) ((u32) a/MHZ))
#define MHZ_TO_HZ(a) ((u64)a * MHZ)

struct gk20a_ctrl_priv {
	struct device *dev;
	struct gk20a *g;
	struct nvgpu_clk_session *clk_session;
};

static u32 gk20a_as_translate_as_alloc_flags(struct gk20a *g, u32 flags)
{
	u32 core_flags = 0;

	if (flags & NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED)
		core_flags |= NVGPU_AS_ALLOC_USERSPACE_MANAGED;

	return core_flags;
}

int gk20a_ctrl_dev_open(struct inode *inode, struct file *filp)
{
	struct nvgpu_os_linux *l;
	struct gk20a *g;
	struct gk20a_ctrl_priv *priv;
	int err = 0;

	l = container_of(inode->i_cdev,
			 struct nvgpu_os_linux, ctrl.cdev);
	g = gk20a_get(&l->g);
	if (!g)
		return -ENODEV;

	nvgpu_log_fn(g, " ");

	priv = nvgpu_kzalloc(g, sizeof(struct gk20a_ctrl_priv));
	if (!priv) {
		err = -ENOMEM;
		goto free_ref;
	}
	filp->private_data = priv;
	priv->dev = dev_from_gk20a(g);
	/*
	 * We don't close the arbiter fds after driver teardown to support
	 * GPU_LOST events, so we store g here, instead of dereferencing the
	 * dev structure on teardown.
	 */
	priv->g = g;

	if (!g->sw_ready) {
		err = gk20a_busy(g);
		if (err)
			goto free_ref;
		gk20a_idle(g);
	}

	err = nvgpu_clk_arb_init_session(g, &priv->clk_session);
free_ref:
	if (err)
		gk20a_put(g);
	return err;
}

int gk20a_ctrl_dev_release(struct inode *inode, struct file *filp)
{
	struct gk20a_ctrl_priv *priv = filp->private_data;
	struct gk20a *g = priv->g;

	nvgpu_log_fn(g, " ");

	if (priv->clk_session)
		nvgpu_clk_arb_release_session(g, priv->clk_session);

	gk20a_put(g);
	nvgpu_kfree(g, priv);

	return 0;
}

struct nvgpu_flags_mapping {
	u64 ioctl_flag;
	int enabled_flag;
};

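/*
 * Maps each user-visible NVGPU_GPU_FLAGS_* bit reported through the
 * characteristics ioctl to the corresponding internal nvgpu_is_enabled()
 * flag; the table is scanned linearly when the flags word is built below.
 */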
static struct nvgpu_flags_mapping flags_mapping[] = {
	{NVGPU_GPU_FLAGS_HAS_SYNCPOINTS,
		NVGPU_HAS_SYNCPOINTS},
	{NVGPU_GPU_FLAGS_SUPPORT_PARTIAL_MAPPINGS,
		NVGPU_SUPPORT_PARTIAL_MAPPINGS},
	{NVGPU_GPU_FLAGS_SUPPORT_SPARSE_ALLOCS,
		NVGPU_SUPPORT_SPARSE_ALLOCS},
	{NVGPU_GPU_FLAGS_SUPPORT_SYNC_FENCE_FDS,
		NVGPU_SUPPORT_SYNC_FENCE_FDS},
	{NVGPU_GPU_FLAGS_SUPPORT_CYCLE_STATS,
		NVGPU_SUPPORT_CYCLE_STATS},
	{NVGPU_GPU_FLAGS_SUPPORT_CYCLE_STATS_SNAPSHOT,
		NVGPU_SUPPORT_CYCLE_STATS_SNAPSHOT},
	{NVGPU_GPU_FLAGS_SUPPORT_USERSPACE_MANAGED_AS,
		NVGPU_SUPPORT_USERSPACE_MANAGED_AS},
	{NVGPU_GPU_FLAGS_SUPPORT_TSG,
		NVGPU_SUPPORT_TSG},
	{NVGPU_GPU_FLAGS_SUPPORT_CLOCK_CONTROLS,
		NVGPU_SUPPORT_CLOCK_CONTROLS},
	{NVGPU_GPU_FLAGS_SUPPORT_GET_VOLTAGE,
		NVGPU_SUPPORT_GET_VOLTAGE},
	{NVGPU_GPU_FLAGS_SUPPORT_GET_CURRENT,
		NVGPU_SUPPORT_GET_CURRENT},
	{NVGPU_GPU_FLAGS_SUPPORT_GET_POWER,
		NVGPU_SUPPORT_GET_POWER},
	{NVGPU_GPU_FLAGS_SUPPORT_GET_TEMPERATURE,
		NVGPU_SUPPORT_GET_TEMPERATURE},
	{NVGPU_GPU_FLAGS_SUPPORT_SET_THERM_ALERT_LIMIT,
		NVGPU_SUPPORT_SET_THERM_ALERT_LIMIT},
	{NVGPU_GPU_FLAGS_SUPPORT_DEVICE_EVENTS,
		NVGPU_SUPPORT_DEVICE_EVENTS},
	{NVGPU_GPU_FLAGS_SUPPORT_FECS_CTXSW_TRACE,
		NVGPU_SUPPORT_FECS_CTXSW_TRACE},
	{NVGPU_GPU_FLAGS_SUPPORT_DETERMINISTIC_SUBMIT_NO_JOBTRACKING,
		NVGPU_SUPPORT_DETERMINISTIC_SUBMIT_NO_JOBTRACKING},
	{NVGPU_GPU_FLAGS_SUPPORT_DETERMINISTIC_SUBMIT_FULL,
		NVGPU_SUPPORT_DETERMINISTIC_SUBMIT_FULL},
	{NVGPU_GPU_FLAGS_SUPPORT_DETERMINISTIC_OPTS,
		NVGPU_SUPPORT_DETERMINISTIC_OPTS},
	{NVGPU_GPU_FLAGS_SUPPORT_SYNCPOINT_ADDRESS,
		NVGPU_SUPPORT_SYNCPOINT_ADDRESS},
	{NVGPU_GPU_FLAGS_SUPPORT_USER_SYNCPOINT,
		NVGPU_SUPPORT_USER_SYNCPOINT},
	{NVGPU_GPU_FLAGS_SUPPORT_IO_COHERENCE,
		NVGPU_SUPPORT_IO_COHERENCE},
	{NVGPU_GPU_FLAGS_SUPPORT_RESCHEDULE_RUNLIST,
		NVGPU_SUPPORT_RESCHEDULE_RUNLIST},
	{NVGPU_GPU_FLAGS_SUPPORT_MAP_DIRECT_KIND_CTRL,
		NVGPU_SUPPORT_MAP_DIRECT_KIND_CTRL},
	{NVGPU_GPU_FLAGS_ECC_ENABLED_SM_LRF,
		NVGPU_ECC_ENABLED_SM_LRF},
	{NVGPU_GPU_FLAGS_ECC_ENABLED_SM_SHM,
		NVGPU_ECC_ENABLED_SM_SHM},
	{NVGPU_GPU_FLAGS_ECC_ENABLED_TEX,
		NVGPU_ECC_ENABLED_TEX},
	{NVGPU_GPU_FLAGS_ECC_ENABLED_LTC,
		NVGPU_ECC_ENABLED_LTC},
	{NVGPU_GPU_FLAGS_SUPPORT_TSG_SUBCONTEXTS,
		NVGPU_SUPPORT_TSG_SUBCONTEXTS},
	{NVGPU_GPU_FLAGS_SUPPORT_SCG,
		NVGPU_SUPPORT_SCG},
	{NVGPU_GPU_FLAGS_SUPPORT_VPR,
		NVGPU_SUPPORT_VPR},
};

static u64 nvgpu_ctrl_ioctl_gpu_characteristics_flags(struct gk20a *g)
{
	unsigned int i;
	u64 ioctl_flags = 0;

	for (i = 0; i < sizeof(flags_mapping)/sizeof(*flags_mapping); i++) {
		if (nvgpu_is_enabled(g, flags_mapping[i].enabled_flag))
			ioctl_flags |= flags_mapping[i].ioctl_flag;
	}

	return ioctl_flags;
}

static void nvgpu_set_preemption_mode_flags(struct gk20a *g,
		struct nvgpu_gpu_characteristics *gpu)
{
	struct nvgpu_preemption_modes_rec preemption_mode_rec;

	g->ops.gr.get_preemption_mode_flags(g, &preemption_mode_rec);

	gpu->graphics_preemption_mode_flags =
		nvgpu_get_ioctl_graphics_preempt_mode_flags(
			preemption_mode_rec.graphics_preemption_mode_flags);
	gpu->compute_preemption_mode_flags =
		nvgpu_get_ioctl_compute_preempt_mode_flags(
			preemption_mode_rec.compute_preemption_mode_flags);

	gpu->default_graphics_preempt_mode =
		nvgpu_get_ioctl_graphics_preempt_mode(
			preemption_mode_rec.default_graphics_preempt_mode);
	gpu->default_compute_preempt_mode =
		nvgpu_get_ioctl_compute_preempt_mode(
			preemption_mode_rec.default_compute_preempt_mode);
}

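/*
 * Fills in the characteristics struct and copies out at most
 * gpu_characteristics_buf_size bytes; the full struct size is then
 * written back, so callers built against a differently sized struct
 * can see how much data the kernel actually provides.
 */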
static long
gk20a_ctrl_ioctl_gpu_characteristics(
	struct gk20a *g,
	struct nvgpu_gpu_get_characteristics *request)
{
	struct nvgpu_gpu_characteristics gpu;
	long err = 0;

	if (gk20a_busy(g)) {
		nvgpu_err(g, "failed to power on gpu");
		return -EINVAL;
	}

	memset(&gpu, 0, sizeof(gpu));

	gpu.L2_cache_size = g->ops.ltc.determine_L2_size_bytes(g);
	gpu.on_board_video_memory_size = 0; /* integrated GPU */

	gpu.num_gpc = g->gr.gpc_count;
	gpu.max_gpc_count = g->gr.max_gpc_count;

	gpu.num_tpc_per_gpc = g->gr.max_tpc_per_gpc_count;

	gpu.bus_type = NVGPU_GPU_BUS_TYPE_AXI; /* always AXI for now */

	gpu.compression_page_size = g->ops.fb.compression_page_size(g);

	gpu.gpc_mask = (1 << g->gr.gpc_count)-1;

	gpu.flags = nvgpu_ctrl_ioctl_gpu_characteristics_flags(g);

	gpu.arch = g->params.gpu_arch;
	gpu.impl = g->params.gpu_impl;
	gpu.rev = g->params.gpu_rev;
	gpu.reg_ops_limit = NVGPU_IOCTL_DBG_REG_OPS_LIMIT;
	gpu.map_buffer_batch_limit = nvgpu_is_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH) ?
		NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT : 0;
	gpu.twod_class = g->ops.get_litter_value(g, GPU_LIT_TWOD_CLASS);
	gpu.threed_class = g->ops.get_litter_value(g, GPU_LIT_THREED_CLASS);
	gpu.compute_class = g->ops.get_litter_value(g, GPU_LIT_COMPUTE_CLASS);
	gpu.gpfifo_class = g->ops.get_litter_value(g, GPU_LIT_GPFIFO_CLASS);
	gpu.inline_to_memory_class =
		g->ops.get_litter_value(g, GPU_LIT_I2M_CLASS);
	gpu.dma_copy_class =
		g->ops.get_litter_value(g, GPU_LIT_DMA_COPY_CLASS);

	gpu.vbios_version = g->bios.vbios_version;
	gpu.vbios_oem_version = g->bios.vbios_oem_version;

	gpu.big_page_size = nvgpu_mm_get_default_big_page_size(g);
	gpu.pde_coverage_bit_count =
		g->ops.mm.get_mmu_levels(g, gpu.big_page_size)[0].lo_bit[0];
	gpu.available_big_page_sizes = nvgpu_mm_get_available_big_page_sizes(g);

	gpu.sm_arch_sm_version = g->params.sm_arch_sm_version;
	gpu.sm_arch_spa_version = g->params.sm_arch_spa_version;
	gpu.sm_arch_warp_count = g->params.sm_arch_warp_count;

	gpu.max_css_buffer_size = g->gr.max_css_buffer_size;

	gpu.gpu_ioctl_nr_last = NVGPU_GPU_IOCTL_LAST;
	gpu.tsg_ioctl_nr_last = NVGPU_TSG_IOCTL_LAST;
	gpu.dbg_gpu_ioctl_nr_last = NVGPU_DBG_GPU_IOCTL_LAST;
	gpu.ioctl_channel_nr_last = NVGPU_IOCTL_CHANNEL_LAST;
	gpu.as_ioctl_nr_last = NVGPU_AS_IOCTL_LAST;
	gpu.event_ioctl_nr_last = NVGPU_EVENT_IOCTL_LAST;
	gpu.gpu_va_bit_count = 40;

	strlcpy(gpu.chipname, g->name, sizeof(gpu.chipname));
	gpu.max_fbps_count = g->ops.gr.get_max_fbps_count(g);
	gpu.fbp_en_mask = g->ops.gr.get_fbp_en_mask(g);
	gpu.max_ltc_per_fbp = g->ops.gr.get_max_ltc_per_fbp(g);
	gpu.max_lts_per_ltc = g->ops.gr.get_max_lts_per_ltc(g);
	gpu.gr_compbit_store_base_hw = g->gr.compbit_store.base_hw;
	gpu.gr_gobs_per_comptagline_per_slice =
		g->gr.gobs_per_comptagline_per_slice;
	gpu.num_ltc = g->ltc_count;
	gpu.lts_per_ltc = g->gr.slices_per_ltc;
	gpu.cbc_cache_line_size = g->gr.cacheline_size;
	gpu.cbc_comptags_per_line = g->gr.comptags_per_cacheline;

	if (g->ops.clk.get_maxrate)
		gpu.max_freq = g->ops.clk.get_maxrate(g, CTRL_CLK_DOMAIN_GPCCLK);

	gpu.local_video_memory_size = g->mm.vidmem.size;

	gpu.pci_vendor_id = g->pci_vendor_id;
	gpu.pci_device_id = g->pci_device_id;
	gpu.pci_subsystem_vendor_id = g->pci_subsystem_vendor_id;
	gpu.pci_subsystem_device_id = g->pci_subsystem_device_id;
	gpu.pci_class = g->pci_class;
	gpu.pci_revision = g->pci_revision;

	nvgpu_set_preemption_mode_flags(g, &gpu);

	if (request->gpu_characteristics_buf_size > 0) {
		size_t write_size = sizeof(gpu);

		if (write_size > request->gpu_characteristics_buf_size)
			write_size = request->gpu_characteristics_buf_size;

		err = copy_to_user((void __user *)(uintptr_t)
				   request->gpu_characteristics_buf_addr,
				   &gpu, write_size);
	}

	if (err == 0)
		request->gpu_characteristics_buf_size = sizeof(gpu);

	gk20a_idle(g);

	return err;
}

static int gk20a_ctrl_prepare_compressible_read(
	struct gk20a *g,
	struct nvgpu_gpu_prepare_compressible_read_args *args)
{
	int ret = -ENOSYS;

#ifdef CONFIG_NVGPU_SUPPORT_CDE
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct nvgpu_channel_fence fence;
	struct gk20a_fence *fence_out = NULL;
	int submit_flags = nvgpu_submit_gpfifo_user_flags_to_common_flags(
		args->submit_flags);
	int fd = -1;

	fence.id = args->fence.syncpt_id;
	fence.value = args->fence.syncpt_value;

	/* Try to allocate an fd here */
	if ((submit_flags & NVGPU_SUBMIT_FLAGS_FENCE_GET)
	    && (submit_flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE)) {
		fd = get_unused_fd_flags(O_RDWR);
		if (fd < 0)
			return fd;
	}

	ret = gk20a_prepare_compressible_read(l, args->handle,
			args->request_compbits, args->offset,
			args->compbits_hoffset, args->compbits_voffset,
			args->scatterbuffer_offset,
			args->width, args->height, args->block_height_log2,
			submit_flags, &fence, &args->valid_compbits,
			&args->zbc_color, &fence_out);

	if (ret) {
		if (fd != -1)
			put_unused_fd(fd);
		return ret;
	}

	/* Convert fence_out to something we can pass back to user space. */
	if (submit_flags & NVGPU_SUBMIT_FLAGS_FENCE_GET) {
		if (submit_flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE) {
			if (fence_out) {
				ret = gk20a_fence_install_fd(fence_out, fd);
				if (ret)
					put_unused_fd(fd);
				else
					args->fence.fd = fd;
			} else {
				args->fence.fd = -1;
				put_unused_fd(fd);
			}
		} else {
			if (fence_out) {
				args->fence.syncpt_id = fence_out->syncpt_id;
				args->fence.syncpt_value =
						fence_out->syncpt_value;
			} else {
				args->fence.syncpt_id = -1;
				args->fence.syncpt_value = 0;
			}
		}
	}
	gk20a_fence_put(fence_out);
#endif

	return ret;
}

static int gk20a_ctrl_mark_compressible_write(
	struct gk20a *g,
	struct nvgpu_gpu_mark_compressible_write_args *args)
{
	int ret = -ENOSYS;

#ifdef CONFIG_NVGPU_SUPPORT_CDE
	ret = gk20a_mark_compressible_write(g, args->handle,
			args->valid_compbits, args->offset, args->zbc_color);
#endif

	return ret;
}

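/*
 * Note the fd lifecycle pattern used here and in gk20a_ctrl_open_tsg()
 * below: the fd is reserved up front, but fd_install() runs only after
 * the share is fully initialized, so error paths can still unwind with
 * fput()/put_unused_fd() before the fd ever becomes visible to
 * userspace.
 */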
static int gk20a_ctrl_alloc_as(
	struct gk20a *g,
	struct nvgpu_alloc_as_args *args)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct gk20a_as_share *as_share;
	int err;
	int fd;
	struct file *file;
	char name[64];

	err = get_unused_fd_flags(O_RDWR);
	if (err < 0)
		return err;
	fd = err;

	snprintf(name, sizeof(name), "nvhost-%s-fd%d", g->name, fd);

	file = anon_inode_getfile(name, l->as_dev.cdev.ops, NULL, O_RDWR);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto clean_up;
	}

	err = gk20a_as_alloc_share(g, args->big_page_size,
				   gk20a_as_translate_as_alloc_flags(g,
						args->flags),
				   &as_share);
	if (err)
		goto clean_up_file;

	fd_install(fd, file);
	file->private_data = as_share;

	args->as_fd = fd;
	return 0;

clean_up_file:
	fput(file);
clean_up:
	put_unused_fd(fd);
	return err;
}

static int gk20a_ctrl_open_tsg(struct gk20a *g,
			       struct nvgpu_gpu_open_tsg_args *args)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	int err;
	int fd;
	struct file *file;
	char name[64];

	err = get_unused_fd_flags(O_RDWR);
	if (err < 0)
		return err;
	fd = err;

	snprintf(name, sizeof(name), "nvgpu-%s-tsg%d", g->name, fd);

	file = anon_inode_getfile(name, l->tsg.cdev.ops, NULL, O_RDWR);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto clean_up;
	}

	err = nvgpu_ioctl_tsg_open(g, file);
	if (err)
		goto clean_up_file;

	fd_install(fd, file);
	args->tsg_fd = fd;
	return 0;

clean_up_file:
	fput(file);
clean_up:
	put_unused_fd(fd);
	return err;
}

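/*
 * The two mask queries below share a size-probe convention: a call
 * with mask_buf_size == 0 copies nothing and just reports the required
 * buffer size back through mask_buf_size.
 */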
static int gk20a_ctrl_get_tpc_masks(struct gk20a *g,
				    struct nvgpu_gpu_get_tpc_masks_args *args)
{
	struct gr_gk20a *gr = &g->gr;
	int err = 0;
	const u32 gpc_tpc_mask_size = sizeof(u32) * gr->gpc_count;

	if (args->mask_buf_size > 0) {
		size_t write_size = gpc_tpc_mask_size;

		if (write_size > args->mask_buf_size)
			write_size = args->mask_buf_size;

		err = copy_to_user((void __user *)(uintptr_t)
				   args->mask_buf_addr,
				   gr->gpc_tpc_mask, write_size);
	}

	if (err == 0)
		args->mask_buf_size = gpc_tpc_mask_size;

	return err;
}

static int gk20a_ctrl_get_fbp_l2_masks(
	struct gk20a *g, struct nvgpu_gpu_get_fbp_l2_masks_args *args)
{
	struct gr_gk20a *gr = &g->gr;
	int err = 0;
	const u32 fbp_l2_mask_size = sizeof(u32) * gr->max_fbps_count;

	if (args->mask_buf_size > 0) {
		size_t write_size = fbp_l2_mask_size;

		if (write_size > args->mask_buf_size)
			write_size = args->mask_buf_size;

		err = copy_to_user((void __user *)(uintptr_t)
				   args->mask_buf_addr,
				   gr->fbp_rop_l2_en_mask, write_size);
	}

	if (err == 0)
		args->mask_buf_size = fbp_l2_mask_size;

	return err;
}

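/*
 * L2/FB maintenance: at least one operation must be requested, and an
 * L2 invalidate is only accepted together with an L2 flush.
 */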
static int nvgpu_gpu_ioctl_l2_fb_ops(struct gk20a *g,
				     struct nvgpu_gpu_l2_fb_args *args)
{
	int err = 0;

	if ((!args->l2_flush && !args->fb_flush) ||
	    (!args->l2_flush && args->l2_invalidate))
		return -EINVAL;

	if (args->l2_flush)
		g->ops.mm.l2_flush(g, args->l2_invalidate ? true : false);

	if (args->fb_flush)
		g->ops.mm.fb_flush(g);

	return err;
}

/* Invalidate i-cache for kepler & maxwell */
static int nvgpu_gpu_ioctl_inval_icache(
	struct gk20a *g,
	struct nvgpu_gpu_inval_icache_args *args)
{
	struct channel_gk20a *ch;
	int err;

	ch = gk20a_get_channel_from_file(args->channel_fd);
	if (!ch)
		return -EINVAL;

	/* Take the global lock, since we'll be doing global regops */
	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	err = g->ops.gr.inval_icache(g, ch);
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	gk20a_channel_put(ch);
	return err;
}

static int nvgpu_gpu_ioctl_set_mmu_debug_mode(
	struct gk20a *g,
	struct nvgpu_gpu_mmu_debug_mode_args *args)
{
	if (gk20a_busy(g)) {
		nvgpu_err(g, "failed to power on gpu");
		return -EINVAL;
	}

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	g->ops.fb.set_debug_mode(g, args->state == 1);
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	gk20a_idle(g);
	return 0;
}

static int nvgpu_gpu_ioctl_set_debug_mode(
	struct gk20a *g,
	struct nvgpu_gpu_sm_debug_mode_args *args)
{
	struct channel_gk20a *ch;
	int err;

	ch = gk20a_get_channel_from_file(args->channel_fd);
	if (!ch)
		return -EINVAL;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	if (g->ops.gr.set_sm_debug_mode)
		err = g->ops.gr.set_sm_debug_mode(g, ch,
				args->sms, !!args->enable);
	else
		err = -ENOSYS;
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	gk20a_channel_put(ch);
	return err;
}

static int nvgpu_gpu_ioctl_trigger_suspend(struct gk20a *g)
{
	int err;

	err = gk20a_busy(g);
	if (err)
		return err;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	err = g->ops.gr.trigger_suspend(g);
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	gk20a_idle(g);

	return err;
}

static int nvgpu_gpu_ioctl_wait_for_pause(struct gk20a *g,
		struct nvgpu_gpu_wait_pause_args *args)
{
	int err;
	struct warpstate *ioctl_w_state;
	struct nvgpu_warpstate *w_state = NULL;
	u32 sm_count, ioctl_size, size, sm_id;

	sm_count = g->gr.gpc_count * g->gr.tpc_count;

	ioctl_size = sm_count * sizeof(struct warpstate);
	ioctl_w_state = nvgpu_kzalloc(g, ioctl_size);
	if (!ioctl_w_state)
		return -ENOMEM;

	size = sm_count * sizeof(struct nvgpu_warpstate);
	w_state = nvgpu_kzalloc(g, size);
	if (!w_state) {
		err = -ENOMEM;
		goto out_free;
	}

	err = gk20a_busy(g);
	if (err)
		goto out_free;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	g->ops.gr.wait_for_pause(g, w_state);

	for (sm_id = 0; sm_id < g->gr.no_of_sm; sm_id++) {
		ioctl_w_state[sm_id].valid_warps[0] =
			w_state[sm_id].valid_warps[0];
		ioctl_w_state[sm_id].valid_warps[1] =
			w_state[sm_id].valid_warps[1];
		ioctl_w_state[sm_id].trapped_warps[0] =
			w_state[sm_id].trapped_warps[0];
		ioctl_w_state[sm_id].trapped_warps[1] =
			w_state[sm_id].trapped_warps[1];
		ioctl_w_state[sm_id].paused_warps[0] =
			w_state[sm_id].paused_warps[0];
		ioctl_w_state[sm_id].paused_warps[1] =
			w_state[sm_id].paused_warps[1];
	}
	/* Copy the converted state to user space - pointed by "args->pwarpstate" */
	if (copy_to_user((void __user *)(uintptr_t)args->pwarpstate,
	    ioctl_w_state, ioctl_size)) {
		nvgpu_log_fn(g, "copy_to_user failed!");
		err = -EFAULT;
	}

	nvgpu_mutex_release(&g->dbg_sessions_lock);

	gk20a_idle(g);

out_free:
	nvgpu_kfree(g, w_state);
	nvgpu_kfree(g, ioctl_w_state);

	return err;
}

static int nvgpu_gpu_ioctl_resume_from_pause(struct gk20a *g)
{
	int err;

	err = gk20a_busy(g);
	if (err)
		return err;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	err = g->ops.gr.resume_from_pause(g);
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	gk20a_idle(g);

	return err;
}

static int nvgpu_gpu_ioctl_clear_sm_errors(struct gk20a *g)
{
	int err;

	err = gk20a_busy(g);
	if (err)
		return err;

	err = g->ops.gr.clear_sm_errors(g);

	gk20a_idle(g);

	return err;
}

static int nvgpu_gpu_ioctl_has_any_exception(
	struct gk20a *g,
	struct nvgpu_gpu_tpc_exception_en_status_args *args)
{
	u32 tpc_exception_en;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	tpc_exception_en = g->ops.gr.tpc_enabled_exceptions(g);
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	args->tpc_exception_en_sm_mask = tpc_exception_en;

	return 0;
}

static int gk20a_ctrl_get_num_vsms(struct gk20a *g,
				   struct nvgpu_gpu_num_vsms *args)
{
	struct gr_gk20a *gr = &g->gr;

	args->num_vsms = gr->no_of_sm;
	return 0;
}

static int gk20a_ctrl_vsm_mapping(struct gk20a *g,
				  struct nvgpu_gpu_vsms_mapping *args)
{
	int err = 0;
	struct gr_gk20a *gr = &g->gr;
	size_t write_size = gr->no_of_sm *
			    sizeof(struct nvgpu_gpu_vsms_mapping_entry);
	struct nvgpu_gpu_vsms_mapping_entry *vsms_buf;
	u32 i;

	vsms_buf = nvgpu_kzalloc(g, write_size);
	if (vsms_buf == NULL)
		return -ENOMEM;

	for (i = 0; i < gr->no_of_sm; i++) {
		vsms_buf[i].gpc_index = gr->sm_to_cluster[i].gpc_index;
		if (g->ops.gr.get_nonpes_aware_tpc)
			vsms_buf[i].tpc_index =
				g->ops.gr.get_nonpes_aware_tpc(g,
					gr->sm_to_cluster[i].gpc_index,
					gr->sm_to_cluster[i].tpc_index);
		else
			vsms_buf[i].tpc_index =
				gr->sm_to_cluster[i].tpc_index;
	}

	err = copy_to_user((void __user *)(uintptr_t)
			   args->vsms_map_buf_addr,
			   vsms_buf, write_size);
	nvgpu_kfree(g, vsms_buf);

	return err;
}

static int nvgpu_gpu_get_cpu_time_correlation_info(
	struct gk20a *g,
	struct nvgpu_gpu_get_cpu_time_correlation_info_args *args)
{
	struct nvgpu_cpu_time_correlation_sample *samples;
	int err;
	u32 i;

	if (args->count > NVGPU_GPU_GET_CPU_TIME_CORRELATION_INFO_MAX_COUNT ||
	    args->source_id != NVGPU_GPU_GET_CPU_TIME_CORRELATION_INFO_SRC_ID_TSC)
		return -EINVAL;

	samples = nvgpu_kzalloc(g, args->count *
			sizeof(struct nvgpu_cpu_time_correlation_sample));
	if (!samples)
		return -ENOMEM;

	err = g->ops.ptimer.get_timestamps_zipper(g,
			args->source_id, args->count, samples);
	if (!err) {
		for (i = 0; i < args->count; i++) {
			args->samples[i].cpu_timestamp = samples[i].cpu_timestamp;
			args->samples[i].gpu_timestamp = samples[i].gpu_timestamp;
		}
	}

	nvgpu_kfree(g, samples);

	return err;
}

static int nvgpu_gpu_get_gpu_time(
	struct gk20a *g,
	struct nvgpu_gpu_get_gpu_time_args *args)
{
	u64 time;
	int err;

	err = gk20a_busy(g);
	if (err)
		return err;

	err = g->ops.ptimer.read_ptimer(g, &time);
	if (!err)
		args->gpu_timestamp = time;

	gk20a_idle(g);
	return err;
}

static int nvgpu_gpu_get_engine_info(
	struct gk20a *g,
	struct nvgpu_gpu_get_engine_info_args *args)
{
	int err = 0;
	u32 engine_enum = ENGINE_INVAL_GK20A;
	u32 report_index = 0;
	u32 engine_id_idx;
	const u32 max_buffer_engines = args->engine_info_buf_size /
		sizeof(struct nvgpu_gpu_get_engine_info_item);
	struct nvgpu_gpu_get_engine_info_item __user *dst_item_list =
		(void __user *)(uintptr_t)args->engine_info_buf_addr;

	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines;
	     ++engine_id_idx) {
		u32 active_engine_id = g->fifo.active_engines_list[engine_id_idx];
		const struct fifo_engine_info_gk20a *src_info =
			&g->fifo.engine_info[active_engine_id];
		struct nvgpu_gpu_get_engine_info_item dst_info;

		memset(&dst_info, 0, sizeof(dst_info));

		engine_enum = src_info->engine_enum;

		switch (engine_enum) {
		case ENGINE_GR_GK20A:
			dst_info.engine_id = NVGPU_GPU_ENGINE_ID_GR;
			break;

		case ENGINE_GRCE_GK20A:
			dst_info.engine_id = NVGPU_GPU_ENGINE_ID_GR_COPY;
			break;

		case ENGINE_ASYNC_CE_GK20A:
			dst_info.engine_id = NVGPU_GPU_ENGINE_ID_ASYNC_COPY;
			break;

		default:
			nvgpu_err(g, "Unmapped engine enum %u",
				  engine_enum);
			continue;
		}

		dst_info.engine_instance = src_info->inst_id;
		dst_info.runlist_id = src_info->runlist_id;

		if (report_index < max_buffer_engines) {
			err = copy_to_user(&dst_item_list[report_index],
					   &dst_info, sizeof(dst_info));
			if (err)
				goto clean_up;
		}

		++report_index;
	}

	args->engine_info_buf_size =
		report_index * sizeof(struct nvgpu_gpu_get_engine_info_item);

clean_up:
	return err;
}

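/*
 * Vidmem allocations come from a buddy allocator, hence the checks
 * below: a non-zero 4K-multiple size, a power-of-two alignment, and an
 * alignment no larger than the size rounded up to a power of two.
 */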
static int nvgpu_gpu_alloc_vidmem(struct gk20a *g,
				  struct nvgpu_gpu_alloc_vidmem_args *args)
{
	u32 align = args->in.alignment ? args->in.alignment : SZ_4K;
	int fd;

	nvgpu_log_fn(g, " ");

	/* not yet supported */
	if (WARN_ON(args->in.flags & NVGPU_GPU_ALLOC_VIDMEM_FLAG_CPU_MASK))
		return -EINVAL;

	/* not yet supported */
	if (WARN_ON(args->in.flags & NVGPU_GPU_ALLOC_VIDMEM_FLAG_VPR))
		return -EINVAL;

	if (args->in.size & (SZ_4K - 1))
		return -EINVAL;

	if (!args->in.size)
		return -EINVAL;

	if (align & (align - 1))
		return -EINVAL;

	if (align > roundup_pow_of_two(args->in.size)) {
		/* log this special case, buddy allocator detail */
		nvgpu_warn(g,
			"alignment larger than buffer size rounded up to power of 2 is not supported");
		return -EINVAL;
	}

	fd = nvgpu_vidmem_export_linux(g, args->in.size);
	if (fd < 0)
		return fd;

	args->out.dmabuf_fd = fd;

	nvgpu_log_fn(g, "done, fd=%d", fd);

	return 0;
}

static int nvgpu_gpu_get_memory_state(struct gk20a *g,
		struct nvgpu_gpu_get_memory_state_args *args)
{
	int err;

	nvgpu_log_fn(g, " ");

	if (args->reserved[0] || args->reserved[1] ||
	    args->reserved[2] || args->reserved[3])
		return -EINVAL;

	err = nvgpu_vidmem_get_space(g, &args->total_free_bytes);

	nvgpu_log_fn(g, "done, err=%d, bytes=%lld", err, args->total_free_bytes);

	return err;
}

static u32 nvgpu_gpu_convert_clk_domain(u32 clk_domain)
{
	u32 domain = 0;

	if (clk_domain == NVGPU_GPU_CLK_DOMAIN_MCLK)
		domain = NVGPU_CLK_DOMAIN_MCLK;
	else if (clk_domain == NVGPU_GPU_CLK_DOMAIN_GPCCLK)
		domain = NVGPU_CLK_DOMAIN_GPCCLK;
	else
		domain = NVGPU_CLK_DOMAIN_MAX + 1;

	return domain;
}

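/*
 * VF-point query protocol: a call with max_entries == 0 only reports
 * how many points exist; a follow-up call with a large enough buffer
 * receives the points themselves, with duplicate and out-of-range
 * frequencies filtered out.
 */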
static int nvgpu_gpu_clk_get_vf_points(struct gk20a *g,
		struct gk20a_ctrl_priv *priv,
		struct nvgpu_gpu_clk_vf_points_args *args)
{
	struct nvgpu_gpu_clk_vf_point clk_point;
	struct nvgpu_gpu_clk_vf_point __user *entry;
	struct nvgpu_clk_session *session = priv->clk_session;
	u32 clk_domains = 0;
	int err;
	u16 last_mhz;
	u16 *fpoints;
	u32 i;
	u32 max_points = 0;
	u32 num_points = 0;
	u16 min_mhz;
	u16 max_mhz;

	nvgpu_log_fn(g, " ");

	if (!session || args->flags)
		return -EINVAL;

	clk_domains = nvgpu_clk_arb_get_arbiter_clk_domains(g);
	args->num_entries = 0;

	if (!nvgpu_clk_arb_is_valid_domain(g,
				nvgpu_gpu_convert_clk_domain(args->clk_domain)))
		return -EINVAL;

	err = nvgpu_clk_arb_get_arbiter_clk_f_points(g,
			nvgpu_gpu_convert_clk_domain(args->clk_domain),
			&max_points, NULL);
	if (err)
		return err;

	if (!args->max_entries) {
		args->max_entries = max_points;
		return 0;
	}

	if (args->max_entries < max_points)
		return -EINVAL;

	err = nvgpu_clk_arb_get_arbiter_clk_range(g,
			nvgpu_gpu_convert_clk_domain(args->clk_domain),
			&min_mhz, &max_mhz);
	if (err)
		return err;

	fpoints = nvgpu_kcalloc(g, max_points, sizeof(u16));
	if (!fpoints)
		return -ENOMEM;

	err = nvgpu_clk_arb_get_arbiter_clk_f_points(g,
			nvgpu_gpu_convert_clk_domain(args->clk_domain),
			&max_points, fpoints);
	if (err)
		goto fail;

	entry = (struct nvgpu_gpu_clk_vf_point __user *)
			(uintptr_t)args->clk_vf_point_entries;

	last_mhz = 0;
	num_points = 0;
	for (i = 0; (i < max_points) && !err; i++) {

		/* filter out duplicate frequencies */
		if (fpoints[i] == last_mhz)
			continue;

		/* filter out out-of-range frequencies */
		if ((fpoints[i] < min_mhz) || (fpoints[i] > max_mhz))
			continue;

		last_mhz = fpoints[i];
		clk_point.freq_hz = MHZ_TO_HZ(fpoints[i]);

		err = copy_to_user((void __user *)entry, &clk_point,
				sizeof(clk_point));

		num_points++;
		entry++;
	}

	args->num_entries = num_points;

fail:
	nvgpu_kfree(g, fpoints);
	return err;
}

static int nvgpu_gpu_clk_get_range(struct gk20a *g,
		struct gk20a_ctrl_priv *priv,
		struct nvgpu_gpu_clk_range_args *args)
{
	struct nvgpu_gpu_clk_range clk_range;
	struct nvgpu_gpu_clk_range __user *entry;
	struct nvgpu_clk_session *session = priv->clk_session;

	u32 clk_domains = 0;
	u32 num_domains;
	u32 num_entries;
	u32 i;
	int bit;
	int err;
	u16 min_mhz, max_mhz;

	nvgpu_log_fn(g, " ");

	if (!session)
		return -EINVAL;

	clk_domains = nvgpu_clk_arb_get_arbiter_clk_domains(g);
	num_domains = hweight_long(clk_domains);

	if (!args->flags) {
		if (!args->num_entries) {
			args->num_entries = num_domains;
			return 0;
		}

		if (args->num_entries < num_domains)
			return -EINVAL;

		args->num_entries = 0;
		num_entries = num_domains;

	} else {
		if (args->flags != NVGPU_GPU_CLK_FLAG_SPECIFIC_DOMAINS)
			return -EINVAL;

		num_entries = args->num_entries;
		if (num_entries > num_domains)
			return -EINVAL;
	}

	entry = (struct nvgpu_gpu_clk_range __user *)
			(uintptr_t)args->clk_range_entries;

	for (i = 0; i < num_entries; i++, entry++) {

		if (args->flags == NVGPU_GPU_CLK_FLAG_SPECIFIC_DOMAINS) {
			if (copy_from_user(&clk_range, (void __user *)entry,
					sizeof(clk_range)))
				return -EFAULT;
		} else {
			bit = ffs(clk_domains) - 1;
			clk_range.clk_domain = bit;
			clk_domains &= ~BIT(bit);
		}

		clk_range.flags = 0;
		err = nvgpu_clk_arb_get_arbiter_clk_range(g,
				nvgpu_gpu_convert_clk_domain(clk_range.clk_domain),
				&min_mhz, &max_mhz);
		clk_range.min_hz = MHZ_TO_HZ(min_mhz);
		clk_range.max_hz = MHZ_TO_HZ(max_mhz);

		if (err)
			return err;

		err = copy_to_user(entry, &clk_range, sizeof(clk_range));
		if (err)
			return -EFAULT;
	}

	args->num_entries = num_entries;

	return 0;
}

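/*
 * Clock requests are validated in a first pass over the user entries
 * and then staged against an arbiter request fd; the targets only take
 * effect at nvgpu_clk_arb_commit_request_fd(), and the fd is handed
 * back as completion_fd so userspace can wait for the arbiter to apply
 * the change.
 */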
static int nvgpu_gpu_clk_set_info(struct gk20a *g,
		struct gk20a_ctrl_priv *priv,
		struct nvgpu_gpu_clk_set_info_args *args)
{
	struct nvgpu_gpu_clk_info clk_info;
	struct nvgpu_gpu_clk_info __user *entry;
	struct nvgpu_clk_session *session = priv->clk_session;

	int fd;
	u32 clk_domains = 0;
	u16 freq_mhz;
	int i;
	int ret;

	nvgpu_log_fn(g, " ");

	if (!session || args->flags)
		return -EINVAL;

	clk_domains = nvgpu_clk_arb_get_arbiter_clk_domains(g);
	if (!clk_domains)
		return -EINVAL;

	entry = (struct nvgpu_gpu_clk_info __user *)
			(uintptr_t)args->clk_info_entries;

	for (i = 0; i < args->num_entries; i++, entry++) {

		if (copy_from_user(&clk_info, entry, sizeof(clk_info)))
			return -EFAULT;

		if (!nvgpu_clk_arb_is_valid_domain(g,
				nvgpu_gpu_convert_clk_domain(clk_info.clk_domain)))
			return -EINVAL;
	}

	entry = (struct nvgpu_gpu_clk_info __user *)
			(uintptr_t)args->clk_info_entries;

	ret = nvgpu_clk_arb_install_request_fd(g, session, &fd);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->num_entries; i++, entry++) {

		if (copy_from_user(&clk_info, (void __user *)entry,
				sizeof(clk_info)))
			return -EFAULT;
		freq_mhz = HZ_TO_MHZ(clk_info.freq_hz);

		nvgpu_clk_arb_set_session_target_mhz(session, fd,
			nvgpu_gpu_convert_clk_domain(clk_info.clk_domain), freq_mhz);
	}

	ret = nvgpu_clk_arb_commit_request_fd(g, session, fd);
	if (ret < 0)
		return ret;

	args->completion_fd = fd;

	return ret;
}

static int nvgpu_gpu_clk_get_info(struct gk20a *g,
		struct gk20a_ctrl_priv *priv,
		struct nvgpu_gpu_clk_get_info_args *args)
{
	struct nvgpu_gpu_clk_info clk_info;
	struct nvgpu_gpu_clk_info __user *entry;
	struct nvgpu_clk_session *session = priv->clk_session;
	u32 clk_domains = 0;
	u32 num_domains;
	u32 num_entries;
	u32 i;
	u16 freq_mhz;
	int err;
	int bit;

	nvgpu_log_fn(g, " ");

	if (!session)
		return -EINVAL;

	clk_domains = nvgpu_clk_arb_get_arbiter_clk_domains(g);
	num_domains = hweight_long(clk_domains);

	if (!args->flags) {
		if (!args->num_entries) {
			args->num_entries = num_domains;
			return 0;
		}

		if (args->num_entries < num_domains)
			return -EINVAL;

		args->num_entries = 0;
		num_entries = num_domains;

	} else {
		if (args->flags != NVGPU_GPU_CLK_FLAG_SPECIFIC_DOMAINS)
			return -EINVAL;

		num_entries = args->num_entries;
		if (num_entries > num_domains * 3)
			return -EINVAL;
	}

	entry = (struct nvgpu_gpu_clk_info __user *)
			(uintptr_t)args->clk_info_entries;

	for (i = 0; i < num_entries; i++, entry++) {

		if (args->flags == NVGPU_GPU_CLK_FLAG_SPECIFIC_DOMAINS) {
			if (copy_from_user(&clk_info, (void __user *)entry,
					sizeof(clk_info)))
				return -EFAULT;
		} else {
			bit = ffs(clk_domains) - 1;
			clk_info.clk_domain = bit;
			clk_domains &= ~BIT(bit);
			clk_info.clk_type = args->clk_type;
		}

		switch (clk_info.clk_type) {
		case NVGPU_GPU_CLK_TYPE_TARGET:
			err = nvgpu_clk_arb_get_session_target_mhz(session,
					nvgpu_gpu_convert_clk_domain(clk_info.clk_domain),
					&freq_mhz);
			break;
		case NVGPU_GPU_CLK_TYPE_ACTUAL:
			err = nvgpu_clk_arb_get_arbiter_actual_mhz(g,
					nvgpu_gpu_convert_clk_domain(clk_info.clk_domain),
					&freq_mhz);
			break;
		case NVGPU_GPU_CLK_TYPE_EFFECTIVE:
			err = nvgpu_clk_arb_get_arbiter_effective_mhz(g,
					nvgpu_gpu_convert_clk_domain(clk_info.clk_domain),
					&freq_mhz);
			break;
		default:
			freq_mhz = 0;
			err = -EINVAL;
			break;
		}
		if (err)
			return err;

		clk_info.flags = 0;
		clk_info.freq_hz = MHZ_TO_HZ(freq_mhz);

		err = copy_to_user((void __user *)entry, &clk_info,
				sizeof(clk_info));
		if (err)
			return -EFAULT;
	}

	args->num_entries = num_entries;

	return 0;
}

static int nvgpu_gpu_get_event_fd(struct gk20a *g,
		struct gk20a_ctrl_priv *priv,
		struct nvgpu_gpu_get_event_fd_args *args)
{
	struct nvgpu_clk_session *session = priv->clk_session;

	nvgpu_log_fn(g, " ");

	if (!session)
		return -EINVAL;

	return nvgpu_clk_arb_install_event_fd(g, session, &args->event_fd,
			args->flags);
}

static int nvgpu_gpu_get_voltage(struct gk20a *g,
		struct nvgpu_gpu_get_voltage_args *args)
{
	int err = -EINVAL;

	nvgpu_log_fn(g, " ");

	if (args->reserved)
		return -EINVAL;

	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_GET_VOLTAGE))
		return -EINVAL;

	err = gk20a_busy(g);
	if (err)
		return err;

	switch (args->which) {
	case NVGPU_GPU_VOLTAGE_CORE:
		err = volt_get_voltage(g, CTRL_VOLT_DOMAIN_LOGIC, &args->voltage);
		break;
	case NVGPU_GPU_VOLTAGE_SRAM:
		err = volt_get_voltage(g, CTRL_VOLT_DOMAIN_SRAM, &args->voltage);
		break;
	case NVGPU_GPU_VOLTAGE_BUS:
		err = pmgr_pwr_devices_get_voltage(g, &args->voltage);
		break;
	default:
		err = -EINVAL;
	}

	gk20a_idle(g);

	return err;
}

static int nvgpu_gpu_get_current(struct gk20a *g,
		struct nvgpu_gpu_get_current_args *args)
{
	int err;

	nvgpu_log_fn(g, " ");

	if (args->reserved[0] || args->reserved[1] || args->reserved[2])
		return -EINVAL;

	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_GET_CURRENT))
		return -EINVAL;

	err = gk20a_busy(g);
	if (err)
		return err;

	err = pmgr_pwr_devices_get_current(g, &args->currnt);

	gk20a_idle(g);

	return err;
}

static int nvgpu_gpu_get_power(struct gk20a *g,
		struct nvgpu_gpu_get_power_args *args)
{
	int err;

	nvgpu_log_fn(g, " ");

	if (args->reserved[0] || args->reserved[1] || args->reserved[2])
		return -EINVAL;

	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_GET_POWER))
		return -EINVAL;

	err = gk20a_busy(g);
	if (err)
		return err;

	err = pmgr_pwr_devices_get_power(g, &args->power);

	gk20a_idle(g);

	return err;
}

static int nvgpu_gpu_get_temperature(struct gk20a *g,
		struct nvgpu_gpu_get_temperature_args *args)
{
	int err;
	u32 temp_f24_8;

	nvgpu_log_fn(g, " ");

	if (args->reserved[0] || args->reserved[1] || args->reserved[2])
		return -EINVAL;

	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_GET_TEMPERATURE))
		return -EINVAL;

	if (!g->ops.therm.get_internal_sensor_curr_temp)
		return -EINVAL;

	err = gk20a_busy(g);
	if (err)
		return err;

	err = g->ops.therm.get_internal_sensor_curr_temp(g, &temp_f24_8);

	gk20a_idle(g);

	args->temp_f24_8 = (s32)temp_f24_8;

	return err;
}

static int nvgpu_gpu_set_therm_alert_limit(struct gk20a *g,
		struct nvgpu_gpu_set_therm_alert_limit_args *args)
{
	int err;

	nvgpu_log_fn(g, " ");

	if (args->reserved[0] || args->reserved[1] || args->reserved[2])
		return -EINVAL;

	if (!g->ops.therm.configure_therm_alert)
		return -EINVAL;

	err = gk20a_busy(g);
	if (err)
		return err;

	err = g->ops.therm.configure_therm_alert(g, args->temp_f24_8);

	gk20a_idle(g);

	return err;
}

static int nvgpu_gpu_set_deterministic_ch_railgate(struct channel_gk20a *ch,
		u32 flags)
{
	int err = 0;
	bool allow;
	bool disallow;

	allow = flags &
		NVGPU_GPU_SET_DETERMINISTIC_OPTS_FLAGS_ALLOW_RAILGATING;

	disallow = flags &
		NVGPU_GPU_SET_DETERMINISTIC_OPTS_FLAGS_DISALLOW_RAILGATING;

	/* Can't be both at the same time */
	if (allow && disallow)
		return -EINVAL;

	/* Nothing to do */
	if (!allow && !disallow)
		return 0;

	/*
	 * Moving into explicit idle or back from it? A call that doesn't
	 * change the status is a no-op.
	 */
	if (!ch->deterministic_railgate_allowed &&
	    allow) {
		gk20a_idle(ch->g);
	} else if (ch->deterministic_railgate_allowed &&
		   !allow) {
		err = gk20a_busy(ch->g);
		if (err) {
			nvgpu_warn(ch->g,
				"cannot busy to restore deterministic ch");
			return err;
		}
	}
	ch->deterministic_railgate_allowed = allow;

	return err;
}

static int nvgpu_gpu_set_deterministic_ch(struct channel_gk20a *ch, u32 flags)
{
	if (!ch->deterministic)
		return -EINVAL;

	return nvgpu_gpu_set_deterministic_ch_railgate(ch, flags);
}

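/*
 * Applies railgating opts to a list of channel fds while holding
 * deterministic_busy for read. Processing stops at the first failure,
 * and num_channels is rewritten to the count actually processed so
 * userspace can tell how far the kernel got.
 */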
static int nvgpu_gpu_set_deterministic_opts(struct gk20a *g,
		struct nvgpu_gpu_set_deterministic_opts_args *args)
{
	int __user *user_channels;
	u32 i = 0;
	int err = 0;

	nvgpu_log_fn(g, " ");

	user_channels = (int __user *)(uintptr_t)args->channels;

	/* Upper limit; prevent holding deterministic_busy for long */
	if (args->num_channels > g->fifo.num_channels) {
		err = -EINVAL;
		goto out;
	}

	/* Trivial sanity check first */
	if (!access_ok(VERIFY_READ, user_channels,
		       args->num_channels * sizeof(int))) {
		err = -EFAULT;
		goto out;
	}

	nvgpu_rwsem_down_read(&g->deterministic_busy);

	/* note: we exit at the first failure */
	for (; i < args->num_channels; i++) {
		int ch_fd = 0;
		struct channel_gk20a *ch;

		if (copy_from_user(&ch_fd, &user_channels[i], sizeof(int))) {
			/* User raced with above access_ok */
			err = -EFAULT;
			break;
		}

		ch = gk20a_get_channel_from_file(ch_fd);
		if (!ch) {
			err = -EINVAL;
			break;
		}

		err = nvgpu_gpu_set_deterministic_ch(ch, args->flags);

		gk20a_channel_put(ch);

		if (err)
			break;
	}

	nvgpu_rwsem_up_read(&g->deterministic_busy);

out:
	args->num_channels = i;
	return err;
}

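/*
 * sm_id comes straight from userspace, so the bounds check in this
 * function is followed by nvgpu_speculation_barrier() before the array
 * access, preventing the index from being used under speculation
 * (Spectre variant 1 hardening).
 */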
static int nvgpu_gpu_read_single_sm_error_state(struct gk20a *g,
		struct nvgpu_gpu_read_single_sm_error_state_args *args)
{
	struct gr_gk20a *gr = &g->gr;
	struct nvgpu_gr_sm_error_state *sm_error_state;
	struct nvgpu_gpu_sm_error_state_record sm_error_state_record;
	u32 sm_id;
	int err = 0;

	sm_id = args->sm_id;
	if (sm_id >= gr->no_of_sm)
		return -EINVAL;

	nvgpu_speculation_barrier();

	sm_error_state = gr->sm_error_states + sm_id;
	sm_error_state_record.global_esr =
		sm_error_state->hww_global_esr;
	sm_error_state_record.warp_esr =
		sm_error_state->hww_warp_esr;
	sm_error_state_record.warp_esr_pc =
		sm_error_state->hww_warp_esr_pc;
	sm_error_state_record.global_esr_report_mask =
		sm_error_state->hww_global_esr_report_mask;
	sm_error_state_record.warp_esr_report_mask =
		sm_error_state->hww_warp_esr_report_mask;

	if (args->record_size > 0) {
		/* size the copy by the uapi record, not the internal struct */
		size_t write_size = sizeof(sm_error_state_record);

		if (write_size > args->record_size)
			write_size = args->record_size;

		nvgpu_mutex_acquire(&g->dbg_sessions_lock);
		err = copy_to_user((void __user *)(uintptr_t)
				   args->record_mem,
				   &sm_error_state_record,
				   write_size);
		nvgpu_mutex_release(&g->dbg_sessions_lock);
		if (err) {
			nvgpu_err(g, "copy_to_user failed!");
			return err;
		}

		args->record_size = write_size;
	}

	return 0;
}

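/*
 * Top-level ioctl dispatcher: validates the command's magic, number
 * and argument size, snapshots the argument into a stack buffer for
 * _IOC_WRITE commands, and makes sure SW init has completed (via a
 * gk20a_busy()/gk20a_idle() cycle) before dispatching.
 */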
1635 | long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||
1636 | { | ||
1637 | struct gk20a_ctrl_priv *priv = filp->private_data; | ||
1638 | struct gk20a *g = priv->g; | ||
1639 | struct nvgpu_gpu_zcull_get_ctx_size_args *get_ctx_size_args; | ||
1640 | struct nvgpu_gpu_zcull_get_info_args *get_info_args; | ||
1641 | struct nvgpu_gpu_zbc_set_table_args *set_table_args; | ||
1642 | struct nvgpu_gpu_zbc_query_table_args *query_table_args; | ||
1643 | u8 buf[NVGPU_GPU_IOCTL_MAX_ARG_SIZE]; | ||
1644 | struct gr_zcull_info *zcull_info; | ||
1645 | struct zbc_entry *zbc_val; | ||
1646 | struct zbc_query_params *zbc_tbl; | ||
1647 | int i, err = 0; | ||
1648 | |||
1649 | nvgpu_log_fn(g, "start %d", _IOC_NR(cmd)); | ||
1650 | |||
1651 | if ((_IOC_TYPE(cmd) != NVGPU_GPU_IOCTL_MAGIC) || | ||
1652 | (_IOC_NR(cmd) == 0) || | ||
1653 | (_IOC_NR(cmd) > NVGPU_GPU_IOCTL_LAST) || | ||
1654 | (_IOC_SIZE(cmd) > NVGPU_GPU_IOCTL_MAX_ARG_SIZE)) | ||
1655 | return -EINVAL; | ||
1656 | |||
1657 | memset(buf, 0, sizeof(buf)); | ||
1658 | if (_IOC_DIR(cmd) & _IOC_WRITE) { | ||
1659 | if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd))) | ||
1660 | return -EFAULT; | ||
1661 | } | ||
1662 | |||
1663 | if (!g->sw_ready) { | ||
1664 | err = gk20a_busy(g); | ||
1665 | if (err) | ||
1666 | return err; | ||
1667 | |||
1668 | gk20a_idle(g); | ||
1669 | } | ||
1670 | |||
	switch (cmd) {
	case NVGPU_GPU_IOCTL_ZCULL_GET_CTX_SIZE:
		get_ctx_size_args = (struct nvgpu_gpu_zcull_get_ctx_size_args *)buf;

		get_ctx_size_args->size = gr_gk20a_get_ctxsw_zcull_size(g, &g->gr);

		break;
	case NVGPU_GPU_IOCTL_ZCULL_GET_INFO:
		get_info_args = (struct nvgpu_gpu_zcull_get_info_args *)buf;

		memset(get_info_args, 0, sizeof(struct nvgpu_gpu_zcull_get_info_args));

		zcull_info = nvgpu_kzalloc(g, sizeof(struct gr_zcull_info));
		if (zcull_info == NULL)
			return -ENOMEM;

		err = g->ops.gr.get_zcull_info(g, &g->gr, zcull_info);
		if (err) {
			nvgpu_kfree(g, zcull_info);
			break;
		}

		get_info_args->width_align_pixels = zcull_info->width_align_pixels;
		get_info_args->height_align_pixels = zcull_info->height_align_pixels;
		get_info_args->pixel_squares_by_aliquots = zcull_info->pixel_squares_by_aliquots;
		get_info_args->aliquot_total = zcull_info->aliquot_total;
		get_info_args->region_byte_multiplier = zcull_info->region_byte_multiplier;
		get_info_args->region_header_size = zcull_info->region_header_size;
		get_info_args->subregion_header_size = zcull_info->subregion_header_size;
		get_info_args->subregion_width_align_pixels = zcull_info->subregion_width_align_pixels;
		get_info_args->subregion_height_align_pixels = zcull_info->subregion_height_align_pixels;
		get_info_args->subregion_count = zcull_info->subregion_count;

		nvgpu_kfree(g, zcull_info);
		break;
	case NVGPU_GPU_IOCTL_ZBC_SET_TABLE:
		set_table_args = (struct nvgpu_gpu_zbc_set_table_args *)buf;

		zbc_val = nvgpu_kzalloc(g, sizeof(struct zbc_entry));
		if (zbc_val == NULL)
			return -ENOMEM;

		zbc_val->format = set_table_args->format;
		zbc_val->type = set_table_args->type;

		switch (zbc_val->type) {
		case GK20A_ZBC_TYPE_COLOR:
			for (i = 0; i < GK20A_ZBC_COLOR_VALUE_SIZE; i++) {
				zbc_val->color_ds[i] = set_table_args->color_ds[i];
				zbc_val->color_l2[i] = set_table_args->color_l2[i];
			}
			break;
		case GK20A_ZBC_TYPE_DEPTH:
		case T19X_ZBC:
			zbc_val->depth = set_table_args->depth;
			break;
		default:
			err = -EINVAL;
			break;
		}

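		/* Commit the new entry with the GPU held awake. */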
		if (!err) {
			err = gk20a_busy(g);
			if (!err) {
				err = g->ops.gr.zbc_set_table(g, &g->gr,
						zbc_val);
				gk20a_idle(g);
			}
		}

		nvgpu_kfree(g, zbc_val);
		break;
	case NVGPU_GPU_IOCTL_ZBC_QUERY_TABLE:
		query_table_args = (struct nvgpu_gpu_zbc_query_table_args *)buf;

		zbc_tbl = nvgpu_kzalloc(g, sizeof(struct zbc_query_params));
		if (zbc_tbl == NULL)
			return -ENOMEM;

		zbc_tbl->type = query_table_args->type;
		zbc_tbl->index_size = query_table_args->index_size;

		err = g->ops.gr.zbc_query_table(g, &g->gr, zbc_tbl);

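		/*
		 * On success, copy back only the fields that are valid for
		 * the queried entry type.
		 */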
		if (!err) {
			switch (zbc_tbl->type) {
			case GK20A_ZBC_TYPE_COLOR:
				for (i = 0; i < GK20A_ZBC_COLOR_VALUE_SIZE; i++) {
					query_table_args->color_ds[i] = zbc_tbl->color_ds[i];
					query_table_args->color_l2[i] = zbc_tbl->color_l2[i];
				}
				break;
			case GK20A_ZBC_TYPE_DEPTH:
			case T19X_ZBC:
				query_table_args->depth = zbc_tbl->depth;
				break;
			case GK20A_ZBC_TYPE_INVALID:
				query_table_args->index_size = zbc_tbl->index_size;
				break;
			default:
				err = -EINVAL;
				break;
			}
			if (!err) {
				query_table_args->format = zbc_tbl->format;
				query_table_args->ref_cnt = zbc_tbl->ref_cnt;
			}
		}

		nvgpu_kfree(g, zbc_tbl);
		break;

	case NVGPU_GPU_IOCTL_GET_CHARACTERISTICS:
		err = gk20a_ctrl_ioctl_gpu_characteristics(
			g, (struct nvgpu_gpu_get_characteristics *)buf);
		break;
	case NVGPU_GPU_IOCTL_PREPARE_COMPRESSIBLE_READ:
		err = gk20a_ctrl_prepare_compressible_read(g,
			(struct nvgpu_gpu_prepare_compressible_read_args *)buf);
		break;
	case NVGPU_GPU_IOCTL_MARK_COMPRESSIBLE_WRITE:
		err = gk20a_ctrl_mark_compressible_write(g,
			(struct nvgpu_gpu_mark_compressible_write_args *)buf);
		break;
	case NVGPU_GPU_IOCTL_ALLOC_AS:
		err = gk20a_ctrl_alloc_as(g,
			(struct nvgpu_alloc_as_args *)buf);
		break;
	case NVGPU_GPU_IOCTL_OPEN_TSG:
		err = gk20a_ctrl_open_tsg(g,
			(struct nvgpu_gpu_open_tsg_args *)buf);
		break;
	case NVGPU_GPU_IOCTL_GET_TPC_MASKS:
		err = gk20a_ctrl_get_tpc_masks(g,
			(struct nvgpu_gpu_get_tpc_masks_args *)buf);
		break;
	case NVGPU_GPU_IOCTL_GET_FBP_L2_MASKS:
		err = gk20a_ctrl_get_fbp_l2_masks(g,
			(struct nvgpu_gpu_get_fbp_l2_masks_args *)buf);
		break;
	case NVGPU_GPU_IOCTL_OPEN_CHANNEL:
		/* The argument type is struct nvgpu_channel_open_args here,
		 * while nvgpu.h declares nvgpu_gpu_open_channel_args for
		 * naming consistency; the two are the same structure. */
		err = gk20a_channel_open_ioctl(g,
			(struct nvgpu_channel_open_args *)buf);
		break;
	case NVGPU_GPU_IOCTL_FLUSH_L2:
		err = nvgpu_gpu_ioctl_l2_fb_ops(g,
			(struct nvgpu_gpu_l2_fb_args *)buf);
		break;
	case NVGPU_GPU_IOCTL_INVAL_ICACHE:
		err = gr_gk20a_elpg_protected_call(g,
			nvgpu_gpu_ioctl_inval_icache(g, (struct nvgpu_gpu_inval_icache_args *)buf));
		break;

	case NVGPU_GPU_IOCTL_SET_MMUDEBUG_MODE:
		err = nvgpu_gpu_ioctl_set_mmu_debug_mode(g,
			(struct nvgpu_gpu_mmu_debug_mode_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_SET_SM_DEBUG_MODE:
		err = gr_gk20a_elpg_protected_call(g,
			nvgpu_gpu_ioctl_set_debug_mode(g, (struct nvgpu_gpu_sm_debug_mode_args *)buf));
		break;

	case NVGPU_GPU_IOCTL_TRIGGER_SUSPEND:
		err = nvgpu_gpu_ioctl_trigger_suspend(g);
		break;

	case NVGPU_GPU_IOCTL_WAIT_FOR_PAUSE:
		err = nvgpu_gpu_ioctl_wait_for_pause(g,
			(struct nvgpu_gpu_wait_pause_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_RESUME_FROM_PAUSE:
		err = nvgpu_gpu_ioctl_resume_from_pause(g);
		break;

	case NVGPU_GPU_IOCTL_CLEAR_SM_ERRORS:
		err = nvgpu_gpu_ioctl_clear_sm_errors(g);
		break;

	case NVGPU_GPU_IOCTL_GET_TPC_EXCEPTION_EN_STATUS:
		err = nvgpu_gpu_ioctl_has_any_exception(g,
			(struct nvgpu_gpu_tpc_exception_en_status_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_NUM_VSMS:
		err = gk20a_ctrl_get_num_vsms(g,
			(struct nvgpu_gpu_num_vsms *)buf);
		break;
	case NVGPU_GPU_IOCTL_VSMS_MAPPING:
		err = gk20a_ctrl_vsm_mapping(g,
			(struct nvgpu_gpu_vsms_mapping *)buf);
		break;

	case NVGPU_GPU_IOCTL_GET_CPU_TIME_CORRELATION_INFO:
		err = nvgpu_gpu_get_cpu_time_correlation_info(g,
			(struct nvgpu_gpu_get_cpu_time_correlation_info_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_GET_GPU_TIME:
		err = nvgpu_gpu_get_gpu_time(g,
			(struct nvgpu_gpu_get_gpu_time_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_GET_ENGINE_INFO:
		err = nvgpu_gpu_get_engine_info(g,
			(struct nvgpu_gpu_get_engine_info_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_ALLOC_VIDMEM:
		err = nvgpu_gpu_alloc_vidmem(g,
			(struct nvgpu_gpu_alloc_vidmem_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_GET_MEMORY_STATE:
		err = nvgpu_gpu_get_memory_state(g,
			(struct nvgpu_gpu_get_memory_state_args *)buf);
		break;

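	/*
	 * The CLK_* and event-fd cases below take priv in addition to g so
	 * they can use the per-file-descriptor clock session stored there.
	 */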
	case NVGPU_GPU_IOCTL_CLK_GET_RANGE:
		err = nvgpu_gpu_clk_get_range(g, priv,
			(struct nvgpu_gpu_clk_range_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_CLK_GET_VF_POINTS:
		err = nvgpu_gpu_clk_get_vf_points(g, priv,
			(struct nvgpu_gpu_clk_vf_points_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_CLK_SET_INFO:
		err = nvgpu_gpu_clk_set_info(g, priv,
			(struct nvgpu_gpu_clk_set_info_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_CLK_GET_INFO:
		err = nvgpu_gpu_clk_get_info(g, priv,
			(struct nvgpu_gpu_clk_get_info_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_GET_EVENT_FD:
		err = nvgpu_gpu_get_event_fd(g, priv,
			(struct nvgpu_gpu_get_event_fd_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_GET_VOLTAGE:
		err = nvgpu_gpu_get_voltage(g,
			(struct nvgpu_gpu_get_voltage_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_GET_CURRENT:
		err = nvgpu_gpu_get_current(g,
			(struct nvgpu_gpu_get_current_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_GET_POWER:
		err = nvgpu_gpu_get_power(g,
			(struct nvgpu_gpu_get_power_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_GET_TEMPERATURE:
		err = nvgpu_gpu_get_temperature(g,
			(struct nvgpu_gpu_get_temperature_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_SET_THERM_ALERT_LIMIT:
		err = nvgpu_gpu_set_therm_alert_limit(g,
			(struct nvgpu_gpu_set_therm_alert_limit_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_SET_DETERMINISTIC_OPTS:
		err = nvgpu_gpu_set_deterministic_opts(g,
			(struct nvgpu_gpu_set_deterministic_opts_args *)buf);
		break;

	case NVGPU_GPU_IOCTL_READ_SINGLE_SM_ERROR_STATE:
		err = nvgpu_gpu_read_single_sm_error_state(g,
			(struct nvgpu_gpu_read_single_sm_error_state_args *)buf);
		break;

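	/* -ENOTTY is the conventional errno for an unrecognized ioctl. */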
	default:
		nvgpu_log_info(g, "unrecognized gpu ioctl cmd: 0x%x", cmd);
		err = -ENOTTY;
		break;
	}

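	/*
	 * For commands with a read direction, copy the staged argument
	 * buffer back out to the caller.
	 */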
	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) {
		if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
			err = -EFAULT;
	}

	return err;
}
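
/*
 * A minimal userspace sketch of how this handler is reached. The device
 * node path and struct field names below are assumptions based on the
 * usual nvgpu uapi, not taken from this file:
 *
 *	int fd = open("/dev/nvhost-ctrl-gpu", O_RDWR);	// assumed node name
 *	struct nvgpu_gpu_get_characteristics args = {0};
 *
 *	// With a zero-sized buffer, the driver reports the size it needs.
 *	if (fd >= 0 &&
 *	    ioctl(fd, NVGPU_GPU_IOCTL_GET_CHARACTERISTICS, &args) == 0)
 *		printf("need %llu bytes\n",
 *		       (unsigned long long)args.gpu_characteristics_buf_size);
 */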