diff options
Diffstat (limited to 'include')
-rw-r--r-- | include/uapi/linux/nvgpu.h | 737 |
1 files changed, 737 insertions, 0 deletions
diff --git a/include/uapi/linux/nvgpu.h b/include/uapi/linux/nvgpu.h new file mode 100644 index 00000000..a4693853 --- /dev/null +++ b/include/uapi/linux/nvgpu.h | |||
@@ -0,0 +1,737 @@ | |||
1 | /* | ||
2 | * NVGPU Public Interface Header | ||
3 | * | ||
4 | * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #ifndef _UAPI__LINUX_NVGPU_IOCTL_H | ||
17 | #define _UAPI__LINUX_NVGPU_IOCTL_H | ||
18 | |||
19 | #include <linux/ioctl.h> | ||
20 | #include <linux/types.h> | ||
21 | |||
22 | #if !defined(__KERNEL__) | ||
23 | #define __user | ||
24 | #endif | ||
25 | |||
26 | /* | ||
27 | * /dev/nvhost-ctrl-gr3d devices | ||
28 | * | ||
29 | * Opening a '/dev/nvhost-ctrl-gr3d' device node creates a way to send | ||
30 | * ctrl ioctl to gpu driver. | ||
31 | * | ||
32 | * /dev/nvhost-gr3d is for channel (context specific) operations. We use | ||
33 | * /dev/nvhost-ctrl-gr3d for global (context independent) operations on | ||
34 | * gpu device. | ||
35 | */ | ||
36 | |||
37 | #define NVGPU_GPU_IOCTL_MAGIC 'G' | ||
38 | |||
/* return zcull ctx size */
struct nvgpu_gpu_zcull_get_ctx_size_args {
	__u32 size;	/* out: zcull context buffer size, in bytes */
/*
 * Use the GCC attribute directly instead of the kernel-internal '__packed'
 * macro: uapi headers are consumed by userspace, where '__packed' is
 * undefined.  The expansion (and therefore the layout) is identical.
 */
} __attribute__((packed));
43 | |||
/* return zcull info */
/*
 * All fields are outputs filled in by the kernel (the ioctl is _IOR);
 * they describe alignment and sizing constraints for zcull buffers.
 */
struct nvgpu_gpu_zcull_get_info_args {
	__u32 width_align_pixels;	/* horizontal alignment, in pixels */
	__u32 height_align_pixels;	/* vertical alignment, in pixels */
	__u32 pixel_squares_by_aliquots;
	__u32 aliquot_total;
	__u32 region_byte_multiplier;
	__u32 region_header_size;	/* bytes */
	__u32 subregion_header_size;	/* bytes */
	__u32 subregion_width_align_pixels;
	__u32 subregion_height_align_pixels;
	__u32 subregion_count;
};
57 | |||
/* zero-bandwidth-clear (ZBC) table entry types */
#define NVGPU_ZBC_COLOR_VALUE_SIZE	4
#define NVGPU_ZBC_TYPE_INVALID		0
#define NVGPU_ZBC_TYPE_COLOR		1
#define NVGPU_ZBC_TYPE_DEPTH		2

/*
 * Add an entry to the ZBC clear table.
 *
 * Note: '__packed' is a kernel-internal macro and is undefined when this
 * uapi header is compiled by userspace; use the equivalent GCC attribute
 * directly.  The binary layout is unchanged.
 */
struct nvgpu_gpu_zbc_set_table_args {
	__u32 color_ds[NVGPU_ZBC_COLOR_VALUE_SIZE];
	__u32 color_l2[NVGPU_ZBC_COLOR_VALUE_SIZE];
	__u32 depth;
	__u32 format;
	__u32 type;	/* color or depth */
} __attribute__((packed));

/* Query an entry of the ZBC clear table. */
struct nvgpu_gpu_zbc_query_table_args {
	__u32 color_ds[NVGPU_ZBC_COLOR_VALUE_SIZE];
	__u32 color_l2[NVGPU_ZBC_COLOR_VALUE_SIZE];
	__u32 depth;
	__u32 ref_cnt;
	__u32 format;
	__u32 type;		/* color or depth */
	__u32 index_size;	/* [out] size, [in] index */
} __attribute__((packed));
80 | |||
81 | |||
/* This contains the minimal set by which the userspace can
   determine all the properties of the GPU */

/* GPU architecture/implementation identifiers (see 'arch'/'impl' below) */
#define NVGPU_GPU_ARCH_GK100 0x000000E0
#define NVGPU_GPU_IMPL_GK20A 0x0000000A

#define NVGPU_GPU_ARCH_GM200 0x00000120
#define NVGPU_GPU_IMPL_GM20B 0x0000000B

/* values for the 'bus_type' field below */
#define NVGPU_GPU_BUS_TYPE_NONE         0
#define NVGPU_GPU_BUS_TYPE_AXI         32

/* bits for the 'flags' field below */
#define NVGPU_GPU_FLAGS_HAS_SYNCPOINTS		(1 << 0)
/* MAP_BUFFER_EX with partial mappings */
#define NVGPU_GPU_FLAGS_SUPPORT_PARTIAL_MAPPINGS	(1 << 1)
/* MAP_BUFFER_EX with sparse allocations */
#define NVGPU_GPU_FLAGS_SUPPORT_SPARSE_ALLOCS		(1 << 2)

/* Filled by the kernel for NVGPU_GPU_IOCTL_GET_CHARACTERISTICS. */
struct nvgpu_gpu_characteristics {
	__u32 arch;	/* NVGPU_GPU_ARCH_* */
	__u32 impl;	/* NVGPU_GPU_IMPL_* */
	__u32 rev;	/* chip revision */

	__u32 num_gpc;

	__u64 L2_cache_size;               /* bytes */
	__u64 on_board_video_memory_size;  /* bytes */

	__u32 num_tpc_per_gpc;
	__u32 bus_type;	/* NVGPU_GPU_BUS_TYPE_* */

	__u32 big_page_size;		/* bytes */
	__u32 compression_page_size;	/* bytes */

	__u32 pde_coverage_bit_count;
	__u32 reserved;	/* keeps 'flags' 8-byte aligned */

	__u64 flags;	/* NVGPU_GPU_FLAGS_* */

	/* Notes:
	   - This struct can be safely appended with new fields. However, always
	     keep the structure size multiple of 8 and make sure that the binary
	     layout does not change between 32-bit and 64-bit architectures.
	   - If the last field is reserved/padding, it is not
	     generally safe to repurpose the field in future revisions.
	*/
};
129 | |||
/*
 * Versioned fetch of struct nvgpu_gpu_characteristics: the caller declares
 * how much buffer it reserved, the kernel reports the full size back, and
 * only the overlapping prefix is copied.  This lets old userspace run
 * against kernels whose characteristics struct has grown.
 */
struct nvgpu_gpu_get_characteristics {
	/* [in]  size reserved by the user space. Can be 0.
	   [out] full buffer size by kernel */
	__u64 gpu_characteristics_buf_size;

	/* [in] address of nvgpu_gpu_characteristics buffer. Filled with field
	   values by exactly MIN(buf_size_in, buf_size_out) bytes. Ignored, if
	   buf_size_in is zero. */
	__u64 gpu_characteristics_buf_addr;
};
140 | |||
/* bitmask of where valid compression bits may live for a buffer */
#define NVGPU_GPU_COMPBITS_NONE		0
#define NVGPU_GPU_COMPBITS_GPU		(1 << 0)
#define NVGPU_GPU_COMPBITS_CDEH		(1 << 1)	/* NOTE(review): presumably CDE horizontal — confirm */
#define NVGPU_GPU_COMPBITS_CDEV		(1 << 2)	/* NOTE(review): presumably CDE vertical — confirm */

/*
 * Prepare a compressible buffer region for reading; may submit GPU work
 * (the fence union and submit_flags mirror the channel SUBMIT_GPFIFO
 * fence handling).
 */
struct nvgpu_gpu_prepare_compressible_read_args {
	__u32 handle;			/* in, dmabuf fd */
	union {
		__u32 request_compbits;	/* in */
		__u32 valid_compbits;	/* out */
	};
	__u64 offset;			/* in, within handle */
	__u64 compbits_hoffset;		/* in, within handle */
	__u64 compbits_voffset;		/* in, within handle */
	__u32 width;			/* in, in pixels */
	__u32 height;			/* in, in pixels */
	__u32 block_height_log2;	/* in */
	__u32 submit_flags;		/* in (NVGPU_SUBMIT_GPFIFO_FLAGS_) */
	union {
		struct {
			__u32 syncpt_id;
			__u32 syncpt_value;
		};
		__s32 fd;	/* sync fence fd, depending on submit_flags */
	} fence;		/* in/out */
	__u32 zbc_color;	/* out */
	__u32 reserved[5];	/* must be zero */
};
169 | |||
/* Mark a buffer region as written with the given valid compression bits. */
struct nvgpu_gpu_mark_compressible_write_args {
	__u32 handle;		/* in, dmabuf fd */
	__u32 valid_compbits;	/* in, NVGPU_GPU_COMPBITS_* mask */
	__u64 offset;		/* in, within handle */
	__u32 zbc_color;	/* in */
	__u32 reserved[3];	/* must be zero */
};
177 | |||
/* ioctl numbers for the /dev/nvhost-ctrl-gr3d control node */
#define NVGPU_GPU_IOCTL_ZCULL_GET_CTX_SIZE \
	_IOR(NVGPU_GPU_IOCTL_MAGIC, 1, struct nvgpu_gpu_zcull_get_ctx_size_args)
#define NVGPU_GPU_IOCTL_ZCULL_GET_INFO \
	_IOR(NVGPU_GPU_IOCTL_MAGIC, 2, struct nvgpu_gpu_zcull_get_info_args)
#define NVGPU_GPU_IOCTL_ZBC_SET_TABLE \
	_IOW(NVGPU_GPU_IOCTL_MAGIC, 3, struct nvgpu_gpu_zbc_set_table_args)
#define NVGPU_GPU_IOCTL_ZBC_QUERY_TABLE \
	_IOWR(NVGPU_GPU_IOCTL_MAGIC, 4, struct nvgpu_gpu_zbc_query_table_args)
#define NVGPU_GPU_IOCTL_GET_CHARACTERISTICS   \
	_IOWR(NVGPU_GPU_IOCTL_MAGIC, 5, struct nvgpu_gpu_get_characteristics)
#define NVGPU_GPU_IOCTL_PREPARE_COMPRESSIBLE_READ \
	_IOWR(NVGPU_GPU_IOCTL_MAGIC, 6, struct nvgpu_gpu_prepare_compressible_read_args)
#define NVGPU_GPU_IOCTL_MARK_COMPRESSIBLE_WRITE \
	_IOWR(NVGPU_GPU_IOCTL_MAGIC, 7, struct nvgpu_gpu_mark_compressible_write_args)

/* highest ioctl nr in use on this node; keep in sync when adding ioctls */
#define NVGPU_GPU_IOCTL_LAST	\
	_IOC_NR(NVGPU_GPU_IOCTL_MARK_COMPRESSIBLE_WRITE)
/* largest argument struct; used to size the kernel's copy buffer */
#define NVGPU_GPU_IOCTL_MAX_ARG_SIZE	\
	sizeof(struct nvgpu_gpu_prepare_compressible_read_args)
197 | |||
198 | |||
/*
 * /dev/nvhost-tsg-gpu devices
 *
 * Opening a '/dev/nvhost-tsg-gpu' device node creates a way to
 * bind/unbind a channel to/from TSG group
 */

#define NVGPU_TSG_IOCTL_MAGIC 'T'

/* argument for BIND/UNBIND is the channel fd (plain int) */
#define NVGPU_TSG_IOCTL_BIND_CHANNEL \
	_IOW(NVGPU_TSG_IOCTL_MAGIC, 1, int)
#define NVGPU_TSG_IOCTL_UNBIND_CHANNEL \
	_IOW(NVGPU_TSG_IOCTL_MAGIC, 2, int)
/*
 * NOTE(review): naming is inconsistent with the two macros above
 * (NVGPU_IOCTL_TSG_* vs NVGPU_TSG_IOCTL_*); these names are userspace ABI
 * and cannot be renamed without breaking callers.
 */
#define NVGPU_IOCTL_TSG_ENABLE \
	_IO(NVGPU_TSG_IOCTL_MAGIC, 3)
#define NVGPU_IOCTL_TSG_DISABLE \
	_IO(NVGPU_TSG_IOCTL_MAGIC, 4)
#define NVGPU_IOCTL_TSG_PREEMPT \
	_IO(NVGPU_TSG_IOCTL_MAGIC, 5)

#define NVGPU_TSG_IOCTL_MAX_ARG_SIZE	\
	sizeof(int)
#define NVGPU_TSG_IOCTL_LAST		\
	_IOC_NR(NVGPU_IOCTL_TSG_PREEMPT)
223 | /* | ||
224 | * /dev/nvhost-dbg-* devices | ||
225 | * | ||
226 | * Opening a '/dev/nvhost-dbg-<module_name>' device node creates a new debugger | ||
227 | * session. nvgpu channels (for the same module) can then be bound to such a | ||
228 | * session. | ||
229 | * | ||
230 | * Once a nvgpu channel has been bound to a debugger session it cannot be | ||
231 | * bound to another. | ||
232 | * | ||
233 | * As long as there is an open device file to the session, or any bound | ||
234 | * nvgpu channels it will be valid. Once all references to the session | ||
235 | * are removed the session is deleted. | ||
236 | * | ||
237 | */ | ||
238 | |||
#define NVGPU_DBG_GPU_IOCTL_MAGIC 'D'

/*
 * Binding/attaching a debugger session to an nvgpu channel
 *
 * The 'channel_fd' given here is the fd used to allocate the
 * gpu channel context. To detach/unbind the debugger session
 * use a channel_fd of -1.
 *
 */
struct nvgpu_dbg_gpu_bind_channel_args {
	/* NOTE(review): declared __u32 although the comment above says -1
	   detaches; -1 is presumably passed as 0xffffffff — confirm. */
	__u32 channel_fd; /* in */
	__u32 _pad0[1];   /* keeps struct size a multiple of 8 */
};

#define NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL				\
	_IOWR(NVGPU_DBG_GPU_IOCTL_MAGIC, 1, struct nvgpu_dbg_gpu_bind_channel_args)
256 | |||
/*
 * Register operations
 */
/* valid op values */
#define NVGPU_DBG_GPU_REG_OP_READ_32                 (0x00000000)
#define NVGPU_DBG_GPU_REG_OP_WRITE_32                (0x00000001)
#define NVGPU_DBG_GPU_REG_OP_READ_64                 (0x00000002)
#define NVGPU_DBG_GPU_REG_OP_WRITE_64                (0x00000003)
/* note: 8b ops are unsupported */
#define NVGPU_DBG_GPU_REG_OP_READ_08                 (0x00000004)
#define NVGPU_DBG_GPU_REG_OP_WRITE_08                (0x00000005)

/* valid type values */
#define NVGPU_DBG_GPU_REG_OP_TYPE_GLOBAL             (0x00000000)
#define NVGPU_DBG_GPU_REG_OP_TYPE_GR_CTX             (0x00000001)
#define NVGPU_DBG_GPU_REG_OP_TYPE_GR_CTX_TPC         (0x00000002)
#define NVGPU_DBG_GPU_REG_OP_TYPE_GR_CTX_SM          (0x00000004)
#define NVGPU_DBG_GPU_REG_OP_TYPE_GR_CTX_CROP        (0x00000008)
#define NVGPU_DBG_GPU_REG_OP_TYPE_GR_CTX_ZROP        (0x00000010)
/*#define NVGPU_DBG_GPU_REG_OP_TYPE_FB               (0x00000020)*/
#define NVGPU_DBG_GPU_REG_OP_TYPE_GR_CTX_QUAD        (0x00000040)

/* valid status values (set per-op by the kernel) */
#define NVGPU_DBG_GPU_REG_OP_STATUS_SUCCESS          (0x00000000)
#define NVGPU_DBG_GPU_REG_OP_STATUS_INVALID_OP       (0x00000001)
#define NVGPU_DBG_GPU_REG_OP_STATUS_INVALID_TYPE     (0x00000002)
#define NVGPU_DBG_GPU_REG_OP_STATUS_INVALID_OFFSET   (0x00000004)
#define NVGPU_DBG_GPU_REG_OP_STATUS_UNSUPPORTED_OP   (0x00000008)
#define NVGPU_DBG_GPU_REG_OP_STATUS_INVALID_MASK     (0x00000010)

/* one register read/write operation; an array of these is submitted */
struct nvgpu_dbg_gpu_reg_op {
	__u8    op;		/* NVGPU_DBG_GPU_REG_OP_{READ,WRITE}_* */
	__u8    type;		/* NVGPU_DBG_GPU_REG_OP_TYPE_* */
	__u8    status;		/* out: NVGPU_DBG_GPU_REG_OP_STATUS_* */
	__u8    quad;
	__u32   group_mask;
	__u32   sub_group_mask;
	__u32   offset;		/* register offset */
	__u32   value_lo;	/* low 32 bits of value (64-bit ops use both) */
	__u32   value_hi;
	__u32   and_n_mask_lo;
	__u32   and_n_mask_hi;
};

struct nvgpu_dbg_gpu_exec_reg_ops_args {
	__u64 ops; /* pointer to nvgpu_reg_op operations */
	__u32 num_ops;
	__u32 _pad0[1];	/* keeps struct size a multiple of 8 */
};

#define NVGPU_DBG_GPU_IOCTL_REG_OPS					\
	_IOWR(NVGPU_DBG_GPU_IOCTL_MAGIC, 2, struct nvgpu_dbg_gpu_exec_reg_ops_args)
309 | |||
/* Enable/disable/clear event notifications */
struct nvgpu_dbg_gpu_events_ctrl_args {
	__u32 cmd;      /* in: NVGPU_DBG_GPU_EVENTS_CTRL_CMD_* */
	__u32 _pad0[1]; /* keeps struct size a multiple of 8 */
};

/* valid event ctrl values */
#define NVGPU_DBG_GPU_EVENTS_CTRL_CMD_DISABLE (0x00000000)
#define NVGPU_DBG_GPU_EVENTS_CTRL_CMD_ENABLE  (0x00000001)
#define NVGPU_DBG_GPU_EVENTS_CTRL_CMD_CLEAR   (0x00000002)

#define NVGPU_DBG_GPU_IOCTL_EVENTS_CTRL				\
	_IOWR(NVGPU_DBG_GPU_IOCTL_MAGIC, 3, struct nvgpu_dbg_gpu_events_ctrl_args)
323 | |||
324 | |||
/* Powergate/Unpowergate control */

#define NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE                                 1
#define NVGPU_DBG_GPU_POWERGATE_MODE_DISABLE                                2

struct nvgpu_dbg_gpu_powergate_args {
	__u32 mode;	/* in: NVGPU_DBG_GPU_POWERGATE_MODE_* */
/* use the GCC attribute: the kernel-internal '__packed' macro is undefined
 * in userspace builds of this uapi header; layout is unchanged */
} __attribute__((packed));

#define NVGPU_DBG_GPU_IOCTL_POWERGATE					\
	_IOWR(NVGPU_DBG_GPU_IOCTL_MAGIC, 4, struct nvgpu_dbg_gpu_powergate_args)
336 | |||
337 | |||
/* SMPC Context Switch Mode */
#define NVGPU_DBG_GPU_SMPC_CTXSW_MODE_NO_CTXSW         (0x00000000)
#define NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW            (0x00000001)

struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args {
	__u32 mode;	/* in: NVGPU_DBG_GPU_SMPC_CTXSW_MODE_* */
/* use the GCC attribute: the kernel-internal '__packed' macro is undefined
 * in userspace builds of this uapi header; layout is unchanged */
} __attribute__((packed));

#define NVGPU_DBG_GPU_IOCTL_SMPC_CTXSW_MODE \
	_IOWR(NVGPU_DBG_GPU_IOCTL_MAGIC, 5, struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args)

/* highest dbg ioctl nr / largest dbg argument struct */
#define NVGPU_DBG_GPU_IOCTL_LAST	\
	_IOC_NR(NVGPU_DBG_GPU_IOCTL_SMPC_CTXSW_MODE)
#define NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE	\
	sizeof(struct nvgpu_dbg_gpu_exec_reg_ops_args)
354 | |||
/*
 * /dev/nvhost-gpu device
 */

#define NVGPU_IOCTL_MAGIC 'H'
/* channel timeout value meaning "never time out" */
#define NVGPU_NO_TIMEOUT (-1)
/* values for nvgpu_set_priority_args.priority */
#define NVGPU_PRIORITY_LOW 50
#define NVGPU_PRIORITY_MEDIUM 100
#define NVGPU_PRIORITY_HIGH 150
/* flag for nvgpu_set_timeout_ex_args.flags */
#define NVGPU_TIMEOUT_FLAG_DISABLE_DUMP 0

/* one 8-byte GPFIFO (command push buffer) entry */
struct nvgpu_gpfifo {
	__u32 entry0; /* first word of gpfifo entry */
	__u32 entry1; /* second word of gpfifo entry */
};
370 | |||
/*
 * Small channel-ioctl argument structs.  '__packed' is replaced with the
 * equivalent GCC attribute because the kernel-internal '__packed' macro is
 * undefined when userspace compiles this uapi header; layout is unchanged.
 */
struct nvgpu_get_param_args {
	__u32 value;	/* out */
} __attribute__((packed));

struct nvgpu_channel_open_args {
	__s32 channel_fd;	/* out: fd of the newly opened channel */
};

struct nvgpu_set_nvmap_fd_args {
	__u32 fd;	/* in */
} __attribute__((packed));

struct nvgpu_alloc_obj_ctx_args {
	__u32 class_num;	/* kepler3d, 2d, compute, etc       */
	__u32 padding;		/* keeps obj_id 8-byte aligned */
	__u64 obj_id;		/* output, used to free later       */
};

struct nvgpu_free_obj_ctx_args {
	__u64 obj_id;		/* obj ctx to free */
};
392 | |||
/* Allocate the channel's GPFIFO ring of 'num_entries' entries. */
struct nvgpu_alloc_gpfifo_args {
	__u32 num_entries;
#define NVGPU_ALLOC_GPFIFO_FLAGS_VPR_ENABLED	(1 << 0) /* set owner channel of this gpfifo as a vpr channel */
	__u32 flags;

};

/* timing info attached to a sync point (gk20a legacy naming) */
struct gk20a_sync_pt_info {
	__u64 hw_op_ns;	/* hardware operation duration, nanoseconds */
};

/* a syncpoint (id, value) pair, or a sync fence fd, see SUBMIT flags */
struct nvgpu_fence {
	__u32 id;        /* syncpoint id or sync fence fd */
	__u32 value;     /* syncpoint value (discarded when using sync fence) */
};
408 | |||
/*
 * Flags for nvgpu_submit_gpfifo_args.flags.  Spelled as plain shifts
 * rather than the kernel-internal BIT() macro, which is undefined when
 * userspace compiles this uapi header; values are unchanged.
 */
/* insert a wait on the fence before submitting gpfifo */
#define NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_WAIT	(1 << 0)
/* insert a fence update after submitting gpfifo and
   return the new fence for others to wait on */
#define NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET	(1 << 1)
/* choose between different gpfifo entry formats */
#define NVGPU_SUBMIT_GPFIFO_FLAGS_HW_FORMAT	(1 << 2)
/* interpret fence as a sync fence fd instead of raw syncpoint fence */
#define NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE	(1 << 3)
/* suppress WFI before fence trigger */
#define NVGPU_SUBMIT_GPFIFO_FLAGS_SUPPRESS_WFI	(1 << 4)
420 | |||
/* Submit 'num_entries' gpfifo entries read from userspace pointer 'gpfifo'. */
struct nvgpu_submit_gpfifo_args {
	__u64 gpfifo;			/* in: user pointer to gpfifo entries */
	__u32 num_entries;		/* in */
	__u32 flags;			/* in: NVGPU_SUBMIT_GPFIFO_FLAGS_* */
	struct nvgpu_fence fence;	/* in/out, meaning depends on flags */
};
427 | |||
/*
 * Map an nvmap buffer into the channel's GPU address space.
 *
 * Flag values are spelled as plain shifts rather than the kernel-internal
 * BIT() macro, which is undefined when userspace compiles this uapi
 * header; values are unchanged.
 */
struct nvgpu_map_buffer_args {
	__u32 flags;		/* in/out */
#define NVGPU_MAP_BUFFER_FLAGS_ALIGN		0x0
#define NVGPU_MAP_BUFFER_FLAGS_OFFSET		(1 << 0)
#define NVGPU_MAP_BUFFER_FLAGS_KIND_PITCH	0x0
#define NVGPU_MAP_BUFFER_FLAGS_KIND_SPECIFIED	(1 << 1)
#define NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_FALSE	0x0
#define NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE	(1 << 2)
	__u32 nvmap_handle;	/* in */
	union {
		__u64 offset; /* valid if _offset flag given (in|out) */
		__u64 align;  /* alignment multiple (0:={1 or n/a})   */
	} offset_alignment;
	__u32 kind;		/* in: page kind, valid iff KIND_SPECIFIED */
#define NVGPU_MAP_BUFFER_KIND_GENERIC_16BX2	0xfe
};
444 | |||
/* Unmap a buffer previously mapped at GPU virtual address 'offset'. */
struct nvgpu_unmap_buffer_args {
	__u64 offset;	/* in: GPU VA returned by the map call */
};
448 | |||
/* Wait on a notifier or semaphore condition, with a timeout. */
struct nvgpu_wait_args {
#define NVGPU_WAIT_TYPE_NOTIFIER	0x0
#define NVGPU_WAIT_TYPE_SEMAPHORE	0x1
	__u32 type;	/* in: NVGPU_WAIT_TYPE_* */
	__u32 timeout;	/* in */
	union {
		struct {
			/* handle and offset for notifier memory */
			__u32 dmabuf_fd;
			__u32 offset;
			__u32 padding1;
			__u32 padding2;
		} notifier;
		struct {
			/* handle and offset for semaphore memory */
			__u32 dmabuf_fd;
			__u32 offset;
			/* semaphore payload to wait for */
			__u32 payload;
			__u32 padding;
		} semaphore;
	} condition;	/* determined by type field */
};
472 | |||
/* cycle stats support */
/*
 * In the structs below, '__packed' is replaced with the equivalent GCC
 * attribute: the kernel-internal '__packed' macro is undefined when
 * userspace compiles this uapi header.  Layout is unchanged.
 */
struct nvgpu_cycle_stats_args {
	__u32 dmabuf_fd;	/* in: buffer receiving cycle stats */
} __attribute__((packed));

struct nvgpu_set_timeout_args {
	__u32 timeout;	/* in */
} __attribute__((packed));

struct nvgpu_set_timeout_ex_args {
	__u32 timeout;	/* in */
	__u32 flags;	/* in: NVGPU_TIMEOUT_FLAG_* */
};

struct nvgpu_set_priority_args {
	__u32 priority;	/* in: NVGPU_PRIORITY_{LOW,MEDIUM,HIGH} */
} __attribute__((packed));
490 | |||
/* values for nvgpu_zcull_bind_args.mode */
#define NVGPU_ZCULL_MODE_GLOBAL		0
#define NVGPU_ZCULL_MODE_NO_CTXSW		1
#define NVGPU_ZCULL_MODE_SEPARATE_BUFFER	2
#define NVGPU_ZCULL_MODE_PART_OF_REGULAR_BUF	3

/* Bind a zcull context buffer at the given GPU VA to this channel. */
struct nvgpu_zcull_bind_args {
	__u64 gpu_va;	/* in */
	__u32 mode;	/* in: NVGPU_ZCULL_MODE_* */
	__u32 padding;	/* keeps struct size a multiple of 8 */
};

/* Attach an error-notifier region within dmabuf 'mem' to this channel. */
struct nvgpu_set_error_notifier {
	__u64 offset;	/* in: offset within 'mem' */
	__u64 size;	/* in */
	__u32 mem;	/* in: dmabuf fd holding the notifier */
	__u32 padding;	/* keeps struct size a multiple of 8 */
};
508 | |||
/*
 * Error notification record written by the kernel into the buffer
 * registered via NVGPU_IOCTL_CHANNEL_SET_ERROR_NOTIFIER.  The trailing
 * hex comments give the byte offsets of each field in the record.
 */
struct nvgpu_notification {
	struct {			/* 0000- */
		__u32 nanoseconds[2];	/* nanoseconds since Jan. 1, 1970 */
	} time_stamp;			/* -0007 */
	__u32 info32;	/* info returned depends on method 0008-000b */
/* error codes reported through 'info32' */
#define	NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT		8
#define	NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY		13
#define	NVGPU_CHANNEL_GR_SEMAPHORE_TIMEOUT		24
#define	NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY			25
#define	NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT		31
#define	NVGPU_CHANNEL_PBDMA_ERROR			32
#define	NVGPU_CHANNEL_RESETCHANNEL_VERIF_ERROR		43
	__u16 info16;	/* info returned depends on method 000c-000d */
	__u16 status;	/* user sets bit 15, NV sets status 000e-000f */
#define	NVGPU_CHANNEL_SUBMIT_TIMEOUT		1
};
525 | |||
/* Enable/disable/clear event notifications */
struct nvgpu_channel_events_ctrl_args {
	__u32 cmd;      /* in: NVGPU_IOCTL_CHANNEL_EVENTS_CTRL_CMD_* */
	__u32 _pad0[1]; /* keeps struct size a multiple of 8 */
};

/* valid event ctrl values */
#define NVGPU_IOCTL_CHANNEL_EVENTS_CTRL_CMD_DISABLE 0
#define NVGPU_IOCTL_CHANNEL_EVENTS_CTRL_CMD_ENABLE  1
#define NVGPU_IOCTL_CHANNEL_EVENTS_CTRL_CMD_CLEAR   2
536 | |||
/*
 * Channel ioctl numbers.  The numbering is sparse (5, 11-13, 18, then
 * 100+); presumably the low numbers preserve compatibility with legacy
 * nvhost channel ioctls — do not renumber.
 */
#define NVGPU_IOCTL_CHANNEL_SET_NVMAP_FD	\
	_IOW(NVGPU_IOCTL_MAGIC, 5, struct nvgpu_set_nvmap_fd_args)
#define NVGPU_IOCTL_CHANNEL_SET_TIMEOUT	\
	_IOW(NVGPU_IOCTL_MAGIC, 11, struct nvgpu_set_timeout_args)
#define NVGPU_IOCTL_CHANNEL_GET_TIMEDOUT	\
	_IOR(NVGPU_IOCTL_MAGIC, 12, struct nvgpu_get_param_args)
#define NVGPU_IOCTL_CHANNEL_SET_PRIORITY	\
	_IOW(NVGPU_IOCTL_MAGIC, 13, struct nvgpu_set_priority_args)
#define NVGPU_IOCTL_CHANNEL_SET_TIMEOUT_EX	\
	_IOWR(NVGPU_IOCTL_MAGIC, 18, struct nvgpu_set_timeout_ex_args)
#define NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO	\
	_IOW(NVGPU_IOCTL_MAGIC, 100, struct nvgpu_alloc_gpfifo_args)
#define NVGPU_IOCTL_CHANNEL_WAIT		\
	_IOWR(NVGPU_IOCTL_MAGIC, 102, struct nvgpu_wait_args)
#define NVGPU_IOCTL_CHANNEL_CYCLE_STATS	\
	_IOWR(NVGPU_IOCTL_MAGIC, 106, struct nvgpu_cycle_stats_args)
#define NVGPU_IOCTL_CHANNEL_SUBMIT_GPFIFO	\
	_IOWR(NVGPU_IOCTL_MAGIC, 107, struct nvgpu_submit_gpfifo_args)
#define NVGPU_IOCTL_CHANNEL_ALLOC_OBJ_CTX	\
	_IOWR(NVGPU_IOCTL_MAGIC, 108, struct nvgpu_alloc_obj_ctx_args)
#define NVGPU_IOCTL_CHANNEL_FREE_OBJ_CTX	\
	_IOR(NVGPU_IOCTL_MAGIC, 109, struct nvgpu_free_obj_ctx_args)
#define NVGPU_IOCTL_CHANNEL_ZCULL_BIND		\
	_IOWR(NVGPU_IOCTL_MAGIC, 110, struct nvgpu_zcull_bind_args)
#define NVGPU_IOCTL_CHANNEL_SET_ERROR_NOTIFIER  \
	_IOWR(NVGPU_IOCTL_MAGIC, 111, struct nvgpu_set_error_notifier)
#define NVGPU_IOCTL_CHANNEL_OPEN	\
	_IOR(NVGPU_IOCTL_MAGIC, 112, struct nvgpu_channel_open_args)
#define NVGPU_IOCTL_CHANNEL_ENABLE	\
	_IO(NVGPU_IOCTL_MAGIC, 113)
#define NVGPU_IOCTL_CHANNEL_DISABLE	\
	_IO(NVGPU_IOCTL_MAGIC, 114)
#define NVGPU_IOCTL_CHANNEL_PREEMPT	\
	_IO(NVGPU_IOCTL_MAGIC, 115)
#define NVGPU_IOCTL_CHANNEL_FORCE_RESET	\
	_IO(NVGPU_IOCTL_MAGIC, 116)
#define NVGPU_IOCTL_CHANNEL_EVENTS_CTRL	\
	_IOW(NVGPU_IOCTL_MAGIC, 117, struct nvgpu_channel_events_ctrl_args)

/* highest channel ioctl nr / largest channel argument struct */
#define NVGPU_IOCTL_CHANNEL_LAST	\
	_IOC_NR(NVGPU_IOCTL_CHANNEL_EVENTS_CTRL)
#define NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE sizeof(struct nvgpu_submit_gpfifo_args)
579 | |||
580 | /* | ||
581 | * /dev/nvhost-as-* devices | ||
582 | * | ||
583 | * Opening a '/dev/nvhost-as-<module_name>' device node creates a new address | ||
584 | * space. nvgpu channels (for the same module) can then be bound to such an | ||
585 | * address space to define the addresses it has access to. | ||
586 | * | ||
587 | * Once a nvgpu channel has been bound to an address space it cannot be | ||
588 | * unbound. There is no support for allowing an nvgpu channel to change from | ||
589 | * one address space to another (or from one to none). | ||
590 | * | ||
591 | * As long as there is an open device file to the address space, or any bound | ||
592 | * nvgpu channels it will be valid. Once all references to the address space | ||
593 | * are removed the address space is deleted. | ||
594 | * | ||
595 | */ | ||
596 | |||
#define NVGPU_AS_IOCTL_MAGIC 'A'

/*
 * Allocating an address space range:
 *
 * Address ranges created with this ioctl are reserved for later use with
 * fixed-address buffer mappings.
 *
 * If _FLAGS_FIXED_OFFSET is specified then the new range starts at the 'offset'
 * given. Otherwise the address returned is chosen to be a multiple of 'align.'
 *
 */
/*
 * Legacy layout without the 'padding' field (note the 'nvgpu32' name and
 * the separate ioctl nr 2 vs 6); presumably kept for compatibility with
 * older/32-bit userspace — the union below is then only 4-byte aligned.
 */
struct nvgpu32_as_alloc_space_args {
	__u32 pages;     /* in, pages */
	__u32 page_size; /* in, bytes */
	__u32 flags;     /* in */
#define NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET 0x1
#define NVGPU_AS_ALLOC_SPACE_FLAGS_SPARSE 0x2
	union {
		__u64 offset; /* inout, byte address valid iff _FIXED_OFFSET */
		__u64 align;  /* in, alignment multiple (0:={1 or n/a})       */
	} o_a;
};
620 | |||
/* Current alloc-space layout; 'padding' keeps the union 8-byte aligned. */
struct nvgpu_as_alloc_space_args {
	__u32 pages;     /* in, pages */
	__u32 page_size; /* in, bytes */
	__u32 flags;     /* in: NVGPU_AS_ALLOC_SPACE_FLAGS_* */
	__u32 padding;   /* in */
	union {
		__u64 offset; /* inout, byte address valid iff _FIXED_OFFSET */
		__u64 align;  /* in, alignment multiple (0:={1 or n/a})       */
	} o_a;
};

/*
 * Releasing an address space range:
 *
 * The previously allocated region starting at 'offset' is freed. If there are
 * any buffers currently mapped inside the region the ioctl will fail.
 */
struct nvgpu_as_free_space_args {
	__u64 offset; /* in, byte address */
	__u32 pages;     /* in, pages */
	__u32 page_size; /* in, bytes */
};
643 | |||
/*
 * Binding a nvgpu channel to an address space:
 *
 * A channel must be bound to an address space before allocating a gpfifo
 * in nvgpu.  The 'channel_fd' given here is the fd used to allocate the
 * channel.  Once a channel has been bound to an address space it cannot
 * be unbound (except for when the channel is destroyed).
 */
struct nvgpu_as_bind_channel_args {
	__u32 channel_fd; /* in */
/* use the GCC attribute: the kernel-internal '__packed' macro is undefined
 * in userspace builds of this uapi header; layout is unchanged */
} __attribute__((packed));
655 | |||
/*
 * Mapping nvmap buffers into an address space:
 *
 * The start address is the 'offset' given if _FIXED_OFFSET is specified.
 * Otherwise the address returned is a multiple of 'align.'
 *
 * If 'page_size' is set to 0 the nvmap buffer's allocation alignment/sizing
 * will be used to determine the page size (largest possible). The page size
 * chosen will be returned back to the caller in the 'page_size' parameter in
 * that case.
 *
 * Flag values are spelled as plain shifts rather than the kernel-internal
 * BIT() macro, which is undefined when userspace compiles this uapi
 * header; values are unchanged.
 */
struct nvgpu_as_map_buffer_args {
	__u32 flags;		/* in/out */
#define NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET	(1 << 0)
#define NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE	(1 << 2)
	__u32 reserved;		/* in */
	__u32 dmabuf_fd;	/* in */
	__u32 page_size;	/* inout, 0:= best fit to buffer */
	union {
		__u64 offset;	/* inout, byte address valid iff _FIXED_OFFSET */
		__u64 align;	/* in, alignment multiple (0:={1 or n/a})       */
	} o_a;
};
679 | |||
/*
 * Mapping dmabuf fds into an address space:
 *
 * The caller requests a mapping to a particular page 'kind'.
 *
 * If 'page_size' is set to 0 the dmabuf's alignment/sizing will be used to
 * determine the page size (largest possible). The page size chosen will be
 * returned back to the caller in the 'page_size' parameter in that case.
 */
struct nvgpu_as_map_buffer_ex_args {
	__u32 flags;		/* in/out: NVGPU_AS_MAP_BUFFER_FLAGS_* */
#define NV_KIND_DEFAULT -1
	__s32 kind;		/* in (-1 represents default) */
	__u32 dmabuf_fd;	/* in */
	__u32 page_size;	/* inout, 0:= best fit to buffer */

	__u64 buffer_offset;	/* in, offset of mapped buffer region */
	__u64 mapping_size;	/* in, size of mapped buffer region */

	__u64 offset;		/* in/out, we use this address if flag
				 * FIXED_OFFSET is set. This will fail
				 * if space is not properly allocated. The
				 * actual virtual address to which we mapped
				 * the buffer is returned in this field. */
};

/*
 * Unmapping a buffer:
 *
 * To unmap a previously mapped buffer set 'offset' to the offset returned in
 * the mapping call. This includes where a buffer has been mapped into a fixed
 * offset of a previously allocated address space range.
 */
struct nvgpu_as_unmap_buffer_args {
	__u64 offset; /* in, byte address */
};
716 | |||
/* address-space ioctl numbers; nr 2 is the legacy 32-bit alloc-space layout */
#define NVGPU_AS_IOCTL_BIND_CHANNEL \
	_IOWR(NVGPU_AS_IOCTL_MAGIC, 1, struct nvgpu_as_bind_channel_args)
#define NVGPU32_AS_IOCTL_ALLOC_SPACE \
	_IOWR(NVGPU_AS_IOCTL_MAGIC, 2, struct nvgpu32_as_alloc_space_args)
#define NVGPU_AS_IOCTL_FREE_SPACE \
	_IOWR(NVGPU_AS_IOCTL_MAGIC, 3, struct nvgpu_as_free_space_args)
#define NVGPU_AS_IOCTL_MAP_BUFFER \
	_IOWR(NVGPU_AS_IOCTL_MAGIC, 4, struct nvgpu_as_map_buffer_args)
#define NVGPU_AS_IOCTL_UNMAP_BUFFER \
	_IOWR(NVGPU_AS_IOCTL_MAGIC, 5, struct nvgpu_as_unmap_buffer_args)
#define NVGPU_AS_IOCTL_ALLOC_SPACE \
	_IOWR(NVGPU_AS_IOCTL_MAGIC, 6, struct nvgpu_as_alloc_space_args)
#define NVGPU_AS_IOCTL_MAP_BUFFER_EX \
	_IOWR(NVGPU_AS_IOCTL_MAGIC, 7, struct nvgpu_as_map_buffer_ex_args)

/* highest AS ioctl nr / largest AS argument struct */
#define NVGPU_AS_IOCTL_LAST            \
	_IOC_NR(NVGPU_AS_IOCTL_MAP_BUFFER_EX)
#define NVGPU_AS_IOCTL_MAX_ARG_SIZE	\
	sizeof(struct nvgpu_as_map_buffer_ex_args)
736 | |||
737 | #endif | ||