path: root/include/nvgpu/vgpu
Diffstat (limited to 'include/nvgpu/vgpu')
-rw-r--r--   include/nvgpu/vgpu/tegra_vgpu.h   817
-rw-r--r--   include/nvgpu/vgpu/vgpu.h         110
-rw-r--r--   include/nvgpu/vgpu/vgpu_ivc.h      45
-rw-r--r--   include/nvgpu/vgpu/vgpu_ivm.h      37
-rw-r--r--   include/nvgpu/vgpu/vm.h            31
5 files changed, 0 insertions, 1040 deletions
diff --git a/include/nvgpu/vgpu/tegra_vgpu.h b/include/nvgpu/vgpu/tegra_vgpu.h
deleted file mode 100644
index e33dce9..0000000
--- a/include/nvgpu/vgpu/tegra_vgpu.h
+++ /dev/null
@@ -1,817 +0,0 @@
1/*
2 * Tegra GPU Virtualization Interfaces to Server
3 *
4 * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef TEGRA_VGPU_H
26#define TEGRA_VGPU_H
27
28#include <nvgpu/types.h>
29#include <nvgpu/ecc.h> /* For NVGPU_ECC_STAT_NAME_MAX_SIZE */
30
31enum {
32 TEGRA_VGPU_MODULE_GPU = 0,
33};
34
35enum {
36 /* Needs to follow last entry in TEGRA_VHOST_QUEUE_* list,
37 * in tegra_vhost.h
38 */
39 TEGRA_VGPU_QUEUE_CMD = 3,
40 TEGRA_VGPU_QUEUE_INTR
41};
42
43enum {
44 TEGRA_VGPU_CMD_CONNECT = 0,
45 TEGRA_VGPU_CMD_DISCONNECT = 1,
46 TEGRA_VGPU_CMD_ABORT = 2,
47 TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX = 3,
48 TEGRA_VGPU_CMD_CHANNEL_FREE_HWCTX = 4,
49 TEGRA_VGPU_CMD_GET_ATTRIBUTE = 5,
50 TEGRA_VGPU_CMD_MAP_BAR1 = 6,
51 TEGRA_VGPU_CMD_AS_ALLOC_SHARE = 7,
52 TEGRA_VGPU_CMD_AS_BIND_SHARE = 8,
53 TEGRA_VGPU_CMD_AS_FREE_SHARE = 9,
54 TEGRA_VGPU_CMD_AS_UNMAP = 11,
55 TEGRA_VGPU_CMD_CHANNEL_BIND = 13,
56 TEGRA_VGPU_CMD_CHANNEL_UNBIND = 14,
57 TEGRA_VGPU_CMD_CHANNEL_DISABLE = 15,
58 TEGRA_VGPU_CMD_CHANNEL_PREEMPT = 16,
59 TEGRA_VGPU_CMD_CHANNEL_SETUP_RAMFC = 17,
60 TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_CTX = 20,
61 TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_PATCH_CTX = 21,
62 TEGRA_VGPU_CMD_CHANNEL_FREE_GR_PATCH_CTX = 22,
63 TEGRA_VGPU_CMD_CHANNEL_MAP_GR_GLOBAL_CTX = 23,
64 TEGRA_VGPU_CMD_CHANNEL_UNMAP_GR_GLOBAL_CTX = 24,
65 TEGRA_VGPU_CMD_CHANNEL_COMMIT_GR_GLOBAL_CTX = 25,
66 TEGRA_VGPU_CMD_CHANNEL_LOAD_GR_GOLDEN_CTX = 26,
67 TEGRA_VGPU_CMD_CHANNEL_BIND_ZCULL = 27,
68 TEGRA_VGPU_CMD_CACHE_MAINT = 28,
69 TEGRA_VGPU_CMD_SUBMIT_RUNLIST = 29,
70 TEGRA_VGPU_CMD_GET_ZCULL_INFO = 30,
71 TEGRA_VGPU_CMD_ZBC_SET_TABLE = 31,
72 TEGRA_VGPU_CMD_ZBC_QUERY_TABLE = 32,
73 TEGRA_VGPU_CMD_AS_MAP_EX = 33,
74 TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTXSW_BUFFERS = 34,
75 TEGRA_VGPU_CMD_SET_MMU_DEBUG_MODE = 35,
76 TEGRA_VGPU_CMD_SET_SM_DEBUG_MODE = 36,
77 TEGRA_VGPU_CMD_REG_OPS = 37,
78 TEGRA_VGPU_CMD_CHANNEL_SET_PRIORITY = 38,
79 TEGRA_VGPU_CMD_CHANNEL_SET_RUNLIST_INTERLEAVE = 39,
80 TEGRA_VGPU_CMD_CHANNEL_SET_TIMESLICE = 40,
81 TEGRA_VGPU_CMD_FECS_TRACE_ENABLE = 41,
82 TEGRA_VGPU_CMD_FECS_TRACE_DISABLE = 42,
83 TEGRA_VGPU_CMD_FECS_TRACE_POLL = 43,
84 TEGRA_VGPU_CMD_FECS_TRACE_SET_FILTER = 44,
85 TEGRA_VGPU_CMD_CHANNEL_SET_SMPC_CTXSW_MODE = 45,
86 TEGRA_VGPU_CMD_CHANNEL_SET_HWPM_CTXSW_MODE = 46,
87 TEGRA_VGPU_CMD_CHANNEL_FREE_HWPM_CTX = 47,
88 TEGRA_VGPU_CMD_GR_CTX_ALLOC = 48,
89 TEGRA_VGPU_CMD_GR_CTX_FREE = 49,
90 TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTX = 50,
91 TEGRA_VGPU_CMD_TSG_BIND_GR_CTX = 51,
92 TEGRA_VGPU_CMD_TSG_BIND_CHANNEL = 52,
93 TEGRA_VGPU_CMD_TSG_UNBIND_CHANNEL = 53,
94 TEGRA_VGPU_CMD_TSG_PREEMPT = 54,
95 TEGRA_VGPU_CMD_TSG_SET_TIMESLICE = 55,
96 TEGRA_VGPU_CMD_TSG_SET_RUNLIST_INTERLEAVE = 56,
97 TEGRA_VGPU_CMD_CHANNEL_FORCE_RESET = 57,
98 TEGRA_VGPU_CMD_CHANNEL_ENABLE = 58,
99 TEGRA_VGPU_CMD_READ_PTIMER = 59,
100 TEGRA_VGPU_CMD_SET_POWERGATE = 60,
101 TEGRA_VGPU_CMD_SET_GPU_CLK_RATE = 61,
102 TEGRA_VGPU_CMD_GET_CONSTANTS = 62,
103 TEGRA_VGPU_CMD_CHANNEL_CYCLESTATS_SNAPSHOT = 63,
104 TEGRA_VGPU_CMD_TSG_OPEN = 64,
105 TEGRA_VGPU_CMD_GET_GPU_LOAD = 65,
106 TEGRA_VGPU_CMD_SUSPEND_CONTEXTS = 66,
107 TEGRA_VGPU_CMD_RESUME_CONTEXTS = 67,
108 TEGRA_VGPU_CMD_CLEAR_SM_ERROR_STATE = 68,
109 TEGRA_VGPU_CMD_GET_GPU_CLK_RATE = 69,
110 TEGRA_VGPU_CMD_GET_GPU_FREQ_TABLE = 70,
111 TEGRA_VGPU_CMD_CAP_GPU_CLK_RATE = 71,
112 TEGRA_VGPU_CMD_PROF_MGT = 72,
113 TEGRA_VGPU_CMD_PERFBUF_MGT = 73,
114 TEGRA_VGPU_CMD_GET_TIMESTAMPS_ZIPPER = 74,
115 TEGRA_VGPU_CMD_TSG_RELEASE = 75,
116 TEGRA_VGPU_CMD_GET_VSMS_MAPPING = 76,
117 TEGRA_VGPU_CMD_ALLOC_CTX_HEADER = 77,
118 TEGRA_VGPU_CMD_FREE_CTX_HEADER = 78,
119 TEGRA_VGPU_CMD_MAP_SYNCPT = 79,
120 TEGRA_VGPU_CMD_TSG_BIND_CHANNEL_EX = 80,
121 TEGRA_VGPU_CMD_UPDATE_PC_SAMPLING = 81,
122 TEGRA_VGPU_CMD_SUSPEND = 82,
123 TEGRA_VGPU_CMD_RESUME = 83,
124 TEGRA_VGPU_CMD_GET_ECC_INFO = 84,
125 TEGRA_VGPU_CMD_GET_ECC_COUNTER_VALUE = 85,
126 TEGRA_VGPU_CMD_FB_SET_MMU_DEBUG_MODE = 88,
127 TEGRA_VGPU_CMD_GR_SET_MMU_DEBUG_MODE = 89,
128};
129
130struct tegra_vgpu_connect_params {
131 u32 module;
132 u64 handle;
133};
134
135struct tegra_vgpu_channel_hwctx_params {
136 u32 id;
137 u64 pid;
138 u64 handle;
139};
140
141struct tegra_vgpu_attrib_params {
142 u32 attrib;
143 u32 value;
144};
145
146struct tegra_vgpu_as_share_params {
147 u64 size;
148 u64 handle;
149 u32 big_page_size;
150};
151
152struct tegra_vgpu_as_bind_share_params {
153 u64 as_handle;
154 u64 chan_handle;
155};
156
157enum {
158 TEGRA_VGPU_MAP_PROT_NONE = 0,
159 TEGRA_VGPU_MAP_PROT_READ_ONLY,
160 TEGRA_VGPU_MAP_PROT_WRITE_ONLY
161};
162
163struct tegra_vgpu_as_map_params {
164 u64 handle;
165 u64 addr;
166 u64 gpu_va;
167 u64 size;
168 u8 pgsz_idx;
169 u8 iova;
170 u8 kind;
171 u8 cacheable;
172 u8 clear_ctags;
173 u8 prot;
174 u32 ctag_offset;
175};
176
177#define TEGRA_VGPU_MAP_CACHEABLE (1 << 0)
178#define TEGRA_VGPU_MAP_IO_COHERENT (1 << 1)
179#define TEGRA_VGPU_MAP_L3_ALLOC (1 << 2)
180#define TEGRA_VGPU_MAP_PLATFORM_ATOMIC (1 << 3)
181
182struct tegra_vgpu_as_map_ex_params {
183 u64 handle;
184 u64 gpu_va;
185 u64 size;
186 u32 mem_desc_count;
187 u8 pgsz_idx;
188 u8 iova;
189 u8 kind;
190 u32 flags;
191 u8 clear_ctags;
192 u8 prot;
193 u32 ctag_offset;
194};
195
196struct tegra_vgpu_mem_desc {
197 u64 addr;
198 u64 length;
199};
200
201struct tegra_vgpu_channel_config_params {
202 u64 handle;
203};
204
205struct tegra_vgpu_ramfc_params {
206 u64 handle;
207 u64 gpfifo_va;
208 u32 num_entries;
209 u64 userd_addr;
210 u8 iova;
211};
212
213struct tegra_vgpu_ch_ctx_params {
214 u64 handle;
215 u64 gr_ctx_va;
216 u64 patch_ctx_va;
217 u64 cb_va;
218 u64 attr_va;
219 u64 page_pool_va;
220 u64 priv_access_map_va;
221 u64 fecs_trace_va;
222 u32 class_num;
223};
224
225struct tegra_vgpu_zcull_bind_params {
226 u64 handle;
227 u64 zcull_va;
228 u32 mode;
229};
230
231enum {
232 TEGRA_VGPU_L2_MAINT_FLUSH = 0,
233 TEGRA_VGPU_L2_MAINT_INV,
234 TEGRA_VGPU_L2_MAINT_FLUSH_INV,
235 TEGRA_VGPU_FB_FLUSH
236};
237
238struct tegra_vgpu_cache_maint_params {
239 u8 op;
240};
241
242struct tegra_vgpu_runlist_params {
243 u8 runlist_id;
244 u32 num_entries;
245};
246
247struct tegra_vgpu_golden_ctx_params {
248 u32 size;
249};
250
251struct tegra_vgpu_zcull_info_params {
252 u32 width_align_pixels;
253 u32 height_align_pixels;
254 u32 pixel_squares_by_aliquots;
255 u32 aliquot_total;
256 u32 region_byte_multiplier;
257 u32 region_header_size;
258 u32 subregion_header_size;
259 u32 subregion_width_align_pixels;
260 u32 subregion_height_align_pixels;
261 u32 subregion_count;
262};
263
264#define TEGRA_VGPU_ZBC_COLOR_VALUE_SIZE 4
265#define TEGRA_VGPU_ZBC_TYPE_INVALID 0
266#define TEGRA_VGPU_ZBC_TYPE_COLOR 1
267#define TEGRA_VGPU_ZBC_TYPE_DEPTH 2
268
269struct tegra_vgpu_zbc_set_table_params {
270 u32 color_ds[TEGRA_VGPU_ZBC_COLOR_VALUE_SIZE];
271 u32 color_l2[TEGRA_VGPU_ZBC_COLOR_VALUE_SIZE];
272 u32 depth;
273 u32 format;
274 u32 type; /* color or depth */
275};
276
277struct tegra_vgpu_zbc_query_table_params {
278 u32 color_ds[TEGRA_VGPU_ZBC_COLOR_VALUE_SIZE];
279 u32 color_l2[TEGRA_VGPU_ZBC_COLOR_VALUE_SIZE];
280 u32 depth;
281 u32 ref_cnt;
282 u32 format;
283 u32 type; /* color or depth */
284 u32 index_size; /* [out] size, [in] index */
285};
286
287enum {
288 TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_MAIN,
289 TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_SPILL,
290 TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_PAGEPOOL,
291 TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_BETACB,
292 TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_LAST
293};
294
295enum {
296 TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_WFI,
297 TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_GFX_GFXP,
298 TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CTA,
299 TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CILP,
300 TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_LAST
301};
302
303struct tegra_vgpu_gr_bind_ctxsw_buffers_params {
304 u64 handle; /* deprecated */
305 u64 gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_LAST];
306 u64 size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_LAST];
307 u32 mode;
308 u64 gr_ctx_handle;
309};
310
311struct tegra_vgpu_mmu_debug_mode {
312 u32 enable;
313};
314
315struct tegra_vgpu_sm_debug_mode {
316 u64 handle;
317 u64 sms;
318 u32 enable;
319};
320
321struct tegra_vgpu_reg_op {
322 u8 op;
323 u8 type;
324 u8 status;
325 u8 quad;
326 u32 group_mask;
327 u32 sub_group_mask;
328 u32 offset;
329 u32 value_lo;
330 u32 value_hi;
331 u32 and_n_mask_lo;
332 u32 and_n_mask_hi;
333};
334
335struct tegra_vgpu_reg_ops_params {
336 u64 handle;
337 u64 num_ops;
338 u32 is_profiler;
339};
340
341struct tegra_vgpu_channel_priority_params {
342 u64 handle;
343 u32 priority;
344};
345
346/* level follows nvgpu.h definitions */
347struct tegra_vgpu_channel_runlist_interleave_params {
348 u64 handle;
349 u32 level;
350};
351
352struct tegra_vgpu_channel_timeslice_params {
353 u64 handle;
354 u32 timeslice_us;
355};
356
357#define TEGRA_VGPU_FECS_TRACE_FILTER_SIZE 256
358struct tegra_vgpu_fecs_trace_filter {
359 u64 tag_bits[(TEGRA_VGPU_FECS_TRACE_FILTER_SIZE + 63) / 64];
360};
361
362enum {
363 TEGRA_VGPU_CTXSW_MODE_NO_CTXSW = 0,
364 TEGRA_VGPU_CTXSW_MODE_CTXSW,
365 TEGRA_VGPU_CTXSW_MODE_STREAM_OUT_CTXSW,
366};
367
368enum {
369 TEGRA_VGPU_DISABLE_SAMPLING = 0,
370 TEGRA_VGPU_ENABLE_SAMPLING,
371};
372struct tegra_vgpu_channel_set_ctxsw_mode {
373 u64 handle;
374 u64 gpu_va;
375 u32 mode;
376};
377
378struct tegra_vgpu_channel_update_pc_sampling {
379 u64 handle;
380 u32 mode;
381};
382
383struct tegra_vgpu_channel_free_hwpm_ctx {
384 u64 handle;
385};
386
387struct tegra_vgpu_ecc_info_params {
388 u32 ecc_stats_count;
389};
390
391struct tegra_vgpu_ecc_info_entry {
392 u32 ecc_id;
393 char name[NVGPU_ECC_STAT_NAME_MAX_SIZE];
394};
395
396struct tegra_vgpu_ecc_counter_params {
397 u32 ecc_id;
398 u32 value;
399};
400
401struct tegra_vgpu_gr_ctx_params {
402 u64 gr_ctx_handle;
403 u64 as_handle;
404 u64 gr_ctx_va;
405 u32 class_num;
406 u32 tsg_id;
407};
408
409struct tegra_vgpu_channel_bind_gr_ctx_params {
410 u64 ch_handle;
411 u64 gr_ctx_handle;
412};
413
414struct tegra_vgpu_tsg_bind_gr_ctx_params {
415 u32 tsg_id;
416 u64 gr_ctx_handle;
417};
418
419struct tegra_vgpu_tsg_bind_unbind_channel_params {
420 u32 tsg_id;
421 u64 ch_handle;
422};
423
424struct tegra_vgpu_tsg_preempt_params {
425 u32 tsg_id;
426};
427
428struct tegra_vgpu_tsg_timeslice_params {
429 u32 tsg_id;
430 u32 timeslice_us;
431};
432
433struct tegra_vgpu_tsg_open_rel_params {
434 u32 tsg_id;
435};
436
437/* level follows nvgpu.h definitions */
438struct tegra_vgpu_tsg_runlist_interleave_params {
439 u32 tsg_id;
440 u32 level;
441};
442
443struct tegra_vgpu_read_ptimer_params {
444 u64 time;
445};
446
447#define TEGRA_VGPU_GET_TIMESTAMPS_ZIPPER_MAX_COUNT 16
448#define TEGRA_VGPU_GET_TIMESTAMPS_ZIPPER_SRC_ID_TSC 1
449struct tegra_vgpu_get_timestamps_zipper_params {
450 /* timestamp pairs */
451 struct {
452 /* gpu timestamp value */
453 u64 cpu_timestamp;
454 /* raw GPU counter (PTIMER) value */
455 u64 gpu_timestamp;
456 } samples[TEGRA_VGPU_GET_TIMESTAMPS_ZIPPER_MAX_COUNT];
457 /* number of pairs to read */
458 u32 count;
459 /* cpu clock source id */
460 u32 source_id;
461};
462
463#define TEGRA_VGPU_POWERGATE_MODE_ENABLE 1
464#define TEGRA_VGPU_POWERGATE_MODE_DISABLE 2
465struct tegra_vgpu_set_powergate_params {
466 u32 mode;
467};
468
469struct tegra_vgpu_gpu_clk_rate_params {
470 u32 rate; /* in kHz */
471};
472
473/* TEGRA_VGPU_MAX_ENGINES must be equal or greater than num_engines */
474#define TEGRA_VGPU_MAX_ENGINES 4
475struct tegra_vgpu_engines_info {
476 u32 num_engines;
477 struct engineinfo {
478 u32 engine_id;
479 u32 intr_mask;
480 u32 reset_mask;
481 u32 runlist_id;
482 u32 pbdma_id;
483 u32 inst_id;
484 u32 pri_base;
485 u32 engine_enum;
486 u32 fault_id;
487 } info[TEGRA_VGPU_MAX_ENGINES];
488};
489
490#define TEGRA_VGPU_MAX_GPC_COUNT 16
491#define TEGRA_VGPU_MAX_TPC_COUNT_PER_GPC 16
492#define TEGRA_VGPU_L2_EN_MASK 32
493
494struct tegra_vgpu_constants_params {
495 u32 arch;
496 u32 impl;
497 u32 rev;
498 u32 max_freq;
499 u32 num_channels;
500 u32 golden_ctx_size;
501 u32 zcull_ctx_size;
502 u32 l2_size;
503 u32 ltc_count;
504 u32 cacheline_size;
505 u32 slices_per_ltc;
506 u32 comptags_per_cacheline;
507 u32 comptag_lines;
508 u32 sm_arch_sm_version;
509 u32 sm_arch_spa_version;
510 u32 sm_arch_warp_count;
511 u32 max_gpc_count;
512 u32 gpc_count;
513 u32 max_tpc_per_gpc_count;
514 u32 num_fbps;
515 u32 fbp_en_mask;
516 u32 ltc_per_fbp;
517 u32 max_lts_per_ltc;
518 u8 gpc_tpc_count[TEGRA_VGPU_MAX_GPC_COUNT];
519 /* mask bits should be equal or larger than
520 * TEGRA_VGPU_MAX_TPC_COUNT_PER_GPC
521 */
522 u16 gpc_tpc_mask[TEGRA_VGPU_MAX_GPC_COUNT];
523 u32 hwpm_ctx_size;
524 u8 force_preempt_mode;
525 u8 can_set_clkrate;
526 u32 default_timeslice_us;
527 u32 preempt_ctx_size;
528 u32 channel_base;
529 struct tegra_vgpu_engines_info engines_info;
530 u32 num_pce;
531 u32 sm_per_tpc;
532 u32 max_subctx_count;
533 u32 l2_en_mask[TEGRA_VGPU_L2_EN_MASK];
534};
535
536enum {
537 TEGRA_VGPU_CYCLE_STATS_SNAPSHOT_CMD_FLUSH = 0,
538 TEGRA_VGPU_CYCLE_STATS_SNAPSHOT_CMD_ATTACH = 1,
539 TEGRA_VGPU_CYCLE_STATS_SNAPSHOT_CMD_DETACH = 2,
540};
541
542struct tegra_vgpu_channel_cyclestats_snapshot_params {
543 u64 handle;
544 u32 perfmon_start;
545 u32 perfmon_count;
546 u32 buf_info; /* client->srvr: get ptr; srvr->client: num pending */
547 u8 subcmd;
548 u8 hw_overflow;
549};
550
551struct tegra_vgpu_gpu_load_params {
552 u32 load;
553};
554
555struct tegra_vgpu_suspend_resume_contexts {
556 u32 num_channels;
557 u16 resident_chid;
558};
559
560struct tegra_vgpu_clear_sm_error_state {
561 u64 handle;
562 u32 sm_id;
563};
564
565enum {
566 TEGRA_VGPU_PROF_GET_GLOBAL = 0,
567 TEGRA_VGPU_PROF_GET_CONTEXT,
568 TEGRA_VGPU_PROF_RELEASE
569};
570
571struct tegra_vgpu_prof_mgt_params {
572 u32 mode;
573};
574
575struct tegra_vgpu_perfbuf_mgt_params {
576 u64 vm_handle;
577 u64 offset;
578 u32 size;
579};
580
581#define TEGRA_VGPU_GPU_FREQ_TABLE_SIZE 25
582
583struct tegra_vgpu_get_gpu_freq_table_params {
584 u32 num_freqs;
585};
586
587struct tegra_vgpu_vsms_mapping_params {
588 u32 num_sm;
589};
590
591struct tegra_vgpu_vsms_mapping_entry {
592 u32 gpc_index;
593 u32 tpc_index;
594 u32 sm_index;
595 u32 global_tpc_index;
596};
597
598struct tegra_vgpu_alloc_ctx_header_params {
599 u64 ch_handle;
600 u64 ctx_header_va;
601};
602
603struct tegra_vgpu_free_ctx_header_params {
604 u64 ch_handle;
605};
606
607struct tegra_vgpu_map_syncpt_params {
608 u64 as_handle;
609 u64 gpu_va;
610 u64 len;
611 u64 offset;
612 u8 prot;
613};
614
615struct tegra_vgpu_tsg_bind_channel_ex_params {
616 u32 tsg_id;
617 u64 ch_handle;
618 u32 subctx_id;
619 u32 runqueue_sel;
620};
621
622struct tegra_vgpu_fb_set_mmu_debug_mode_params {
623 u8 enable;
624};
625
626struct tegra_vgpu_gr_set_mmu_debug_mode_params {
627 u64 ch_handle;
628 u8 enable;
629};
630
631struct tegra_vgpu_cmd_msg {
632 u32 cmd;
633 int ret;
634 u64 handle;
635 union {
636 struct tegra_vgpu_connect_params connect;
637 struct tegra_vgpu_channel_hwctx_params channel_hwctx;
638 struct tegra_vgpu_attrib_params attrib;
639 struct tegra_vgpu_as_share_params as_share;
640 struct tegra_vgpu_as_bind_share_params as_bind_share;
641 struct tegra_vgpu_as_map_params as_map;
642 struct tegra_vgpu_as_map_ex_params as_map_ex;
643 struct tegra_vgpu_channel_config_params channel_config;
644 struct tegra_vgpu_ramfc_params ramfc;
645 struct tegra_vgpu_ch_ctx_params ch_ctx;
646 struct tegra_vgpu_zcull_bind_params zcull_bind;
647 struct tegra_vgpu_cache_maint_params cache_maint;
648 struct tegra_vgpu_runlist_params runlist;
649 struct tegra_vgpu_golden_ctx_params golden_ctx;
650 struct tegra_vgpu_zcull_info_params zcull_info;
651 struct tegra_vgpu_zbc_set_table_params zbc_set_table;
652 struct tegra_vgpu_zbc_query_table_params zbc_query_table;
653 struct tegra_vgpu_gr_bind_ctxsw_buffers_params gr_bind_ctxsw_buffers;
654 struct tegra_vgpu_mmu_debug_mode mmu_debug_mode;
655 struct tegra_vgpu_sm_debug_mode sm_debug_mode;
656 struct tegra_vgpu_reg_ops_params reg_ops;
657 struct tegra_vgpu_channel_priority_params channel_priority;
658 struct tegra_vgpu_channel_runlist_interleave_params channel_interleave;
659 struct tegra_vgpu_channel_timeslice_params channel_timeslice;
660 struct tegra_vgpu_fecs_trace_filter fecs_trace_filter;
661 struct tegra_vgpu_channel_set_ctxsw_mode set_ctxsw_mode;
662 struct tegra_vgpu_channel_free_hwpm_ctx free_hwpm_ctx;
663 struct tegra_vgpu_gr_ctx_params gr_ctx;
664 struct tegra_vgpu_channel_bind_gr_ctx_params ch_bind_gr_ctx;
665 struct tegra_vgpu_tsg_bind_gr_ctx_params tsg_bind_gr_ctx;
666 struct tegra_vgpu_tsg_bind_unbind_channel_params tsg_bind_unbind_channel;
667 struct tegra_vgpu_tsg_open_rel_params tsg_open;
668 struct tegra_vgpu_tsg_open_rel_params tsg_release;
669 struct tegra_vgpu_tsg_preempt_params tsg_preempt;
670 struct tegra_vgpu_tsg_timeslice_params tsg_timeslice;
671 struct tegra_vgpu_tsg_runlist_interleave_params tsg_interleave;
672 struct tegra_vgpu_read_ptimer_params read_ptimer;
673 struct tegra_vgpu_set_powergate_params set_powergate;
674 struct tegra_vgpu_gpu_clk_rate_params gpu_clk_rate;
675 struct tegra_vgpu_constants_params constants;
676 struct tegra_vgpu_channel_cyclestats_snapshot_params cyclestats_snapshot;
677 struct tegra_vgpu_gpu_load_params gpu_load;
678 struct tegra_vgpu_suspend_resume_contexts suspend_contexts;
679 struct tegra_vgpu_suspend_resume_contexts resume_contexts;
680 struct tegra_vgpu_clear_sm_error_state clear_sm_error_state;
681 struct tegra_vgpu_prof_mgt_params prof_management;
682 struct tegra_vgpu_perfbuf_mgt_params perfbuf_management;
683 struct tegra_vgpu_get_timestamps_zipper_params get_timestamps_zipper;
684 struct tegra_vgpu_get_gpu_freq_table_params get_gpu_freq_table;
685 struct tegra_vgpu_vsms_mapping_params vsms_mapping;
686 struct tegra_vgpu_alloc_ctx_header_params alloc_ctx_header;
687 struct tegra_vgpu_free_ctx_header_params free_ctx_header;
688 struct tegra_vgpu_map_syncpt_params map_syncpt;
689 struct tegra_vgpu_tsg_bind_channel_ex_params tsg_bind_channel_ex;
690 struct tegra_vgpu_channel_update_pc_sampling update_pc_sampling;
691 struct tegra_vgpu_ecc_info_params ecc_info;
692 struct tegra_vgpu_ecc_counter_params ecc_counter;
693 struct tegra_vgpu_fb_set_mmu_debug_mode_params fb_set_mmu_debug_mode;
694 struct tegra_vgpu_gr_set_mmu_debug_mode_params gr_set_mmu_debug_mode;
695 char padding[192];
696 } params;
697};
698
699enum {
700 TEGRA_VGPU_GR_INTR_NOTIFY = 0,
701 TEGRA_VGPU_GR_INTR_SEMAPHORE_TIMEOUT = 1,
702 TEGRA_VGPU_GR_INTR_ILLEGAL_NOTIFY = 2,
703 TEGRA_VGPU_GR_INTR_ILLEGAL_METHOD = 3,
704 TEGRA_VGPU_GR_INTR_ILLEGAL_CLASS = 4,
705 TEGRA_VGPU_GR_INTR_FECS_ERROR = 5,
706 TEGRA_VGPU_GR_INTR_CLASS_ERROR = 6,
707 TEGRA_VGPU_GR_INTR_FIRMWARE_METHOD = 7,
708 TEGRA_VGPU_GR_INTR_EXCEPTION = 8,
709 TEGRA_VGPU_GR_INTR_SEMAPHORE = 9,
710 TEGRA_VGPU_FIFO_INTR_PBDMA = 10,
711 TEGRA_VGPU_FIFO_INTR_CTXSW_TIMEOUT = 11,
712 TEGRA_VGPU_FIFO_INTR_MMU_FAULT = 12,
713 TEGRA_VGPU_GR_INTR_SM_EXCEPTION = 16,
714};
715
716struct tegra_vgpu_gr_intr_info {
717 u32 type;
718 u32 chid;
719};
720
721struct tegra_vgpu_gr_nonstall_intr_info {
722 u32 type;
723};
724
725struct tegra_vgpu_fifo_intr_info {
726 u32 type;
727 u32 chid;
728};
729
730struct tegra_vgpu_fifo_nonstall_intr_info {
731 u32 type;
732};
733
734struct tegra_vgpu_ce2_nonstall_intr_info {
735 u32 type;
736};
737
738enum {
739 TEGRA_VGPU_FECS_TRACE_DATA_UPDATE = 0
740};
741
742struct tegra_vgpu_fecs_trace_event_info {
743 u32 type;
744};
745
746#define TEGRA_VGPU_CHANNEL_EVENT_ID_MAX 6
747struct tegra_vgpu_channel_event_info {
748 u32 event_id;
749 u32 is_tsg;
750 u32 id; /* channel id or tsg id */
751};
752
753struct tegra_vgpu_sm_esr_info {
754 u32 tsg_id;
755 u32 sm_id;
756 u32 hww_global_esr;
757 u32 hww_warp_esr;
758 u64 hww_warp_esr_pc;
759 u32 hww_global_esr_report_mask;
760 u32 hww_warp_esr_report_mask;
761};
762
763struct tegra_vgpu_semaphore_wakeup {
764 u32 post_events;
765};
766
767struct tegra_vgpu_channel_cleanup {
768 u32 chid;
769};
770
771struct tegra_vgpu_channel_set_error_notifier {
772 u32 chid;
773 u32 error;
774};
775
776enum {
777
778 TEGRA_VGPU_INTR_GR = 0,
779 TEGRA_VGPU_INTR_FIFO = 1,
780 TEGRA_VGPU_INTR_CE2 = 2,
781};
782
783enum {
784 TEGRA_VGPU_EVENT_INTR = 0,
785 TEGRA_VGPU_EVENT_ABORT = 1,
786 TEGRA_VGPU_EVENT_FECS_TRACE = 2,
787 TEGRA_VGPU_EVENT_CHANNEL = 3,
788 TEGRA_VGPU_EVENT_SM_ESR = 4,
789 TEGRA_VGPU_EVENT_SEMAPHORE_WAKEUP = 5,
790 TEGRA_VGPU_EVENT_CHANNEL_CLEANUP = 6,
791 TEGRA_VGPU_EVENT_SET_ERROR_NOTIFIER = 7,
792};
793
794struct tegra_vgpu_intr_msg {
795 unsigned int event;
796 u32 unit;
797 union {
798 struct tegra_vgpu_gr_intr_info gr_intr;
799 struct tegra_vgpu_gr_nonstall_intr_info gr_nonstall_intr;
800 struct tegra_vgpu_fifo_intr_info fifo_intr;
801 struct tegra_vgpu_fifo_nonstall_intr_info fifo_nonstall_intr;
802 struct tegra_vgpu_ce2_nonstall_intr_info ce2_nonstall_intr;
803 struct tegra_vgpu_fecs_trace_event_info fecs_trace;
804 struct tegra_vgpu_channel_event_info channel_event;
805 struct tegra_vgpu_sm_esr_info sm_esr;
806 struct tegra_vgpu_semaphore_wakeup sem_wakeup;
807 struct tegra_vgpu_channel_cleanup ch_cleanup;
808 struct tegra_vgpu_channel_set_error_notifier set_error_notifier;
809 char padding[32];
810 } info;
811};
812
813#define TEGRA_VGPU_QUEUE_SIZES \
814 512, \
815 sizeof(struct tegra_vgpu_intr_msg)
816
817#endif
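
For context, the tegra_vgpu_cmd_msg layout removed above was the unit of every guest-to-server RPC: the caller picks a TEGRA_VGPU_CMD_* selector, fills the matching member of the params union, and sends the whole message over the command queue. The following is a minimal sketch of that pattern, assuming the vgpu_comm_sendrecv() and vgpu_get_handle() helpers declared in vgpu.h below; it is illustrative only, not the removed implementation.

static int example_channel_preempt(struct gk20a *g, u64 ch_virt_handle)
{
	/* Illustrative sketch, not part of the removed headers. */
	struct tegra_vgpu_cmd_msg msg = {};
	struct tegra_vgpu_channel_config_params *p = &msg.params.channel_config;
	int err;

	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_PREEMPT;  /* command selector */
	msg.handle = vgpu_get_handle(g);           /* per-GPU virtualization handle */
	p->handle = ch_virt_handle;                /* server-side channel handle */

	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	return err ? err : msg.ret;                /* msg.ret carries the server status */
}
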
diff --git a/include/nvgpu/vgpu/vgpu.h b/include/nvgpu/vgpu/vgpu.h
deleted file mode 100644
index ecdb896..0000000
--- a/include/nvgpu/vgpu/vgpu.h
+++ /dev/null
@@ -1,110 +0,0 @@
1/*
2 * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __VGPU_COMMON_H__
24#define __VGPU_COMMON_H__
25
26#include <nvgpu/types.h>
27#include <nvgpu/thread.h>
28#include <nvgpu/log.h>
29#include <nvgpu/lock.h>
30#include <nvgpu/vgpu/tegra_vgpu.h>
31
32struct device;
33struct tegra_vgpu_gr_intr_info;
34struct tegra_vgpu_fifo_intr_info;
35struct tegra_vgpu_cmd_msg;
36struct nvgpu_mem;
37struct gk20a;
38struct vm_gk20a;
39struct nvgpu_gr_ctx;
40struct nvgpu_cpu_time_correlation_sample;
41struct vgpu_ecc_stat;
42struct channel_gk20a;
43
44struct vgpu_priv_data {
45 u64 virt_handle;
46 struct nvgpu_thread intr_handler;
47 struct tegra_vgpu_constants_params constants;
48 struct vgpu_ecc_stat *ecc_stats;
49 int ecc_stats_count;
50 u32 num_freqs;
51 unsigned long *freqs;
52 struct nvgpu_mutex vgpu_clk_get_freq_lock;
53};
54
55struct vgpu_priv_data *vgpu_get_priv_data(struct gk20a *g);
56
57static inline u64 vgpu_get_handle(struct gk20a *g)
58{
59 struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
60
61 if (unlikely(!priv)) {
62 nvgpu_err(g, "invalid vgpu_priv_data in %s", __func__);
63 return INT_MAX;
64 }
65
66 return priv->virt_handle;
67}
68
69int vgpu_comm_init(struct gk20a *g);
70void vgpu_comm_deinit(void);
71int vgpu_comm_sendrecv(struct tegra_vgpu_cmd_msg *msg, size_t size_in,
72 size_t size_out);
73u64 vgpu_connect(void);
74int vgpu_get_attribute(u64 handle, u32 attrib, u32 *value);
75int vgpu_intr_thread(void *dev_id);
76void vgpu_remove_support_common(struct gk20a *g);
77void vgpu_detect_chip(struct gk20a *g);
78int vgpu_init_gpu_characteristics(struct gk20a *g);
79int vgpu_read_ptimer(struct gk20a *g, u64 *value);
80int vgpu_get_timestamps_zipper(struct gk20a *g,
81 u32 source_id, u32 count,
82 struct nvgpu_cpu_time_correlation_sample *samples);
83int vgpu_init_hal(struct gk20a *g);
84int vgpu_get_constants(struct gk20a *g);
85u64 vgpu_bar1_map(struct gk20a *g, struct nvgpu_mem *mem);
86int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info);
87int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
88 struct nvgpu_gr_ctx *gr_ctx,
89 struct vm_gk20a *vm,
90 u32 class,
91 u32 flags);
92void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
93 struct nvgpu_gr_ctx *gr_ctx);
94void vgpu_gr_handle_sm_esr_event(struct gk20a *g,
95 struct tegra_vgpu_sm_esr_info *info);
96int vgpu_gr_init_ctx_state(struct gk20a *g);
97int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info);
98u32 vgpu_ce_get_num_pce(struct gk20a *g);
99int vgpu_init_mm_support(struct gk20a *g);
100int vgpu_init_gr_support(struct gk20a *g);
101int vgpu_init_fifo_support(struct gk20a *g);
102
103int vgpu_gp10b_init_hal(struct gk20a *g);
104int vgpu_gv11b_init_hal(struct gk20a *g);
105
106bool vgpu_is_reduced_bar1(struct gk20a *g);
107
108int vgpu_gr_set_mmu_debug_mode(struct gk20a *g,
109 struct channel_gk20a *ch, bool enable);
110#endif
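
The interrupt path runs the other way: the server posts tegra_vgpu_intr_msg entries (defined in tegra_vgpu.h above) on the interrupt queue, and the guest's vgpu_intr_thread() dispatches on the event and unit fields. A hedged dispatcher sketch using the handler prototypes declared above might look like this; the helper name is hypothetical and the real routing in the removed driver may differ.

static void example_dispatch_intr_msg(struct gk20a *g,
				      struct tegra_vgpu_intr_msg *msg)
{
	/* Hypothetical dispatcher; illustrative only. */
	switch (msg->event) {
	case TEGRA_VGPU_EVENT_INTR:
		if (msg->unit == TEGRA_VGPU_INTR_GR)
			vgpu_gr_isr(g, &msg->info.gr_intr);
		else if (msg->unit == TEGRA_VGPU_INTR_FIFO)
			vgpu_fifo_isr(g, &msg->info.fifo_intr);
		break;
	case TEGRA_VGPU_EVENT_SM_ESR:
		vgpu_gr_handle_sm_esr_event(g, &msg->info.sm_esr);
		break;
	default:
		/* Other TEGRA_VGPU_EVENT_* types are handled elsewhere. */
		break;
	}
}
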
diff --git a/include/nvgpu/vgpu/vgpu_ivc.h b/include/nvgpu/vgpu/vgpu_ivc.h
deleted file mode 100644
index e7e4026..0000000
--- a/include/nvgpu/vgpu/vgpu_ivc.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __VGPU_IVC_H__
24#define __VGPU_IVC_H__
25
26#include <nvgpu/types.h>
27
28struct gk20a;
29
30int vgpu_ivc_init(struct gk20a *g, u32 elems,
31 const size_t *queue_sizes, u32 queue_start, u32 num_queues);
32void vgpu_ivc_deinit(u32 queue_start, u32 num_queues);
33void vgpu_ivc_release(void *handle);
34u32 vgpu_ivc_get_server_vmid(void);
35int vgpu_ivc_recv(u32 index, void **handle, void **data,
36 size_t *size, u32 *sender);
37int vgpu_ivc_send(u32 peer, u32 index, void *data, size_t size);
38int vgpu_ivc_sendrecv(u32 peer, u32 index, void **handle,
39 void **data, size_t *size);
40u32 vgpu_ivc_get_peer_self(void);
41void *vgpu_ivc_oob_get_ptr(u32 peer, u32 index, void **ptr,
42 size_t *size);
43void vgpu_ivc_oob_put_ptr(void *handle);
44
45#endif
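
vgpu_ivc.h wrapped the inter-VM channel (IVC) transport that both queues ride on. As a rough sketch of how the interrupt queue could be drained with these primitives, assuming each received buffer is handed back with vgpu_ivc_release() and reusing the hypothetical dispatcher sketched above:

static void example_poll_intr_queue(struct gk20a *g)
{
	/* Illustrative receive loop over TEGRA_VGPU_QUEUE_INTR; not the removed code. */
	struct tegra_vgpu_intr_msg *msg;
	void *handle;
	size_t size;
	u32 sender;

	while (vgpu_ivc_recv(TEGRA_VGPU_QUEUE_INTR, &handle,
			     (void **)&msg, &size, &sender) == 0) {
		if (size >= sizeof(*msg))
			example_dispatch_intr_msg(g, msg);
		vgpu_ivc_release(handle);	/* hand the IVC buffer back */
	}
}
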
diff --git a/include/nvgpu/vgpu/vgpu_ivm.h b/include/nvgpu/vgpu/vgpu_ivm.h
deleted file mode 100644
index cecdd51..0000000
--- a/include/nvgpu/vgpu/vgpu_ivm.h
+++ /dev/null
@@ -1,37 +0,0 @@
1/*
2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __VGPU_IVM_H__
24#define __VGPU_IVM_H__
25
26#include <nvgpu/types.h>
27
28struct tegra_hv_ivm_cookie;
29
30struct tegra_hv_ivm_cookie *vgpu_ivm_mempool_reserve(unsigned int id);
31int vgpu_ivm_mempool_unreserve(struct tegra_hv_ivm_cookie *cookie);
32u64 vgpu_ivm_get_ipa(struct tegra_hv_ivm_cookie *cookie);
33u64 vgpu_ivm_get_size(struct tegra_hv_ivm_cookie *cookie);
34void *vgpu_ivm_mempool_map(struct tegra_hv_ivm_cookie *cookie);
35void vgpu_ivm_mempool_unmap(struct tegra_hv_ivm_cookie *cookie,
36 void *addr);
37#endif
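
vgpu_ivm.h exposed the hypervisor's shared-memory mempools (used, for example, for FECS trace and cyclestats buffers). A hedged reserve-and-map usage sketch follows; the mempool id is caller-supplied and the NULL-based error handling is assumed purely for illustration.

static void *example_map_mempool(unsigned int mempool_id, u64 *size_out)
{
	/* Illustrative only; the removed callers may handle errors differently. */
	struct tegra_hv_ivm_cookie *cookie;
	void *cpu_va;

	cookie = vgpu_ivm_mempool_reserve(mempool_id);
	if (cookie == NULL)
		return NULL;

	cpu_va = vgpu_ivm_mempool_map(cookie);
	if (cpu_va == NULL) {
		vgpu_ivm_mempool_unreserve(cookie);
		return NULL;
	}

	*size_out = vgpu_ivm_get_size(cookie);	/* size of the reserved pool */
	return cpu_va;
}
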
diff --git a/include/nvgpu/vgpu/vm.h b/include/nvgpu/vgpu/vm.h
deleted file mode 100644
index fc0078d..0000000
--- a/include/nvgpu/vgpu/vm.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef NVGPU_VGPU_VM_H
24#define NVGPU_VGPU_VM_H
25
26#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
27int vgpu_vm_init(struct gk20a *g, struct vm_gk20a *vm);
28void vgpu_vm_remove(struct vm_gk20a *vm);
29#endif
30
31#endif /* NVGPU_VGPU_VM_H */