/*
* Virtualized GPU Interfaces
*
* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/

#ifndef _VIRT_H_
#define _VIRT_H_

#include <linux/tegra_gr_comm.h>
#include <linux/tegra_vgpu.h>
#include "gk20a/gk20a.h"
#include "gk20a/platform_gk20a.h"
#include "common/linux/os_linux.h"
#include <nvgpu/thread.h>

#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
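
/*
 * Per-device state for a virtualized GPU: the handle that identifies this
 * guest to the virtualization server, the thread that services interrupt
 * events delivered by the server, and constants fetched from it at init.
 */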
struct vgpu_priv_data {
	u64 virt_handle;
	struct nvgpu_thread intr_handler;
	struct tegra_vgpu_constants_params constants;
};
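
/*
 * The probe path stashes a struct vgpu_priv_data pointer in
 * gk20a_platform::vgpu_priv; the helpers below look it up from either a
 * struct device or a struct gk20a.
 */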
static inline
struct vgpu_priv_data *vgpu_get_priv_data_from_dev(struct device *dev)
{
	struct gk20a_platform *plat = gk20a_get_platform(dev);

	return (struct vgpu_priv_data *)plat->vgpu_priv;
}

static inline struct vgpu_priv_data *vgpu_get_priv_data(struct gk20a *g)
{
	return vgpu_get_priv_data_from_dev(dev_from_gk20a(g));
}

static inline u64 vgpu_get_handle_from_dev(struct device *dev)
{
	struct vgpu_priv_data *priv = vgpu_get_priv_data_from_dev(dev);

	if (unlikely(!priv)) {
		dev_err(dev, "invalid vgpu_priv_data in %s\n", __func__);
		return INT_MAX;
	}

	return priv->virt_handle;
}

static inline u64 vgpu_get_handle(struct gk20a *g)
{
	return vgpu_get_handle_from_dev(dev_from_gk20a(g));
}

int vgpu_pm_prepare_poweroff(struct device *dev);
int vgpu_pm_finalize_poweron(struct device *dev);
int vgpu_probe(struct platform_device *dev);
int vgpu_remove(struct platform_device *dev);
u64 vgpu_bar1_map(struct gk20a *g, struct sg_table **sgt, u64 size);
int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info);
int vgpu_gr_nonstall_isr(struct gk20a *g,
			struct tegra_vgpu_gr_nonstall_intr_info *info);
int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
			struct gr_ctx_desc **__gr_ctx,
			struct vm_gk20a *vm,
			u32 class,
			u32 flags);
void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
			struct gr_ctx_desc *gr_ctx);
void vgpu_gr_handle_sm_esr_event(struct gk20a *g,
			struct tegra_vgpu_sm_esr_info *info);
int vgpu_gr_init_ctx_state(struct gk20a *g);
int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info);
int vgpu_fifo_nonstall_isr(struct gk20a *g,
			struct tegra_vgpu_fifo_nonstall_intr_info *info);
int vgpu_ce2_nonstall_isr(struct gk20a *g,
			struct tegra_vgpu_ce2_nonstall_intr_info *info);
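
/*
 * The vgpu_init_*_ops() functions overwrite the relevant gpu_ops function
 * pointers with virtualization-aware implementations.
 */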
void vgpu_init_fifo_ops(struct gpu_ops *gops);
void vgpu_init_gr_ops(struct gpu_ops *gops);
void vgpu_init_ltc_ops(struct gpu_ops *gops);
void vgpu_init_mm_ops(struct gpu_ops *gops);
void vgpu_init_debug_ops(struct gpu_ops *gops);
void vgpu_init_tsg_ops(struct gpu_ops *gops);
#if defined(CONFIG_GK20A_CYCLE_STATS)
void vgpu_init_css_ops(struct gpu_ops *gops);
#endif
int vgpu_init_mm_support(struct gk20a *g);
int vgpu_init_gr_support(struct gk20a *g);
int vgpu_init_fifo_support(struct gk20a *g);
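
/* Query a single attribute value from the virtualization server. */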
int vgpu_get_attribute(u64 handle, u32 attrib, u32 *value);
int vgpu_comm_sendrecv(struct tegra_vgpu_cmd_msg *msg, size_t size_in,
			size_t size_out);
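
/*
 * Illustrative sketch, not part of the original header: callers typically
 * pair vgpu_get_handle() with vgpu_comm_sendrecv(), checking both the
 * transport error and the server-side return code. TEGRA_VGPU_CMD_EXAMPLE
 * is a hypothetical command used only for illustration.
 *
 *	struct tegra_vgpu_cmd_msg msg = {};
 *	int err;
 *
 *	msg.cmd = TEGRA_VGPU_CMD_EXAMPLE;
 *	msg.handle = vgpu_get_handle(g);
 *	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 *	err = err ? err : msg.ret;
 */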
void vgpu_init_hal_common(struct gk20a *g);
int vgpu_gm20b_init_hal(struct gk20a *g);
int vgpu_gp10b_init_hal(struct gk20a *g);
void vgpu_init_dbg_session_ops(struct gpu_ops *gops);
void vgpu_create_sysfs(struct device *dev);
void vgpu_remove_sysfs(struct device *dev);
#else
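
/*
 * With virtualization support compiled out, these stubs fail with -ENOSYS,
 * return a benign default, or do nothing, so callers need no #ifdef guards.
 */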
static inline int vgpu_pm_prepare_poweroff(struct device *dev)
{
	return -ENOSYS;
}

static inline int vgpu_pm_finalize_poweron(struct device *dev)
{
	return -ENOSYS;
}

static inline int vgpu_probe(struct platform_device *dev)
{
	return -ENOSYS;
}

static inline int vgpu_remove(struct platform_device *dev)
{
	return -ENOSYS;
}

static inline u64 vgpu_bar1_map(struct gk20a *g, struct sg_table **sgt,
				u64 size)
{
	return 0;
}

static inline int vgpu_gr_isr(struct gk20a *g,
			struct tegra_vgpu_gr_intr_info *info)
{
	return 0;
}

static inline int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
			struct gr_ctx_desc **__gr_ctx,
			struct vm_gk20a *vm,
			u32 class,
			u32 flags)
{
	return -ENOSYS;
}

static inline void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
			struct gr_ctx_desc *gr_ctx)
{
}

static inline int vgpu_gr_init_ctx_state(struct gk20a *g)
{
	return -ENOSYS;
}

static inline int vgpu_fifo_isr(struct gk20a *g,
			struct tegra_vgpu_fifo_intr_info *info)
{
	return 0;
}

static inline void vgpu_init_fifo_ops(struct gpu_ops *gops)
{
}

static inline void vgpu_init_gr_ops(struct gpu_ops *gops)
{
}

static inline void vgpu_init_ltc_ops(struct gpu_ops *gops)
{
}

static inline void vgpu_init_mm_ops(struct gpu_ops *gops)
{
}

static inline void vgpu_init_debug_ops(struct gpu_ops *gops)
{
}

#if defined(CONFIG_GK20A_CYCLE_STATS)
static inline void vgpu_init_css_ops(struct gpu_ops *gops)
{
}
#endif

static inline int vgpu_init_mm_support(struct gk20a *g)
{
	return -ENOSYS;
}

static inline int vgpu_init_gr_support(struct gk20a *g)
{
	return -ENOSYS;
}

static inline int vgpu_init_fifo_support(struct gk20a *g)
{
	return -ENOSYS;
}

static inline int vgpu_get_attribute(u64 handle, u32 attrib, u32 *value)
{
	return -ENOSYS;
}

static inline int vgpu_comm_sendrecv(struct tegra_vgpu_cmd_msg *msg,
			size_t size_in, size_t size_out)
{
	return -ENOSYS;
}

#endif /* CONFIG_TEGRA_GR_VIRTUALIZATION */
#endif /* _VIRT_H_ */