/*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/string.h>
#include <linux/tegra-ivc.h>
#include <linux/tegra_vgpu.h>
#include <nvgpu/kmem.h>
#include <nvgpu/bug.h>
#include "gk20a/gk20a.h"
#include "gk20a/ctxsw_trace_gk20a.h"
#include "vgpu.h"
#include "fecs_trace_vgpu.h"
/*
 * Per-GPU state for the virtualized FECS context-switch trace.
 * Stored in g->fecs_trace (cast to struct gk20a_fecs_trace *) once
 * vgpu_fecs_trace_init() succeeds.
 */
struct vgpu_fecs_trace {
	struct tegra_hv_ivm_cookie *cookie;	/* reserved HV mempool backing the ring */
	struct nvgpu_ctxsw_ring_header *header;	/* ring header at start of mapped buf */
	struct nvgpu_ctxsw_trace_entry *entries;	/* entry array following the header */
	int num_entries;	/* snapshot of header->num_ents taken at init */
	bool enabled;		/* true after a successful TRACE_ENABLE command */
	void *buf;		/* ioremap_cache() mapping of the whole mempool */
};
/*
 * Set up the FECS trace shared ring for a vGPU client.
 *
 * Looks up the "mempool-fecs-trace" phandle in the device tree, reserves
 * the hypervisor mempool it names, maps it cacheable, and validates that
 * the server-side entry layout matches ours.  On success the state is
 * published through g->fecs_trace and the FECS-trace support flag is set.
 *
 * Returns 0 on success, negative errno on failure (in which case all
 * partially-acquired resources are released).
 */
static int vgpu_fecs_trace_init(struct gk20a *g)
{
	struct device *dev = dev_from_gk20a(g);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	struct device_node *hv_np;
	struct vgpu_fecs_trace *vcst;
	u32 mempool;
	int err;

	gk20a_dbg_fn("");

	vcst = nvgpu_kzalloc(g, sizeof(*vcst));
	if (!vcst)
		return -ENOMEM;

	err = of_parse_phandle_with_fixed_args(np,
			"mempool-fecs-trace", 1, 0, &args);
	if (err) {
		/* Not an error per se: this platform simply lacks the feature. */
		dev_info(dev_from_gk20a(g), "does not support fecs trace\n");
		goto fail;
	}
	/*
	 * NOTE(review): the support flag is set before the mempool is
	 * actually reserved/mapped; it stays set even if a later step
	 * fails.  Preserved as-is — confirm whether callers tolerate that.
	 */
	g->gpu_characteristics.flags |=
		NVGPU_GPU_FLAGS_SUPPORT_FECS_CTXSW_TRACE;

	hv_np = args.np;
	mempool = args.args[0];
	vcst->cookie = tegra_hv_mempool_reserve(hv_np, mempool);
	if (IS_ERR(vcst->cookie)) {
		dev_info(dev_from_gk20a(g),
			"mempool %u reserve failed\n", mempool);
		/* Clear so the fail path does not unreserve an ERR_PTR. */
		vcst->cookie = NULL;
		err = -EINVAL;
		goto fail;
	}

	/* Map the whole mempool; header sits at offset 0, entries follow. */
	vcst->buf = ioremap_cache(vcst->cookie->ipa, vcst->cookie->size);
	if (!vcst->buf) {
		dev_info(dev_from_gk20a(g), "ioremap_cache failed\n");
		err = -EINVAL;
		goto fail;
	}
	vcst->header = vcst->buf;
	vcst->num_entries = vcst->header->num_ents;
	if (unlikely(vcst->header->ent_size != sizeof(*vcst->entries))) {
		dev_err(dev_from_gk20a(g),
			"entry size mismatch\n");
		/*
		 * Bug fix: err was still 0 here (left over from the
		 * successful of_parse call), so this path used to free
		 * vcst and then report success to the caller.
		 */
		err = -EINVAL;
		goto fail;
	}
	vcst->entries = vcst->buf + sizeof(*vcst->header);
	g->fecs_trace = (struct gk20a_fecs_trace *)vcst;

	return 0;

fail:
	/* buf is NULL on failures before ioremap_cache(); don't unmap then. */
	if (vcst->buf)
		iounmap(vcst->buf);
	if (vcst->cookie)
		tegra_hv_mempool_unreserve(vcst->cookie);
	nvgpu_kfree(g, vcst);
	return err;
}
/*
 * Release everything vgpu_fecs_trace_init() acquired, in reverse order:
 * unmap the ring, give the mempool back to the hypervisor, free the state.
 * Always returns 0.
 */
static int vgpu_fecs_trace_deinit(struct gk20a *g)
{
	struct vgpu_fecs_trace *trace =
		(struct vgpu_fecs_trace *)g->fecs_trace;

	iounmap(trace->buf);
	tegra_hv_mempool_unreserve(trace->cookie);
	nvgpu_kfree(g, trace);
	return 0;
}
/*
 * Ask the vGPU server to start FECS tracing.  Mirrors the outcome into
 * trace->enabled so is_enabled() reflects the server-side state.
 * Returns 0 on success, a negative transport or server error otherwise.
 */
static int vgpu_fecs_trace_enable(struct gk20a *g)
{
	struct vgpu_fecs_trace *trace =
		(struct vgpu_fecs_trace *)g->fecs_trace;
	struct tegra_vgpu_cmd_msg msg = {
		.cmd = TEGRA_VGPU_CMD_FECS_TRACE_ENABLE,
		.handle = vgpu_get_handle(g),
	};
	int ret;

	ret = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	if (!ret)
		ret = msg.ret;	/* transport OK: report the server's verdict */
	WARN_ON(ret);
	trace->enabled = !ret;
	return ret;
}
/*
 * Ask the vGPU server to stop FECS tracing.  The enabled flag is cleared
 * up front so readers see the trace as off even if the command fails.
 * Returns 0 on success, a negative transport or server error otherwise.
 */
static int vgpu_fecs_trace_disable(struct gk20a *g)
{
	struct vgpu_fecs_trace *trace =
		(struct vgpu_fecs_trace *)g->fecs_trace;
	struct tegra_vgpu_cmd_msg msg = {
		.cmd = TEGRA_VGPU_CMD_FECS_TRACE_DISABLE,
		.handle = vgpu_get_handle(g),
	};
	int ret;

	trace->enabled = false;
	ret = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	if (!ret)
		ret = msg.ret;	/* transport OK: report the server's verdict */
	WARN_ON(ret);
	return ret;
}
static bool vpgpu_fecs_trace_is_enabled(struct gk20a *g)
{
struct vgpu_fecs_trace *vcst = (struct vgpu_fecs_trace *)g->fecs_trace;
return (vcst && vcst->enabled);
}
/*
 * Ask the vGPU server to flush any pending FECS trace entries into the
 * shared ring.  Returns 0 on success, a negative error otherwise.
 */
static int vgpu_fecs_trace_poll(struct gk20a *g)
{
	struct tegra_vgpu_cmd_msg msg = {
		.cmd = TEGRA_VGPU_CMD_FECS_TRACE_POLL,
		.handle = vgpu_get_handle(g),
	};
	int ret;

	ret = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	if (!ret)
		ret = msg.ret;	/* transport OK: report the server's verdict */
	WARN_ON(ret);
	return ret;
}
/*
 * Hand out the already-mapped shared ring as the user buffer; nothing is
 * allocated here, the mapping is owned by init/deinit.  Always returns 0.
 */
static int vgpu_alloc_user_buffer(struct gk20a *g, void **buf, size_t *size)
{
	struct vgpu_fecs_trace *trace =
		(struct vgpu_fecs_trace *)g->fecs_trace;

	*buf = trace->buf;
	*size = trace->cookie->size;
	return 0;
}
/*
 * Counterpart of vgpu_alloc_user_buffer(): nothing to free, since the
 * "user buffer" is the long-lived mempool mapping owned by init/deinit.
 */
static int vgpu_free_user_buffer(struct gk20a *g)
{
	return 0;
}
/*
 * Map the shared FECS trace ring into a userspace vma.
 * Maps min(mempool size, vma length), rounded up to a whole page.
 *
 * NOTE(review): round_up() after min() can exceed the vma length when
 * vsize is not page-aligned; remap_pfn_range() would then cover more
 * than the vma requested — confirm callers always pass page-aligned
 * vmas, or round down instead.
 */
static int vgpu_mmap_user_buffer(struct gk20a *g, struct vm_area_struct *vma)
{
	struct vgpu_fecs_trace *vcst = (struct vgpu_fecs_trace *)g->fecs_trace;
	unsigned long size = vcst->cookie->size;
	unsigned long vsize = vma->vm_end - vma->vm_start;

	size = min(size, vsize);
	size = round_up(size, PAGE_SIZE);

	/* Physical backing is the hypervisor mempool (ipa = its base). */
	return remap_pfn_range(vma, vma->vm_start,
			vcst->cookie->ipa >> PAGE_SHIFT,
			size,
			vma->vm_page_prot);
}
/*
 * Report the ring capacity as published by the server in the shared
 * ring header.  The filter argument is unused in the vGPU case.
 */
static int vgpu_fecs_trace_max_entries(struct gk20a *g,
		struct nvgpu_ctxsw_trace_filter *filter)
{
	struct vgpu_fecs_trace *trace =
		(struct vgpu_fecs_trace *)g->fecs_trace;

	return trace->header->num_ents;
}
#if NVGPU_CTXSW_FILTER_SIZE != TEGRA_VGPU_FECS_TRACE_FILTER_SIZE
#error "FECS trace filter size mismatch!"
#endif
/*
 * Push the client's tag filter to the vGPU server.  The bitmap is copied
 * verbatim; the #if above this function guarantees both sides agree on
 * its size.  Returns 0 on success, a negative error otherwise.
 */
static int vgpu_fecs_trace_set_filter(struct gk20a *g,
		struct nvgpu_ctxsw_trace_filter *filter)
{
	struct tegra_vgpu_cmd_msg msg = {
		.cmd = TEGRA_VGPU_CMD_FECS_TRACE_SET_FILTER,
		.handle = vgpu_get_handle(g),
	};
	struct tegra_vgpu_fecs_trace_filter *params =
		&msg.params.fecs_trace_filter;
	int ret;

	memcpy(&params->tag_bits, &filter->tag_bits, sizeof(params->tag_bits));
	ret = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	if (!ret)
		ret = msg.ret;	/* transport OK: report the server's verdict */
	WARN_ON(ret);
	return ret;
}
/*
 * Wire the vGPU FECS-trace implementation into the gpu_ops table.
 * Operations handled entirely server-side (reset, flush, channel
 * bind/unbind) are explicitly left NULL.
 */
void vgpu_init_fecs_trace_ops(struct gpu_ops *ops)
{
	/* lifecycle */
	ops->fecs_trace.init = vgpu_fecs_trace_init;
	ops->fecs_trace.deinit = vgpu_fecs_trace_deinit;
	ops->fecs_trace.enable = vgpu_fecs_trace_enable;
	ops->fecs_trace.disable = vgpu_fecs_trace_disable;
	ops->fecs_trace.is_enabled = vpgpu_fecs_trace_is_enabled;

	/* not applicable on vGPU: the server owns these */
	ops->fecs_trace.reset = NULL;
	ops->fecs_trace.flush = NULL;
	ops->fecs_trace.bind_channel = NULL;
	ops->fecs_trace.unbind_channel = NULL;

	/* data path */
	ops->fecs_trace.poll = vgpu_fecs_trace_poll;
	ops->fecs_trace.max_entries = vgpu_fecs_trace_max_entries;
	ops->fecs_trace.alloc_user_buffer = vgpu_alloc_user_buffer;
	ops->fecs_trace.free_user_buffer = vgpu_free_user_buffer;
	ops->fecs_trace.mmap_user_buffer = vgpu_mmap_user_buffer;
	ops->fecs_trace.set_filter = vgpu_fecs_trace_set_filter;
}
/*
 * Server notification hook: new trace data has landed in the shared
 * ring, so wake up any readers blocked on vm index 0.
 */
void vgpu_fecs_trace_data_update(struct gk20a *g)
{
	gk20a_ctxsw_trace_wake_up(g, 0);
}