author    Joshua Bakita <bakitajoshua@gmail.com>  2024-09-25 16:09:09 -0400
committer Joshua Bakita <bakitajoshua@gmail.com>  2024-09-25 16:09:09 -0400
commit    f347fde22f1297e4f022600d201780d5ead78114 (patch)
tree      76be305d6187003a1e0486ff6e91efb1062ae118 /include/os/linux/linux-channel.c
parent    8340d234d78a7d0f46c11a584de538148b78b7cb (diff)
Delete no-longer-needed nvgpu headers (HEAD, master, jbakita-wip)
The dependency on these was removed in commit 8340d234.
Diffstat (limited to 'include/os/linux/linux-channel.c')

 include/os/linux/linux-channel.c | 657 --------------------------------
 1 file changed, 0 insertions(+), 657 deletions(-)
diff --git a/include/os/linux/linux-channel.c b/include/os/linux/linux-channel.c
deleted file mode 100644
index d035baf..0000000
--- a/include/os/linux/linux-channel.c
+++ /dev/null
@@ -1,657 +0,0 @@
/*
 * Copyright (c) 2017-2018, NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <nvgpu/enabled.h>
#include <nvgpu/debug.h>
#include <nvgpu/error_notifier.h>
#include <nvgpu/os_sched.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/channel.h>
#include <nvgpu/dma.h>

/*
 * This is required for nvgpu_vm_find_buf() which is used in the tracing
 * code. Once we can get and access userspace buffers without requiring
 * direct dma_buf usage this can be removed.
 */
#include <nvgpu/linux/vm.h>

#include "channel.h"
#include "ioctl_channel.h"
#include "os_linux.h"
#include "dmabuf.h"

#include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h>

#include <linux/uaccess.h>
#include <linux/dma-buf.h>
#include <trace/events/gk20a.h>
#include <uapi/linux/nvgpu.h>

#include "sync_sema_android.h"

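/*
 * Translate the NVGPU_SUBMIT_GPFIFO_FLAGS_* values from the Linux UAPI
 * into the OS-agnostic NVGPU_SUBMIT_FLAGS_* values used by common code.
 */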
u32 nvgpu_submit_gpfifo_user_flags_to_common_flags(u32 user_flags)
{
	u32 flags = 0;

	if (user_flags & NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_WAIT)
		flags |= NVGPU_SUBMIT_FLAGS_FENCE_WAIT;

	if (user_flags & NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET)
		flags |= NVGPU_SUBMIT_FLAGS_FENCE_GET;

	if (user_flags & NVGPU_SUBMIT_GPFIFO_FLAGS_HW_FORMAT)
		flags |= NVGPU_SUBMIT_FLAGS_HW_FORMAT;

	if (user_flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE)
		flags |= NVGPU_SUBMIT_FLAGS_SYNC_FENCE;

	if (user_flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SUPPRESS_WFI)
		flags |= NVGPU_SUBMIT_FLAGS_SUPPRESS_WFI;

	if (user_flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SKIP_BUFFER_REFCOUNTING)
		flags |= NVGPU_SUBMIT_FLAGS_SKIP_BUFFER_REFCOUNTING;

	return flags;
}

/*
 * API to convert error_notifiers in common code and of the form
 * NVGPU_ERR_NOTIFIER_* into Linux specific error_notifiers exposed to user
 * space and of the form NVGPU_CHANNEL_*
 */
static u32 nvgpu_error_notifier_to_channel_notifier(u32 error_notifier)
{
	switch (error_notifier) {
	case NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT:
		return NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT;
	case NVGPU_ERR_NOTIFIER_GR_ERROR_SW_METHOD:
		return NVGPU_CHANNEL_GR_ERROR_SW_METHOD;
	case NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY:
		return NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY;
	case NVGPU_ERR_NOTIFIER_GR_EXCEPTION:
		return NVGPU_CHANNEL_GR_EXCEPTION;
	case NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT:
		return NVGPU_CHANNEL_GR_SEMAPHORE_TIMEOUT;
	case NVGPU_ERR_NOTIFIER_GR_ILLEGAL_NOTIFY:
		return NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY;
	case NVGPU_ERR_NOTIFIER_FIFO_ERROR_MMU_ERR_FLT:
		return NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT;
	case NVGPU_ERR_NOTIFIER_PBDMA_ERROR:
		return NVGPU_CHANNEL_PBDMA_ERROR;
	case NVGPU_ERR_NOTIFIER_FECS_ERR_UNIMP_FIRMWARE_METHOD:
		return NVGPU_CHANNEL_FECS_ERR_UNIMP_FIRMWARE_METHOD;
	case NVGPU_ERR_NOTIFIER_RESETCHANNEL_VERIF_ERROR:
		return NVGPU_CHANNEL_RESETCHANNEL_VERIF_ERROR;
	case NVGPU_ERR_NOTIFIER_PBDMA_PUSHBUFFER_CRC_MISMATCH:
		return NVGPU_CHANNEL_PBDMA_PUSHBUFFER_CRC_MISMATCH;
	}

	pr_warn("%s: invalid error_notifier requested %u\n", __func__, error_notifier);

	return error_notifier;
}

/**
 * nvgpu_set_error_notifier_locked()
 * Should be called with ch->error_notifier_mutex held
 *
 * error should be of the form NVGPU_ERR_NOTIFIER_*
 */
void nvgpu_set_error_notifier_locked(struct channel_gk20a *ch, u32 error)
{
	struct nvgpu_channel_linux *priv = ch->os_priv;

	error = nvgpu_error_notifier_to_channel_notifier(error);

	if (priv->error_notifier.dmabuf) {
		struct nvgpu_notification *notification =
			priv->error_notifier.notification;
		struct timespec time_data;
		u64 nsec;

		getnstimeofday(&time_data);
		nsec = ((u64)time_data.tv_sec) * 1000000000u +
				(u64)time_data.tv_nsec;
		notification->time_stamp.nanoseconds[0] =
				(u32)nsec;
		notification->time_stamp.nanoseconds[1] =
				(u32)(nsec >> 32);
		notification->info32 = error;
		notification->status = 0xffff;

		nvgpu_err(ch->g,
		    "error notifier set to %d for ch %d", error, ch->chid);
	}
}

/* error should be of the form NVGPU_ERR_NOTIFIER_* */
void nvgpu_set_error_notifier(struct channel_gk20a *ch, u32 error)
{
	struct nvgpu_channel_linux *priv = ch->os_priv;

	nvgpu_mutex_acquire(&priv->error_notifier.mutex);
	nvgpu_set_error_notifier_locked(ch, error);
	nvgpu_mutex_release(&priv->error_notifier.mutex);
}

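/*
 * As nvgpu_set_error_notifier(), but only writes the notification if none
 * is already pending; status == 0xffff marks a delivered notifier.
 */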
void nvgpu_set_error_notifier_if_empty(struct channel_gk20a *ch, u32 error)
{
	struct nvgpu_channel_linux *priv = ch->os_priv;

	nvgpu_mutex_acquire(&priv->error_notifier.mutex);
	if (priv->error_notifier.dmabuf) {
		struct nvgpu_notification *notification =
			priv->error_notifier.notification;

		/* Don't overwrite error flag if it is already set */
		if (notification->status != 0xffff)
			nvgpu_set_error_notifier_locked(ch, error);
	}
	nvgpu_mutex_release(&priv->error_notifier.mutex);
}

/* error_notifier should be of the form NVGPU_ERR_NOTIFIER_* */
bool nvgpu_is_error_notifier_set(struct channel_gk20a *ch, u32 error_notifier)
{
	struct nvgpu_channel_linux *priv = ch->os_priv;
	bool notifier_set = false;

	error_notifier = nvgpu_error_notifier_to_channel_notifier(error_notifier);

	nvgpu_mutex_acquire(&priv->error_notifier.mutex);
	if (priv->error_notifier.dmabuf) {
		struct nvgpu_notification *notification =
			priv->error_notifier.notification;
		u32 err = notification->info32;

		if (err == error_notifier)
			notifier_set = true;
	}
	nvgpu_mutex_release(&priv->error_notifier.mutex);

	return notifier_set;
}

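/*
 * Workqueue handler for channel completion callbacks: snapshot the
 * registered callback and its user data under the spinlock, then invoke
 * the callback outside the lock.
 */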
static void gk20a_channel_update_runcb_fn(struct work_struct *work)
{
	struct nvgpu_channel_completion_cb *completion_cb =
		container_of(work, struct nvgpu_channel_completion_cb, work);
	struct nvgpu_channel_linux *priv =
		container_of(completion_cb,
				struct nvgpu_channel_linux, completion_cb);
	struct channel_gk20a *ch = priv->ch;
	void (*fn)(struct channel_gk20a *, void *);
	void *user_data;

	nvgpu_spinlock_acquire(&completion_cb->lock);
	fn = completion_cb->fn;
	user_data = completion_cb->user_data;
	nvgpu_spinlock_release(&completion_cb->lock);

	if (fn)
		fn(ch, user_data);
}

static void nvgpu_channel_work_completion_init(struct channel_gk20a *ch)
{
	struct nvgpu_channel_linux *priv = ch->os_priv;

	priv->completion_cb.fn = NULL;
	priv->completion_cb.user_data = NULL;
	nvgpu_spinlock_init(&priv->completion_cb.lock);
	INIT_WORK(&priv->completion_cb.work, gk20a_channel_update_runcb_fn);
}

static void nvgpu_channel_work_completion_clear(struct channel_gk20a *ch)
{
	struct nvgpu_channel_linux *priv = ch->os_priv;

	nvgpu_spinlock_acquire(&priv->completion_cb.lock);
	priv->completion_cb.fn = NULL;
	priv->completion_cb.user_data = NULL;
	nvgpu_spinlock_release(&priv->completion_cb.lock);
	cancel_work_sync(&priv->completion_cb.work);
}

static void nvgpu_channel_work_completion_signal(struct channel_gk20a *ch)
{
	struct nvgpu_channel_linux *priv = ch->os_priv;

	if (priv->completion_cb.fn)
		schedule_work(&priv->completion_cb.work);
}

static void nvgpu_channel_work_completion_cancel_sync(struct channel_gk20a *ch)
{
	struct nvgpu_channel_linux *priv = ch->os_priv;

	if (priv->completion_cb.fn)
		cancel_work_sync(&priv->completion_cb.work);
}

struct channel_gk20a *gk20a_open_new_channel_with_cb(struct gk20a *g,
		void (*update_fn)(struct channel_gk20a *, void *),
		void *update_fn_data,
		int runlist_id,
		bool is_privileged_channel)
{
	struct channel_gk20a *ch;
	struct nvgpu_channel_linux *priv;

	ch = gk20a_open_new_channel(g, runlist_id, is_privileged_channel,
			nvgpu_current_pid(g), nvgpu_current_tid(g));

	if (ch) {
		priv = ch->os_priv;
		nvgpu_spinlock_acquire(&priv->completion_cb.lock);
		priv->completion_cb.fn = update_fn;
		priv->completion_cb.user_data = update_fn_data;
		nvgpu_spinlock_release(&priv->completion_cb.lock);
	}

	return ch;
}

static void nvgpu_channel_open_linux(struct channel_gk20a *ch)
{
}

static void nvgpu_channel_close_linux(struct channel_gk20a *ch)
{
	nvgpu_channel_work_completion_clear(ch);

#if defined(CONFIG_GK20A_CYCLE_STATS)
	gk20a_channel_free_cycle_stats_buffer(ch);
	gk20a_channel_free_cycle_stats_snapshot(ch);
#endif
}

static int nvgpu_channel_alloc_linux(struct gk20a *g, struct channel_gk20a *ch)
{
	struct nvgpu_channel_linux *priv;
	int err;

	priv = nvgpu_kzalloc(g, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	ch->os_priv = priv;
	priv->ch = ch;

#ifdef CONFIG_SYNC
	ch->has_os_fence_framework_support = true;
#endif

	err = nvgpu_mutex_init(&priv->error_notifier.mutex);
	if (err) {
		nvgpu_kfree(g, priv);
		return err;
	}

	nvgpu_channel_work_completion_init(ch);

	return 0;
}

static void nvgpu_channel_free_linux(struct gk20a *g, struct channel_gk20a *ch)
{
	struct nvgpu_channel_linux *priv = ch->os_priv;

	nvgpu_mutex_destroy(&priv->error_notifier.mutex);
	nvgpu_kfree(g, priv);

	ch->os_priv = NULL;

#ifdef CONFIG_SYNC
	ch->has_os_fence_framework_support = false;
#endif
}

static int nvgpu_channel_init_os_fence_framework(struct channel_gk20a *ch,
	const char *fmt, ...)
{
	struct nvgpu_channel_linux *priv = ch->os_priv;
	struct nvgpu_os_fence_framework *fence_framework;
	char name[30];
	va_list args;

	fence_framework = &priv->fence_framework;

	va_start(args, fmt);
	vsnprintf(name, sizeof(name), fmt, args);
	va_end(args);

	fence_framework->timeline = gk20a_sync_timeline_create(name);

	if (!fence_framework->timeline)
		return -EINVAL;

	return 0;
}

static void nvgpu_channel_signal_os_fence_framework(struct channel_gk20a *ch)
{
	struct nvgpu_channel_linux *priv = ch->os_priv;
	struct nvgpu_os_fence_framework *fence_framework;

	fence_framework = &priv->fence_framework;

	gk20a_sync_timeline_signal(fence_framework->timeline);
}

static void nvgpu_channel_destroy_os_fence_framework(struct channel_gk20a *ch)
{
	struct nvgpu_channel_linux *priv = ch->os_priv;
	struct nvgpu_os_fence_framework *fence_framework;

	fence_framework = &priv->fence_framework;

	gk20a_sync_timeline_destroy(fence_framework->timeline);
	fence_framework->timeline = NULL;
}

static bool nvgpu_channel_fence_framework_exists(struct channel_gk20a *ch)
{
	struct nvgpu_channel_linux *priv = ch->os_priv;
	struct nvgpu_os_fence_framework *fence_framework;

	fence_framework = &priv->fence_framework;

	return (fence_framework->timeline != NULL);
}

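/*
 * Copy 'length' gpfifo entries, starting at entry 'start', from the
 * userspace buffer described by 'userdata' into 'dest'. Returns -EFAULT
 * if any part of the copy fails.
 */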
static int nvgpu_channel_copy_user_gpfifo(struct nvgpu_gpfifo_entry *dest,
		struct nvgpu_gpfifo_userdata userdata, u32 start, u32 length)
{
	struct nvgpu_gpfifo_entry __user *user_gpfifo = userdata.entries;
	unsigned long n;

	n = copy_from_user(dest, user_gpfifo + start,
			length * sizeof(struct nvgpu_gpfifo_entry));

	return n == 0 ? 0 : -EFAULT;
}

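/*
 * Pin a userspace-supplied dma_buf and describe it with an nvgpu_mem so it
 * can back a usermode submit buffer. On success, 'buf' holds the dmabuf
 * reference, attachment, and sg_table, which must later be released with
 * gk20a_mm_unpin() and dma_buf_put().
 */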
int nvgpu_usermode_buf_from_dmabuf(struct gk20a *g, int dmabuf_fd,
		struct nvgpu_mem *mem, struct nvgpu_usermode_buf_linux *buf)
{
	struct device *dev = dev_from_gk20a(g);
	struct dma_buf *dmabuf;
	struct sg_table *sgt;
	struct dma_buf_attachment *attachment;
	int err;

	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf)) {
		return PTR_ERR(dmabuf);
	}

	if (gk20a_dmabuf_aperture(g, dmabuf) == APERTURE_INVALID) {
		err = -EINVAL;
		goto put_dmabuf;
	}

	err = gk20a_dmabuf_alloc_drvdata(dmabuf, dev);
	if (err != 0) {
		goto put_dmabuf;
	}

	sgt = gk20a_mm_pin(dev, dmabuf, &attachment);
	if (IS_ERR(sgt)) {
		nvgpu_warn(g, "Failed to pin dma_buf!");
		err = PTR_ERR(sgt);
		goto put_dmabuf;
	}

	buf->dmabuf = dmabuf;
	buf->attachment = attachment;
	buf->sgt = sgt;

	/*
	 * This mem is unmapped and freed in a common path; for Linux, we'll
	 * also need to unref the dmabuf stuff (above) but the sgt here is only
	 * borrowed, so it cannot be freed by nvgpu_mem_*.
	 */
	mem->mem_flags = NVGPU_MEM_FLAG_FOREIGN_SGT;
	mem->aperture = APERTURE_SYSMEM;
	mem->skip_wmb = 0;
	mem->size = dmabuf->size;

	mem->priv.flags = 0;
	mem->priv.pages = NULL;
	mem->priv.sgt = sgt;

	return 0;
put_dmabuf:
	dma_buf_put(dmabuf);
	return err;
}

void nvgpu_channel_free_usermode_buffers(struct channel_gk20a *c)
{
	struct nvgpu_channel_linux *priv = c->os_priv;
	struct gk20a *g = c->g;
	struct device *dev = dev_from_gk20a(g);

	if (priv->usermode.gpfifo.dmabuf != NULL) {
		gk20a_mm_unpin(dev, priv->usermode.gpfifo.dmabuf,
				priv->usermode.gpfifo.attachment,
				priv->usermode.gpfifo.sgt);
		dma_buf_put(priv->usermode.gpfifo.dmabuf);
		priv->usermode.gpfifo.dmabuf = NULL;
	}

	if (priv->usermode.userd.dmabuf != NULL) {
		gk20a_mm_unpin(dev, priv->usermode.userd.dmabuf,
				priv->usermode.userd.attachment,
				priv->usermode.userd.sgt);
		dma_buf_put(priv->usermode.userd.dmabuf);
		priv->usermode.userd.dmabuf = NULL;
	}
}

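/*
 * Validate and pin the GPFIFO and USERD dmabufs passed in the setup-bind
 * args, map the GPFIFO into the channel's GPU address space, and report
 * the token userspace uses for work submission. Nonzero dmabuf offsets
 * are not yet supported.
 */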
static int nvgpu_channel_alloc_usermode_buffers(struct channel_gk20a *c,
		struct nvgpu_setup_bind_args *args)
{
	struct nvgpu_channel_linux *priv = c->os_priv;
	struct gk20a *g = c->g;
	struct device *dev = dev_from_gk20a(g);
	size_t gpfifo_size;
	int err;

	if (args->gpfifo_dmabuf_fd == 0 || args->userd_dmabuf_fd == 0) {
		return -EINVAL;
	}

	if (args->gpfifo_dmabuf_offset != 0 ||
			args->userd_dmabuf_offset != 0) {
		/* TODO - not yet supported */
		return -EINVAL;
	}

	err = nvgpu_usermode_buf_from_dmabuf(g, args->gpfifo_dmabuf_fd,
			&c->usermode_gpfifo, &priv->usermode.gpfifo);
	if (err < 0) {
		return err;
	}

	gpfifo_size = max_t(u32, SZ_4K,
			args->num_gpfifo_entries *
			nvgpu_get_gpfifo_entry_size());

	if (c->usermode_gpfifo.size < gpfifo_size) {
		err = -EINVAL;
		goto free_gpfifo;
	}

	c->usermode_gpfifo.gpu_va = nvgpu_gmmu_map(c->vm, &c->usermode_gpfifo,
			c->usermode_gpfifo.size, 0, gk20a_mem_flag_none,
			false, c->usermode_gpfifo.aperture);

	if (c->usermode_gpfifo.gpu_va == 0) {
		err = -ENOMEM;
		goto unmap_free_gpfifo;
	}

	err = nvgpu_usermode_buf_from_dmabuf(g, args->userd_dmabuf_fd,
			&c->usermode_userd, &priv->usermode.userd);
	if (err < 0) {
		goto unmap_free_gpfifo;
	}

	args->work_submit_token = g->fifo.channel_base + c->chid;

	return 0;
unmap_free_gpfifo:
	nvgpu_dma_unmap_free(c->vm, &c->usermode_gpfifo);
free_gpfifo:
	gk20a_mm_unpin(dev, priv->usermode.gpfifo.dmabuf,
			priv->usermode.gpfifo.attachment,
			priv->usermode.gpfifo.sgt);
	dma_buf_put(priv->usermode.gpfifo.dmabuf);
	priv->usermode.gpfifo.dmabuf = NULL;
	return err;
}

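/*
 * Allocate the Linux-specific private state for every channel and install
 * the os_channel hooks used by common code; on failure, unwind any
 * per-channel state allocated so far.
 */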
int nvgpu_init_channel_support_linux(struct nvgpu_os_linux *l)
{
	struct gk20a *g = &l->g;
	struct fifo_gk20a *f = &g->fifo;
	int chid;
	int err;

	for (chid = 0; chid < (int)f->num_channels; chid++) {
		struct channel_gk20a *ch = &f->channel[chid];

		err = nvgpu_channel_alloc_linux(g, ch);
		if (err)
			goto err_clean;
	}

	g->os_channel.open = nvgpu_channel_open_linux;
	g->os_channel.close = nvgpu_channel_close_linux;
	g->os_channel.work_completion_signal =
		nvgpu_channel_work_completion_signal;
	g->os_channel.work_completion_cancel_sync =
		nvgpu_channel_work_completion_cancel_sync;

	g->os_channel.os_fence_framework_inst_exists =
		nvgpu_channel_fence_framework_exists;
	g->os_channel.init_os_fence_framework =
		nvgpu_channel_init_os_fence_framework;
	g->os_channel.signal_os_fence_framework =
		nvgpu_channel_signal_os_fence_framework;
	g->os_channel.destroy_os_fence_framework =
		nvgpu_channel_destroy_os_fence_framework;

	g->os_channel.copy_user_gpfifo =
		nvgpu_channel_copy_user_gpfifo;

	g->os_channel.alloc_usermode_buffers =
		nvgpu_channel_alloc_usermode_buffers;

	g->os_channel.free_usermode_buffers =
		nvgpu_channel_free_usermode_buffers;

	return 0;

err_clean:
	for (; chid >= 0; chid--) {
		struct channel_gk20a *ch = &f->channel[chid];

		nvgpu_channel_free_linux(g, ch);
	}
	return err;
}

void nvgpu_remove_channel_support_linux(struct nvgpu_os_linux *l)
{
	struct gk20a *g = &l->g;
	struct fifo_gk20a *f = &g->fifo;
	unsigned int chid;

	for (chid = 0; chid < f->num_channels; chid++) {
		struct channel_gk20a *ch = &f->channel[chid];

		nvgpu_channel_free_linux(g, ch);
	}

	g->os_channel.os_fence_framework_inst_exists = NULL;
	g->os_channel.init_os_fence_framework = NULL;
	g->os_channel.signal_os_fence_framework = NULL;
	g->os_channel.destroy_os_fence_framework = NULL;
}

u32 nvgpu_get_gpfifo_entry_size(void)
{
	return sizeof(struct nvgpu_gpfifo_entry);
}

#ifdef CONFIG_DEBUG_FS
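/*
 * Decode a single gpfifo entry into a GPU virtual address and word count
 * via the pbdma entry fields, look up the dma_buf backing that address,
 * vmap it, and emit the pushbuffer contents to ftrace in 128-word batches.
 */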
static void trace_write_pushbuffer(struct channel_gk20a *c,
				   struct nvgpu_gpfifo_entry *g)
{
	void *mem = NULL;
	unsigned int words;
	u64 offset;
	struct dma_buf *dmabuf = NULL;

	if (gk20a_debug_trace_cmdbuf) {
		u64 gpu_va = (u64)g->entry0 |
			(u64)((u64)pbdma_gp_entry1_get_hi_v(g->entry1) << 32);
		int err;

		words = pbdma_gp_entry1_length_v(g->entry1);
		err = nvgpu_vm_find_buf(c->vm, gpu_va, &dmabuf, &offset);
		if (!err)
			mem = dma_buf_vmap(dmabuf);
	}

	if (mem) {
		u32 i;
		/*
		 * Write in batches of 128 as there seems to be a limit
		 * of how much you can output to ftrace at once.
		 */
		for (i = 0; i < words; i += 128U) {
			trace_gk20a_push_cmdbuf(
				c->g->name,
				0,
				min(words - i, 128U),
				offset + i * sizeof(u32),
				mem);
		}
		dma_buf_vunmap(dmabuf, mem);
	}
}

void trace_write_pushbuffers(struct channel_gk20a *c, u32 count)
{
	struct nvgpu_gpfifo_entry *gp = c->gpfifo.mem.cpu_va;
	u32 n = c->gpfifo.entry_num;
	u32 start = c->gpfifo.put;
	u32 i;

	if (!gk20a_debug_trace_cmdbuf)
		return;

	if (!gp)
		return;

	for (i = 0; i < count; i++)
		trace_write_pushbuffer(c, &gp[(start + i) % n]);
}
#endif