author     Joshua Bakita <bakitajoshua@gmail.com>  2024-09-25 16:09:09 -0400
committer  Joshua Bakita <bakitajoshua@gmail.com>  2024-09-25 16:09:09 -0400
commit     f347fde22f1297e4f022600d201780d5ead78114 (patch)
tree       76be305d6187003a1e0486ff6e91efb1062ae118  /include/os/linux/vgpu/vgpu_linux.c
parent     8340d234d78a7d0f46c11a584de538148b78b7cb (diff)

Delete no-longer-needed nvgpu headers  (HEAD, master, jbakita-wip)
The dependency on these was removed in commit 8340d234.
Diffstat (limited to 'include/os/linux/vgpu/vgpu_linux.c')
-rw-r--r--  include/os/linux/vgpu/vgpu_linux.c  | 525
1 file changed, 0 insertions(+), 525 deletions(-)
diff --git a/include/os/linux/vgpu/vgpu_linux.c b/include/os/linux/vgpu/vgpu_linux.c
deleted file mode 100644
index 80bcfff..0000000
--- a/include/os/linux/vgpu/vgpu_linux.c
+++ /dev/null
@@ -1,525 +0,0 @@
/*
 * Virtualized GPU for Linux
 *
 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/platform_device.h>
#include <soc/tegra/chip-id.h>

#include <nvgpu/kmem.h>
#include <nvgpu/bug.h>
#include <nvgpu/enabled.h>
#include <nvgpu/debug.h>
#include <nvgpu/soc.h>
#include <nvgpu/ctxsw_trace.h>
#include <nvgpu/defaults.h>
#include <nvgpu/ltc.h>
#include <nvgpu/channel.h>
#include <nvgpu/clk_arb.h>

#include "vgpu_linux.h"
#include "vgpu/fecs_trace_vgpu.h"
#include "vgpu/clk_vgpu.h"
#include "gk20a/regops_gk20a.h"
#include "gm20b/hal_gm20b.h"

#include "os/linux/module.h"
#include "os/linux/os_linux.h"
#include "os/linux/ioctl.h"
#include "os/linux/scale.h"
#include "os/linux/driver_common.h"
#include "os/linux/platform_gk20a.h"
#include "os/linux/vgpu/platform_vgpu_tegra.h"

struct vgpu_priv_data *vgpu_get_priv_data(struct gk20a *g)
{
	struct gk20a_platform *plat = gk20a_get_platform(dev_from_gk20a(g));

	return (struct vgpu_priv_data *)plat->vgpu_priv;
}

static void vgpu_remove_support(struct gk20a *g)
{
	vgpu_remove_support_common(g);
}

static void vgpu_init_vars(struct gk20a *g, struct gk20a_platform *platform)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

	nvgpu_mutex_init(&g->power_lock);
	nvgpu_mutex_init(&g->ctxsw_disable_lock);
	nvgpu_mutex_init(&g->clk_arb_enable_lock);
	nvgpu_mutex_init(&g->cg_pg_lock);

	nvgpu_mutex_init(&priv->vgpu_clk_get_freq_lock);

	nvgpu_mutex_init(&l->ctrl.privs_lock);
	nvgpu_init_list_node(&l->ctrl.privs);

	l->regs_saved = l->regs;
	l->bar1_saved = l->bar1;

	nvgpu_atomic_set(&g->clk_arb_global_nr, 0);

	g->aggressive_sync_destroy = platform->aggressive_sync_destroy;
	g->aggressive_sync_destroy_thresh = platform->aggressive_sync_destroy_thresh;
	__nvgpu_set_enabled(g, NVGPU_HAS_SYNCPOINTS, platform->has_syncpoints);
	g->ptimer_src_freq = platform->ptimer_src_freq;
	__nvgpu_set_enabled(g, NVGPU_CAN_RAILGATE, platform->can_railgate_init);
	g->railgate_delay = platform->railgate_delay_init;

	__nvgpu_set_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES,
			platform->unify_address_spaces);
}

static int vgpu_init_support(struct platform_device *pdev)
{
	struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct gk20a *g = get_gk20a(&pdev->dev);
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	void __iomem *regs;
	int err = 0;

	if (!r) {
		nvgpu_err(g, "failed to get gk20a bar1");
		err = -ENXIO;
		goto fail;
	}

	if (r->name && !strcmp(r->name, "/vgpu")) {
		regs = devm_ioremap_resource(&pdev->dev, r);
		if (IS_ERR(regs)) {
			nvgpu_err(g, "failed to remap gk20a bar1");
			err = PTR_ERR(regs);
			goto fail;
		}
		l->bar1 = regs;
		l->bar1_mem = r;
	}

	nvgpu_mutex_init(&g->dbg_sessions_lock);
	nvgpu_mutex_init(&g->client_lock);

	nvgpu_init_list_node(&g->profiler_objects);

	g->dbg_regops_tmp_buf = nvgpu_kzalloc(g, SZ_4K);
	if (!g->dbg_regops_tmp_buf) {
		nvgpu_err(g, "couldn't allocate regops tmp buf");
		return -ENOMEM;
	}
	g->dbg_regops_tmp_buf_ops =
		SZ_4K / sizeof(g->dbg_regops_tmp_buf[0]);

	g->remove_support = vgpu_remove_support;
	return 0;

 fail:
	vgpu_remove_support(g);
	return err;
}

int vgpu_pm_prepare_poweroff(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	int ret = 0;

	nvgpu_log_fn(g, " ");

	nvgpu_mutex_acquire(&g->power_lock);

	if (!g->power_on)
		goto done;

	if (g->ops.fifo.channel_suspend)
		ret = g->ops.fifo.channel_suspend(g);
	if (ret)
		goto done;

	g->power_on = false;
 done:
	nvgpu_mutex_release(&g->power_lock);

	return ret;
}

int vgpu_pm_finalize_poweron(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	int err = 0;

	nvgpu_log_fn(g, " ");

	nvgpu_mutex_acquire(&g->power_lock);

	if (g->power_on)
		goto done;

	g->power_on = true;

	vgpu_detect_chip(g);
	err = vgpu_init_hal(g);
	if (err)
		goto done;

	if (g->ops.ltc.init_fs_state)
		g->ops.ltc.init_fs_state(g);

	err = nvgpu_init_ltc_support(g);
	if (err) {
		nvgpu_err(g, "failed to init ltc");
		goto done;
	}

	err = vgpu_init_mm_support(g);
	if (err) {
		nvgpu_err(g, "failed to init gk20a mm");
		goto done;
	}

	err = vgpu_init_fifo_support(g);
	if (err) {
		nvgpu_err(g, "failed to init gk20a fifo");
		goto done;
	}

	err = vgpu_init_gr_support(g);
	if (err) {
		nvgpu_err(g, "failed to init gk20a gr");
		goto done;
	}

	err = nvgpu_clk_arb_init_arbiter(g);
	if (err) {
		nvgpu_err(g, "failed to init clk arb");
		goto done;
	}

	err = g->ops.chip_init_gpu_characteristics(g);
	if (err) {
		nvgpu_err(g, "failed to init gk20a gpu characteristics");
		goto done;
	}

	err = nvgpu_finalize_poweron_linux(l);
	if (err)
		goto done;

#ifdef CONFIG_GK20A_CTXSW_TRACE
	gk20a_ctxsw_trace_init(g);
#endif
	gk20a_sched_ctrl_init(g);
	gk20a_channel_resume(g);

	g->sw_ready = true;

done:
	if (err)
		g->power_on = false;

	nvgpu_mutex_release(&g->power_lock);
	return err;
}

static int vgpu_qos_notify(struct notifier_block *nb,
			unsigned long n, void *data)
{
	struct gk20a_scale_profile *profile =
			container_of(nb, struct gk20a_scale_profile,
					qos_notify_block);
	struct gk20a *g = get_gk20a(profile->dev);
	u32 max_freq;
	int err;

	nvgpu_log_fn(g, " ");

	max_freq = (u32)pm_qos_read_max_bound(PM_QOS_GPU_FREQ_BOUNDS);
	err = vgpu_plat_clk_cap_rate(profile->dev, max_freq);
	if (err)
		nvgpu_err(g, "%s failed, err=%d", __func__, err);

	return NOTIFY_OK; /* need notify call further */
}

static int vgpu_pm_qos_init(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	struct gk20a_scale_profile *profile = g->scale_profile;

	if (IS_ENABLED(CONFIG_GK20A_DEVFREQ)) {
		if (!profile)
			return -EINVAL;
	} else {
		profile = nvgpu_kzalloc(g, sizeof(*profile));
		if (!profile)
			return -ENOMEM;
		g->scale_profile = profile;
	}

	profile->dev = dev;
	profile->qos_notify_block.notifier_call = vgpu_qos_notify;
	pm_qos_add_max_notifier(PM_QOS_GPU_FREQ_BOUNDS,
				&profile->qos_notify_block);
	return 0;
}

static void vgpu_pm_qos_remove(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);

	pm_qos_remove_max_notifier(PM_QOS_GPU_FREQ_BOUNDS,
				&g->scale_profile->qos_notify_block);
	nvgpu_kfree(g, g->scale_profile);
	g->scale_profile = NULL;
}

static int vgpu_pm_init(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	unsigned long *freqs;
	int num_freqs;
	int err = 0;

	nvgpu_log_fn(g, " ");

	if (nvgpu_platform_is_simulation(g))
		return 0;

	__pm_runtime_disable(dev, false);

	if (IS_ENABLED(CONFIG_GK20A_DEVFREQ))
		gk20a_scale_init(dev);

	if (l->devfreq) {
		/* set min/max frequency based on frequency table */
		err = platform->get_clk_freqs(dev, &freqs, &num_freqs);
		if (err)
			return err;

		if (num_freqs < 1)
			return -EINVAL;

		l->devfreq->min_freq = freqs[0];
		l->devfreq->max_freq = freqs[num_freqs - 1];
	}

	err = vgpu_pm_qos_init(dev);
	if (err)
		return err;

	return err;
}

int vgpu_probe(struct platform_device *pdev)
{
	struct nvgpu_os_linux *l;
	struct gk20a *gk20a;
	int err;
	struct device *dev = &pdev->dev;
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	struct vgpu_priv_data *priv;

	if (!platform) {
		dev_err(dev, "no platform data\n");
		return -ENODATA;
	}

	l = kzalloc(sizeof(*l), GFP_KERNEL);
	if (!l) {
		dev_err(dev, "couldn't allocate gk20a support");
		return -ENOMEM;
	}
	gk20a = &l->g;

	nvgpu_log_fn(gk20a, " ");

	nvgpu_init_gk20a(gk20a);

	nvgpu_kmem_init(gk20a);

	err = nvgpu_init_enabled_flags(gk20a);
	if (err) {
		kfree(gk20a);
		return err;
	}

	l->dev = dev;
	if (tegra_platform_is_vdk())
		__nvgpu_set_enabled(gk20a, NVGPU_IS_FMODEL, true);

	gk20a->is_virtual = true;

	priv = nvgpu_kzalloc(gk20a, sizeof(*priv));
	if (!priv) {
		kfree(gk20a);
		return -ENOMEM;
	}

	platform->g = gk20a;
	platform->vgpu_priv = priv;

	err = gk20a_user_init(dev, INTERFACE_NAME, &nvgpu_class);
	if (err)
		return err;

	vgpu_init_support(pdev);

	vgpu_init_vars(gk20a, platform);

	init_rwsem(&l->busy_lock);

	nvgpu_spinlock_init(&gk20a->mc_enable_lock);

	gk20a->ch_wdt_timeout_ms = platform->ch_wdt_timeout_ms;

	/* Initialize the platform interface. */
	err = platform->probe(dev);
	if (err) {
		if (err == -EPROBE_DEFER)
			nvgpu_info(gk20a, "platform probe failed");
		else
			nvgpu_err(gk20a, "platform probe failed");
		return err;
	}

	if (platform->late_probe) {
		err = platform->late_probe(dev);
		if (err) {
			nvgpu_err(gk20a, "late probe failed");
			return err;
		}
	}

	err = vgpu_comm_init(gk20a);
	if (err) {
		nvgpu_err(gk20a, "failed to init comm interface");
		return -ENOSYS;
	}

	priv->virt_handle = vgpu_connect();
	if (!priv->virt_handle) {
		nvgpu_err(gk20a, "failed to connect to server node");
		vgpu_comm_deinit();
		return -ENOSYS;
	}

	err = vgpu_get_constants(gk20a);
	if (err) {
		vgpu_comm_deinit();
		return err;
	}

	err = vgpu_pm_init(dev);
	if (err) {
		nvgpu_err(gk20a, "pm init failed");
		return err;
	}

	err = nvgpu_thread_create(&priv->intr_handler, gk20a,
			vgpu_intr_thread, "gk20a");
	if (err)
		return err;

	gk20a_debug_init(gk20a, "gpu.0");

	/* Set DMA parameters to allow larger sgt lists */
	dev->dma_parms = &l->dma_parms;
	dma_set_max_seg_size(dev, UINT_MAX);

	gk20a->gr_idle_timeout_default = NVGPU_DEFAULT_GR_IDLE_TIMEOUT;
	gk20a->timeouts_disabled_by_user = false;
	nvgpu_atomic_set(&gk20a->timeouts_disabled_refcount, 0);

	vgpu_create_sysfs(dev);
	gk20a_init_gr(gk20a);

	nvgpu_log_info(gk20a, "total ram pages : %lu", totalram_pages);
	gk20a->gr.max_comptag_mem = totalram_size_in_mb;

	nvgpu_ref_init(&gk20a->refcount);

	return 0;
}

int vgpu_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct gk20a *g = get_gk20a(dev);

	nvgpu_log_fn(g, " ");

	vgpu_pm_qos_remove(dev);
	if (g->remove_support)
		g->remove_support(g);

	vgpu_comm_deinit();
	gk20a_sched_ctrl_cleanup(g);
	gk20a_user_deinit(dev, &nvgpu_class);
	vgpu_remove_sysfs(dev);
	gk20a_get_platform(dev)->g = NULL;
	gk20a_put(g);

	return 0;
}

bool vgpu_is_reduced_bar1(struct gk20a *g)
{
	struct fifo_gk20a *f = &g->fifo;
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

	return resource_size(l->bar1_mem) == (resource_size_t)f->userd.size;
}

int vgpu_tegra_suspend(struct device *dev)
{
	struct tegra_vgpu_cmd_msg msg = {};
	struct gk20a *g = get_gk20a(dev);
	int err = 0;

	msg.cmd = TEGRA_VGPU_CMD_SUSPEND;
	msg.handle = vgpu_get_handle(g);
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	err = err ? err : msg.ret;
	if (err)
		nvgpu_err(g, "vGPU suspend failed\n");

	return err;
}

int vgpu_tegra_resume(struct device *dev)
{
	struct tegra_vgpu_cmd_msg msg = {};
	struct gk20a *g = get_gk20a(dev);
	int err = 0;

	msg.cmd = TEGRA_VGPU_CMD_RESUME;
	msg.handle = vgpu_get_handle(g);
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	err = err ? err : msg.ret;
	if (err)
		nvgpu_err(g, "vGPU resume failed\n");

	return err;
}