diff options
author | Richard Zhao <rizhao@nvidia.com> | 2018-01-29 18:43:50 -0500 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-02-27 17:30:32 -0500 |
commit | 8202be50ce80e7fc2cf851a1ec4ad8f9378e3306 (patch) | |
tree | e0b5d6fcd4a9a71c01569cc5a23e67cb2e751628 /drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c | |
parent | 28abb3d2cd3a9efe413be3c2f8e897ccd72f6b93 (diff) |
gpu: nvgpu: vgpu: split vgpu.c into vgpu.c and vgpu_linux.c
vgpu.c will keep common code while vgpu_linux.c is linux specific.
Jira EVLR-2364
Change-Id: Ice9782fa96c256f1b70320886d3720ab0db26244
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1649943
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c')
-rw-r--r-- | drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c | 451 |
1 file changed, 12 insertions, 439 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c index 7cf161e0..7915a599 100644 --- a/drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c +++ b/drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * Virtualized GPU | ||
3 | * | ||
4 | * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. | 2 | * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. |
5 | * | 3 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
@@ -16,40 +14,15 @@ | |||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
17 | */ | 15 | */ |
18 | 16 | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/dma-mapping.h> | ||
21 | #include <linux/pm_runtime.h> | ||
22 | #include <linux/pm_qos.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <soc/tegra/chip-id.h> | ||
25 | #include <uapi/linux/nvgpu.h> | ||
26 | |||
27 | #include <nvgpu/kmem.h> | ||
28 | #include <nvgpu/bug.h> | ||
29 | #include <nvgpu/enabled.h> | 17 | #include <nvgpu/enabled.h> |
30 | #include <nvgpu/debug.h> | ||
31 | #include <nvgpu/bus.h> | 18 | #include <nvgpu/bus.h> |
32 | #include <nvgpu/soc.h> | 19 | #include <nvgpu/vgpu/vgpu_ivc.h> |
33 | #include <nvgpu/ctxsw_trace.h> | ||
34 | #include <nvgpu/defaults.h> | ||
35 | 20 | ||
21 | #include "gk20a/gk20a.h" | ||
36 | #include "vgpu.h" | 22 | #include "vgpu.h" |
37 | #include "fecs_trace_vgpu.h" | 23 | #include "fecs_trace_vgpu.h" |
38 | #include "clk_vgpu.h" | ||
39 | #include "gk20a/tsg_gk20a.h" | ||
40 | #include "gk20a/channel_gk20a.h" | ||
41 | #include "gk20a/regops_gk20a.h" | ||
42 | #include "gm20b/hal_gm20b.h" | ||
43 | |||
44 | #include "common/linux/module.h" | ||
45 | #include "common/linux/os_linux.h" | ||
46 | #include "common/linux/ioctl.h" | ||
47 | #include "common/linux/scale.h" | ||
48 | #include "common/linux/driver_common.h" | ||
49 | 24 | ||
50 | #include <nvgpu/hw/gk20a/hw_mc_gk20a.h> | 25 | int vgpu_comm_init(struct gk20a *g) |
51 | |||
52 | static inline int vgpu_comm_init(struct gk20a *g) | ||
53 | { | 26 | { |
54 | size_t queue_sizes[] = { TEGRA_VGPU_QUEUE_SIZES }; | 27 | size_t queue_sizes[] = { TEGRA_VGPU_QUEUE_SIZES }; |
55 | 28 | ||
@@ -57,7 +30,7 @@ static inline int vgpu_comm_init(struct gk20a *g) | |||
57 | ARRAY_SIZE(queue_sizes)); | 30 | ARRAY_SIZE(queue_sizes)); |
58 | } | 31 | } |
59 | 32 | ||
60 | static inline void vgpu_comm_deinit(void) | 33 | void vgpu_comm_deinit(void) |
61 | { | 34 | { |
62 | size_t queue_sizes[] = { TEGRA_VGPU_QUEUE_SIZES }; | 35 | size_t queue_sizes[] = { TEGRA_VGPU_QUEUE_SIZES }; |
63 | 36 | ||
@@ -83,7 +56,7 @@ int vgpu_comm_sendrecv(struct tegra_vgpu_cmd_msg *msg, size_t size_in, | |||
83 | return err; | 56 | return err; |
84 | } | 57 | } |
85 | 58 | ||
86 | static u64 vgpu_connect(void) | 59 | u64 vgpu_connect(void) |
87 | { | 60 | { |
88 | struct tegra_vgpu_cmd_msg msg; | 61 | struct tegra_vgpu_cmd_msg msg; |
89 | struct tegra_vgpu_connect_params *p = &msg.params.connect; | 62 | struct tegra_vgpu_connect_params *p = &msg.params.connect; |
@@ -125,7 +98,7 @@ static void vgpu_handle_channel_event(struct gk20a *g, | |||
125 | } | 98 | } |
126 | 99 | ||
127 | if (info->id >= g->fifo.num_channels || | 100 | if (info->id >= g->fifo.num_channels || |
128 | info->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX) { | 101 | info->event_id >= TEGRA_VGPU_CHANNEL_EVENT_ID_MAX) { |
129 | nvgpu_err(g, "invalid channel event"); | 102 | nvgpu_err(g, "invalid channel event"); |
130 | return; | 103 | return; |
131 | } | 104 | } |
@@ -135,9 +108,7 @@ static void vgpu_handle_channel_event(struct gk20a *g, | |||
135 | gk20a_tsg_event_id_post_event(tsg, info->event_id); | 108 | gk20a_tsg_event_id_post_event(tsg, info->event_id); |
136 | } | 109 | } |
137 | 110 | ||
138 | 111 | int vgpu_intr_thread(void *dev_id) | |
139 | |||
140 | static int vgpu_intr_thread(void *dev_id) | ||
141 | { | 112 | { |
142 | struct gk20a *g = dev_id; | 113 | struct gk20a *g = dev_id; |
143 | struct vgpu_priv_data *priv = vgpu_get_priv_data(g); | 114 | struct vgpu_priv_data *priv = vgpu_get_priv_data(g); |
@@ -201,11 +172,9 @@ static int vgpu_intr_thread(void *dev_id) | |||
201 | return 0; | 172 | return 0; |
202 | } | 173 | } |
203 | 174 | ||
204 | static void vgpu_remove_support(struct gk20a *g) | 175 | void vgpu_remove_support_common(struct gk20a *g) |
205 | { | 176 | { |
206 | struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); | 177 | struct vgpu_priv_data *priv = vgpu_get_priv_data(g); |
207 | struct vgpu_priv_data *priv = | ||
208 | vgpu_get_priv_data_from_dev(dev_from_gk20a(g)); | ||
209 | struct tegra_vgpu_intr_msg msg; | 178 | struct tegra_vgpu_intr_msg msg; |
210 | int err; | 179 | int err; |
211 | 180 | ||
@@ -229,104 +198,9 @@ static void vgpu_remove_support(struct gk20a *g) | |||
229 | &msg, sizeof(msg)); | 198 | &msg, sizeof(msg)); |
230 | WARN_ON(err); | 199 | WARN_ON(err); |
231 | nvgpu_thread_stop(&priv->intr_handler); | 200 | nvgpu_thread_stop(&priv->intr_handler); |
232 | |||
233 | /* free mappings to registers, etc*/ | ||
234 | |||
235 | if (l->bar1) { | ||
236 | iounmap(l->bar1); | ||
237 | l->bar1 = NULL; | ||
238 | } | ||
239 | } | ||
240 | |||
241 | static void vgpu_init_vars(struct gk20a *g, struct gk20a_platform *platform) | ||
242 | { | ||
243 | struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); | ||
244 | |||
245 | nvgpu_mutex_init(&g->poweron_lock); | ||
246 | nvgpu_mutex_init(&g->poweroff_lock); | ||
247 | l->regs_saved = l->regs; | ||
248 | l->bar1_saved = l->bar1; | ||
249 | |||
250 | nvgpu_init_list_node(&g->pending_sema_waits); | ||
251 | nvgpu_raw_spinlock_init(&g->pending_sema_waits_lock); | ||
252 | |||
253 | g->aggressive_sync_destroy = platform->aggressive_sync_destroy; | ||
254 | g->aggressive_sync_destroy_thresh = platform->aggressive_sync_destroy_thresh; | ||
255 | g->has_syncpoints = platform->has_syncpoints; | ||
256 | g->ptimer_src_freq = platform->ptimer_src_freq; | ||
257 | g->can_railgate = platform->can_railgate_init; | ||
258 | g->railgate_delay = platform->railgate_delay_init; | ||
259 | |||
260 | __nvgpu_set_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES, | ||
261 | platform->unify_address_spaces); | ||
262 | } | ||
263 | |||
264 | static int vgpu_init_support(struct platform_device *pdev) | ||
265 | { | ||
266 | struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
267 | struct gk20a *g = get_gk20a(&pdev->dev); | ||
268 | struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); | ||
269 | void __iomem *regs; | ||
270 | int err = 0; | ||
271 | |||
272 | if (!r) { | ||
273 | nvgpu_err(g, "failed to get gk20a bar1"); | ||
274 | err = -ENXIO; | ||
275 | goto fail; | ||
276 | } | ||
277 | |||
278 | if (r->name && !strcmp(r->name, "/vgpu")) { | ||
279 | regs = devm_ioremap_resource(&pdev->dev, r); | ||
280 | if (IS_ERR(regs)) { | ||
281 | nvgpu_err(g, "failed to remap gk20a bar1"); | ||
282 | err = PTR_ERR(regs); | ||
283 | goto fail; | ||
284 | } | ||
285 | l->bar1 = regs; | ||
286 | l->bar1_mem = r; | ||
287 | } | ||
288 | |||
289 | nvgpu_mutex_init(&g->dbg_sessions_lock); | ||
290 | nvgpu_mutex_init(&g->client_lock); | ||
291 | |||
292 | nvgpu_init_list_node(&g->profiler_objects); | ||
293 | |||
294 | g->dbg_regops_tmp_buf = nvgpu_kzalloc(g, SZ_4K); | ||
295 | if (!g->dbg_regops_tmp_buf) { | ||
296 | nvgpu_err(g, "couldn't allocate regops tmp buf"); | ||
297 | return -ENOMEM; | ||
298 | } | ||
299 | g->dbg_regops_tmp_buf_ops = | ||
300 | SZ_4K / sizeof(g->dbg_regops_tmp_buf[0]); | ||
301 | |||
302 | g->remove_support = vgpu_remove_support; | ||
303 | return 0; | ||
304 | |||
305 | fail: | ||
306 | vgpu_remove_support(g); | ||
307 | return err; | ||
308 | } | ||
309 | |||
310 | int vgpu_pm_prepare_poweroff(struct device *dev) | ||
311 | { | ||
312 | struct gk20a *g = get_gk20a(dev); | ||
313 | int ret = 0; | ||
314 | |||
315 | gk20a_dbg_fn(""); | ||
316 | |||
317 | if (!g->power_on) | ||
318 | return 0; | ||
319 | |||
320 | ret = gk20a_channel_suspend(g); | ||
321 | if (ret) | ||
322 | return ret; | ||
323 | |||
324 | g->power_on = false; | ||
325 | |||
326 | return ret; | ||
327 | } | 201 | } |
328 | 202 | ||
329 | static void vgpu_detect_chip(struct gk20a *g) | 203 | void vgpu_detect_chip(struct gk20a *g) |
330 | { | 204 | { |
331 | struct nvgpu_gpu_params *p = &g->params; | 205 | struct nvgpu_gpu_params *p = &g->params; |
332 | struct vgpu_priv_data *priv = vgpu_get_priv_data(g); | 206 | struct vgpu_priv_data *priv = vgpu_get_priv_data(g); |
@@ -417,7 +291,7 @@ int vgpu_get_timestamps_zipper(struct gk20a *g, | |||
417 | return err; | 291 | return err; |
418 | } | 292 | } |
419 | 293 | ||
420 | static int vgpu_init_hal(struct gk20a *g) | 294 | int vgpu_init_hal(struct gk20a *g) |
421 | { | 295 | { |
422 | u32 ver = g->params.gpu_arch + g->params.gpu_impl; | 296 | u32 ver = g->params.gpu_arch + g->params.gpu_impl; |
423 | int err; | 297 | int err; |
@@ -439,158 +313,7 @@ static int vgpu_init_hal(struct gk20a *g) | |||
439 | return err; | 313 | return err; |
440 | } | 314 | } |
441 | 315 | ||
442 | int vgpu_pm_finalize_poweron(struct device *dev) | 316 | int vgpu_get_constants(struct gk20a *g) |
443 | { | ||
444 | struct gk20a *g = get_gk20a(dev); | ||
445 | struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); | ||
446 | int err; | ||
447 | |||
448 | gk20a_dbg_fn(""); | ||
449 | |||
450 | if (g->power_on) | ||
451 | return 0; | ||
452 | |||
453 | g->power_on = true; | ||
454 | |||
455 | vgpu_detect_chip(g); | ||
456 | err = vgpu_init_hal(g); | ||
457 | if (err) | ||
458 | goto done; | ||
459 | |||
460 | if (g->ops.ltc.init_fs_state) | ||
461 | g->ops.ltc.init_fs_state(g); | ||
462 | |||
463 | err = vgpu_init_mm_support(g); | ||
464 | if (err) { | ||
465 | nvgpu_err(g, "failed to init gk20a mm"); | ||
466 | goto done; | ||
467 | } | ||
468 | |||
469 | err = vgpu_init_fifo_support(g); | ||
470 | if (err) { | ||
471 | nvgpu_err(g, "failed to init gk20a fifo"); | ||
472 | goto done; | ||
473 | } | ||
474 | |||
475 | err = vgpu_init_gr_support(g); | ||
476 | if (err) { | ||
477 | nvgpu_err(g, "failed to init gk20a gr"); | ||
478 | goto done; | ||
479 | } | ||
480 | |||
481 | err = g->ops.chip_init_gpu_characteristics(g); | ||
482 | if (err) { | ||
483 | nvgpu_err(g, "failed to init gk20a gpu characteristics"); | ||
484 | goto done; | ||
485 | } | ||
486 | |||
487 | err = nvgpu_finalize_poweron_linux(l); | ||
488 | if (err) | ||
489 | goto done; | ||
490 | |||
491 | #ifdef CONFIG_GK20A_CTXSW_TRACE | ||
492 | gk20a_ctxsw_trace_init(g); | ||
493 | #endif | ||
494 | gk20a_sched_ctrl_init(g); | ||
495 | gk20a_channel_resume(g); | ||
496 | |||
497 | g->sw_ready = true; | ||
498 | |||
499 | done: | ||
500 | return err; | ||
501 | } | ||
502 | |||
503 | static int vgpu_qos_notify(struct notifier_block *nb, | ||
504 | unsigned long n, void *data) | ||
505 | { | ||
506 | struct gk20a_scale_profile *profile = | ||
507 | container_of(nb, struct gk20a_scale_profile, | ||
508 | qos_notify_block); | ||
509 | struct gk20a *g = get_gk20a(profile->dev); | ||
510 | u32 max_freq; | ||
511 | int err; | ||
512 | |||
513 | gk20a_dbg_fn(""); | ||
514 | |||
515 | max_freq = (u32)pm_qos_read_max_bound(PM_QOS_GPU_FREQ_BOUNDS); | ||
516 | err = vgpu_clk_cap_rate(profile->dev, max_freq); | ||
517 | if (err) | ||
518 | nvgpu_err(g, "%s failed, err=%d", __func__, err); | ||
519 | |||
520 | return NOTIFY_OK; /* need notify call further */ | ||
521 | } | ||
522 | |||
523 | static int vgpu_pm_qos_init(struct device *dev) | ||
524 | { | ||
525 | struct gk20a *g = get_gk20a(dev); | ||
526 | struct gk20a_scale_profile *profile = g->scale_profile; | ||
527 | |||
528 | if (IS_ENABLED(CONFIG_GK20A_DEVFREQ)) { | ||
529 | if (!profile) | ||
530 | return -EINVAL; | ||
531 | } else { | ||
532 | profile = nvgpu_kzalloc(g, sizeof(*profile)); | ||
533 | if (!profile) | ||
534 | return -ENOMEM; | ||
535 | g->scale_profile = profile; | ||
536 | } | ||
537 | |||
538 | profile->dev = dev; | ||
539 | profile->qos_notify_block.notifier_call = vgpu_qos_notify; | ||
540 | pm_qos_add_max_notifier(PM_QOS_GPU_FREQ_BOUNDS, | ||
541 | &profile->qos_notify_block); | ||
542 | return 0; | ||
543 | } | ||
544 | |||
545 | static void vgpu_pm_qos_remove(struct device *dev) | ||
546 | { | ||
547 | struct gk20a *g = get_gk20a(dev); | ||
548 | |||
549 | pm_qos_remove_max_notifier(PM_QOS_GPU_FREQ_BOUNDS, | ||
550 | &g->scale_profile->qos_notify_block); | ||
551 | nvgpu_kfree(g, g->scale_profile); | ||
552 | g->scale_profile = NULL; | ||
553 | } | ||
554 | |||
555 | static int vgpu_pm_init(struct device *dev) | ||
556 | { | ||
557 | struct gk20a *g = get_gk20a(dev); | ||
558 | struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); | ||
559 | unsigned long *freqs; | ||
560 | int num_freqs; | ||
561 | int err = 0; | ||
562 | |||
563 | gk20a_dbg_fn(""); | ||
564 | |||
565 | if (nvgpu_platform_is_simulation(g)) | ||
566 | return 0; | ||
567 | |||
568 | __pm_runtime_disable(dev, false); | ||
569 | |||
570 | if (IS_ENABLED(CONFIG_GK20A_DEVFREQ)) | ||
571 | gk20a_scale_init(dev); | ||
572 | |||
573 | if (l->devfreq) { | ||
574 | /* set min/max frequency based on frequency table */ | ||
575 | err = vgpu_clk_get_freqs(dev, &freqs, &num_freqs); | ||
576 | if (err) | ||
577 | return err; | ||
578 | |||
579 | if (num_freqs < 1) | ||
580 | return -EINVAL; | ||
581 | |||
582 | l->devfreq->min_freq = freqs[0]; | ||
583 | l->devfreq->max_freq = freqs[num_freqs - 1]; | ||
584 | } | ||
585 | |||
586 | err = vgpu_pm_qos_init(dev); | ||
587 | if (err) | ||
588 | return err; | ||
589 | |||
590 | return err; | ||
591 | } | ||
592 | |||
593 | static int vgpu_get_constants(struct gk20a *g) | ||
594 | { | 317 | { |
595 | struct tegra_vgpu_cmd_msg msg = {}; | 318 | struct tegra_vgpu_cmd_msg msg = {}; |
596 | struct tegra_vgpu_constants_params *p = &msg.params.constants; | 319 | struct tegra_vgpu_constants_params *p = &msg.params.constants; |
@@ -619,153 +342,3 @@ static int vgpu_get_constants(struct gk20a *g) | |||
619 | priv->constants = *p; | 342 | priv->constants = *p; |
620 | return 0; | 343 | return 0; |
621 | } | 344 | } |
622 | |||
623 | int vgpu_probe(struct platform_device *pdev) | ||
624 | { | ||
625 | struct nvgpu_os_linux *l; | ||
626 | struct gk20a *gk20a; | ||
627 | int err; | ||
628 | struct device *dev = &pdev->dev; | ||
629 | struct gk20a_platform *platform = gk20a_get_platform(dev); | ||
630 | struct vgpu_priv_data *priv; | ||
631 | |||
632 | if (!platform) { | ||
633 | dev_err(dev, "no platform data\n"); | ||
634 | return -ENODATA; | ||
635 | } | ||
636 | |||
637 | gk20a_dbg_fn(""); | ||
638 | |||
639 | l = kzalloc(sizeof(*l), GFP_KERNEL); | ||
640 | if (!l) { | ||
641 | dev_err(dev, "couldn't allocate gk20a support"); | ||
642 | return -ENOMEM; | ||
643 | } | ||
644 | gk20a = &l->g; | ||
645 | nvgpu_init_gk20a(gk20a); | ||
646 | |||
647 | nvgpu_kmem_init(gk20a); | ||
648 | |||
649 | err = nvgpu_init_enabled_flags(gk20a); | ||
650 | if (err) { | ||
651 | kfree(gk20a); | ||
652 | return err; | ||
653 | } | ||
654 | |||
655 | l->dev = dev; | ||
656 | if (tegra_platform_is_vdk()) | ||
657 | __nvgpu_set_enabled(gk20a, NVGPU_IS_FMODEL, true); | ||
658 | |||
659 | gk20a->is_virtual = true; | ||
660 | |||
661 | priv = nvgpu_kzalloc(gk20a, sizeof(*priv)); | ||
662 | if (!priv) { | ||
663 | kfree(gk20a); | ||
664 | return -ENOMEM; | ||
665 | } | ||
666 | |||
667 | platform->g = gk20a; | ||
668 | platform->vgpu_priv = priv; | ||
669 | |||
670 | err = gk20a_user_init(dev, INTERFACE_NAME, &nvgpu_class); | ||
671 | if (err) | ||
672 | return err; | ||
673 | |||
674 | vgpu_init_support(pdev); | ||
675 | |||
676 | vgpu_init_vars(gk20a, platform); | ||
677 | |||
678 | init_rwsem(&l->busy_lock); | ||
679 | |||
680 | nvgpu_spinlock_init(&gk20a->mc_enable_lock); | ||
681 | |||
682 | gk20a->ch_wdt_timeout_ms = platform->ch_wdt_timeout_ms; | ||
683 | |||
684 | /* Initialize the platform interface. */ | ||
685 | err = platform->probe(dev); | ||
686 | if (err) { | ||
687 | if (err == -EPROBE_DEFER) | ||
688 | nvgpu_info(gk20a, "platform probe failed"); | ||
689 | else | ||
690 | nvgpu_err(gk20a, "platform probe failed"); | ||
691 | return err; | ||
692 | } | ||
693 | |||
694 | if (platform->late_probe) { | ||
695 | err = platform->late_probe(dev); | ||
696 | if (err) { | ||
697 | nvgpu_err(gk20a, "late probe failed"); | ||
698 | return err; | ||
699 | } | ||
700 | } | ||
701 | |||
702 | err = vgpu_comm_init(gk20a); | ||
703 | if (err) { | ||
704 | nvgpu_err(gk20a, "failed to init comm interface"); | ||
705 | return -ENOSYS; | ||
706 | } | ||
707 | |||
708 | priv->virt_handle = vgpu_connect(); | ||
709 | if (!priv->virt_handle) { | ||
710 | nvgpu_err(gk20a, "failed to connect to server node"); | ||
711 | vgpu_comm_deinit(); | ||
712 | return -ENOSYS; | ||
713 | } | ||
714 | |||
715 | err = vgpu_get_constants(gk20a); | ||
716 | if (err) { | ||
717 | vgpu_comm_deinit(); | ||
718 | return err; | ||
719 | } | ||
720 | |||
721 | err = vgpu_pm_init(dev); | ||
722 | if (err) { | ||
723 | nvgpu_err(gk20a, "pm init failed"); | ||
724 | return err; | ||
725 | } | ||
726 | |||
727 | err = nvgpu_thread_create(&priv->intr_handler, gk20a, | ||
728 | vgpu_intr_thread, "gk20a"); | ||
729 | if (err) | ||
730 | return err; | ||
731 | |||
732 | gk20a_debug_init(gk20a, "gpu.0"); | ||
733 | |||
734 | /* Set DMA parameters to allow larger sgt lists */ | ||
735 | dev->dma_parms = &l->dma_parms; | ||
736 | dma_set_max_seg_size(dev, UINT_MAX); | ||
737 | |||
738 | gk20a->gr_idle_timeout_default = NVGPU_DEFAULT_GR_IDLE_TIMEOUT; | ||
739 | gk20a->timeouts_enabled = true; | ||
740 | |||
741 | vgpu_create_sysfs(dev); | ||
742 | gk20a_init_gr(gk20a); | ||
743 | |||
744 | gk20a_dbg_info("total ram pages : %lu", totalram_pages); | ||
745 | gk20a->gr.max_comptag_mem = totalram_pages | ||
746 | >> (10 - (PAGE_SHIFT - 10)); | ||
747 | |||
748 | nvgpu_ref_init(&gk20a->refcount); | ||
749 | |||
750 | return 0; | ||
751 | } | ||
752 | |||
753 | int vgpu_remove(struct platform_device *pdev) | ||
754 | { | ||
755 | struct device *dev = &pdev->dev; | ||
756 | struct gk20a *g = get_gk20a(dev); | ||
757 | gk20a_dbg_fn(""); | ||
758 | |||
759 | vgpu_pm_qos_remove(dev); | ||
760 | if (g->remove_support) | ||
761 | g->remove_support(g); | ||
762 | |||
763 | vgpu_comm_deinit(); | ||
764 | gk20a_sched_ctrl_cleanup(g); | ||
765 | gk20a_user_deinit(dev, &nvgpu_class); | ||
766 | vgpu_remove_sysfs(dev); | ||
767 | gk20a_get_platform(dev)->g = NULL; | ||
768 | gk20a_put(g); | ||
769 | |||
770 | return 0; | ||
771 | } | ||