author	Deepak Nibade <dnibade@nvidia.com>	2017-11-14 09:43:28 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-11-17 11:27:19 -0500
commit	b42fb7ba26b565f93118fbdd9e17b42ee6144c5e (patch)
tree	26e2d919f019d15b51bba4d7b5c938f77ad5cff5 /drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c
parent	b7cc3a2aa6c92a09eed43513287c9062f22ad127 (diff)
gpu: nvgpu: move vgpu code to linux
Most of VGPU code is Linux specific but lies in common code. So until
VGPU code is properly abstracted and made OS-independent, move all of
VGPU code to the Linux specific directory.

Handle corresponding Makefile changes.
Update all #includes to reflect new paths.
Add GPL license to newly added Linux files.

Jira NVGPU-387

Change-Id: Ic133e4c80e570bcc273f0dacf45283fefd678923
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1599472
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c')
-rw-r--r--	drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c	776
1 file changed, 776 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c
new file mode 100644
index 00000000..7768b21d
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c
@@ -0,0 +1,776 @@
/*
 * Virtualized GPU
 *
 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <soc/tegra/chip-id.h>
#include <uapi/linux/nvgpu.h>

#include <nvgpu/kmem.h>
#include <nvgpu/bug.h>
#include <nvgpu/enabled.h>
#include <nvgpu/debug.h>
#include <nvgpu/bus.h>
#include <nvgpu/soc.h>
#include <nvgpu/ctxsw_trace.h>

#include "vgpu.h"
#include "fecs_trace_vgpu.h"
#include "clk_vgpu.h"
#include "gk20a/tsg_gk20a.h"
#include "gk20a/channel_gk20a.h"
#include "gm20b/hal_gm20b.h"

#include "common/linux/module.h"
#include "common/linux/os_linux.h"
#include "common/linux/ioctl.h"
#include "common/linux/scale.h"
#include "common/linux/driver_common.h"

#ifdef CONFIG_TEGRA_19x_GPU
#include "common/linux/vgpu/vgpu_t19x.h"
#include <nvgpu_gpuid_t19x.h>
#endif

#include <nvgpu/hw/gk20a/hw_mc_gk20a.h>

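/*
 * All communication with the vgpu server goes through tegra_gr_comm
 * queues. vgpu_comm_init()/vgpu_comm_deinit() bring that transport up
 * and down for this client context.
 */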
static inline int vgpu_comm_init(struct platform_device *pdev)
{
	size_t queue_sizes[] = { TEGRA_VGPU_QUEUE_SIZES };

	return tegra_gr_comm_init(pdev, TEGRA_GR_COMM_CTX_CLIENT, 3,
				queue_sizes, TEGRA_VGPU_QUEUE_CMD,
				ARRAY_SIZE(queue_sizes));
}

static inline void vgpu_comm_deinit(void)
{
	size_t queue_sizes[] = { TEGRA_VGPU_QUEUE_SIZES };

	tegra_gr_comm_deinit(TEGRA_GR_COMM_CTX_CLIENT, TEGRA_VGPU_QUEUE_CMD,
			ARRAY_SIZE(queue_sizes));
}

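/*
 * Send a command message to the server and wait for its reply. The
 * reply payload (size_out bytes) is copied back over *msg before the
 * transport buffer is released.
 */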
int vgpu_comm_sendrecv(struct tegra_vgpu_cmd_msg *msg, size_t size_in,
		size_t size_out)
{
	void *handle;
	size_t size = size_in;
	void *data = msg;
	int err;

	err = tegra_gr_comm_sendrecv(TEGRA_GR_COMM_CTX_CLIENT,
				tegra_gr_comm_get_server_vmid(),
				TEGRA_VGPU_QUEUE_CMD, &handle, &data, &size);
	if (!err) {
		WARN_ON(size < size_out);
		memcpy(msg, data, size_out);
		tegra_gr_comm_release(handle);
	}

	return err;
}

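/*
 * Establish the GPU module connection with the server. Returns the
 * opaque handle used in all subsequent commands, or 0 on failure.
 */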
static u64 vgpu_connect(void)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_connect_params *p = &msg.params.connect;
	int err;

	msg.cmd = TEGRA_VGPU_CMD_CONNECT;
	p->module = TEGRA_VGPU_MODULE_GPU;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

	return (err || msg.ret) ? 0 : p->handle;
}

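/* Query a single attribute value from the server. */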
int vgpu_get_attribute(u64 handle, u32 attrib, u32 *value)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_attrib_params *p = &msg.params.attrib;
	int err;

	msg.cmd = TEGRA_VGPU_CMD_GET_ATTRIBUTE;
	msg.handle = handle;
	p->attrib = attrib;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

	if (err || msg.ret)
		return -1;

	*value = p->value;
	return 0;
}

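/*
 * Forward a channel or TSG event posted by the server to the matching
 * channel/TSG object, after validating the ids against local limits.
 */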
static void vgpu_handle_channel_event(struct gk20a *g,
			struct tegra_vgpu_channel_event_info *info)
{
	if (info->id >= g->fifo.num_channels ||
		info->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX) {
		nvgpu_err(g, "invalid channel event");
		return;
	}

	if (info->is_tsg) {
		struct tsg_gk20a *tsg = &g->fifo.tsg[info->id];

		gk20a_tsg_event_id_post_event(tsg, info->event_id);
	} else {
		struct channel_gk20a *ch = &g->fifo.channel[info->id];

		if (!gk20a_channel_get(ch)) {
			nvgpu_err(g, "invalid channel %d for event %d",
					(int)info->id, (int)info->event_id);
			return;
		}
		gk20a_channel_event_id_post_event(ch, info->event_id);
		gk20a_channel_put(ch);
	}
}

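/*
 * Interrupt handler thread: a virtualized GPU has no real interrupt
 * line, so this thread blocks on the interrupt queue and dispatches
 * interrupt and event messages from the server until it receives
 * TEGRA_VGPU_EVENT_ABORT (sent from vgpu_remove_support()).
 */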
static int vgpu_intr_thread(void *dev_id)
{
	struct gk20a *g = dev_id;
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

	while (true) {
		struct tegra_vgpu_intr_msg *msg;
		u32 sender;
		void *handle;
		size_t size;
		int err;

		err = tegra_gr_comm_recv(TEGRA_GR_COMM_CTX_CLIENT,
					TEGRA_VGPU_QUEUE_INTR, &handle,
					(void **)&msg, &size, &sender);
		if (err == -ETIME)
			continue;
		if (WARN_ON(err))
			continue;

		if (msg->event == TEGRA_VGPU_EVENT_ABORT) {
			tegra_gr_comm_release(handle);
			break;
		}

		switch (msg->event) {
		case TEGRA_VGPU_EVENT_INTR:
			if (msg->unit == TEGRA_VGPU_INTR_GR)
				vgpu_gr_isr(g, &msg->info.gr_intr);
			else if (msg->unit == TEGRA_VGPU_NONSTALL_INTR_GR)
				vgpu_gr_nonstall_isr(g,
					&msg->info.gr_nonstall_intr);
			else if (msg->unit == TEGRA_VGPU_INTR_FIFO)
				vgpu_fifo_isr(g, &msg->info.fifo_intr);
			else if (msg->unit == TEGRA_VGPU_NONSTALL_INTR_FIFO)
				vgpu_fifo_nonstall_isr(g,
					&msg->info.fifo_nonstall_intr);
			else if (msg->unit == TEGRA_VGPU_NONSTALL_INTR_CE2)
				vgpu_ce2_nonstall_isr(g,
					&msg->info.ce2_nonstall_intr);
			break;
		case TEGRA_VGPU_EVENT_FECS_TRACE:
			vgpu_fecs_trace_data_update(g);
			break;
		case TEGRA_VGPU_EVENT_CHANNEL:
			vgpu_handle_channel_event(g, &msg->info.channel_event);
			break;
		case TEGRA_VGPU_EVENT_SM_ESR:
			vgpu_gr_handle_sm_esr_event(g, &msg->info.sm_esr);
			break;
		default:
			nvgpu_err(g, "unknown event %u", msg->event);
			break;
		}

		tegra_gr_comm_release(handle);
	}

	while (!nvgpu_thread_should_stop(&priv->intr_handler))
		msleep(10);
	return 0;
}

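/*
 * Tear down engine support, post TEGRA_VGPU_EVENT_ABORT to our own
 * interrupt queue so vgpu_intr_thread() exits, then stop the thread
 * and release the BAR1 mapping.
 */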
static void vgpu_remove_support(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct vgpu_priv_data *priv =
		vgpu_get_priv_data_from_dev(dev_from_gk20a(g));
	struct tegra_vgpu_intr_msg msg;
	int err;

	if (g->dbg_regops_tmp_buf)
		nvgpu_kfree(g, g->dbg_regops_tmp_buf);

	if (g->pmu.remove_support)
		g->pmu.remove_support(&g->pmu);

	if (g->gr.remove_support)
		g->gr.remove_support(&g->gr);

	if (g->fifo.remove_support)
		g->fifo.remove_support(&g->fifo);

	if (g->mm.remove_support)
		g->mm.remove_support(&g->mm);

	msg.event = TEGRA_VGPU_EVENT_ABORT;
	err = tegra_gr_comm_send(TEGRA_GR_COMM_CTX_CLIENT,
				TEGRA_GR_COMM_ID_SELF, TEGRA_VGPU_QUEUE_INTR,
				&msg, sizeof(msg));
	WARN_ON(err);
	nvgpu_thread_stop(&priv->intr_handler);

	/* free mappings to registers, etc. */

	if (l->bar1) {
		iounmap(l->bar1);
		l->bar1 = NULL;
	}
}

static void vgpu_init_vars(struct gk20a *g, struct gk20a_platform *platform)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

	nvgpu_mutex_init(&g->poweron_lock);
	nvgpu_mutex_init(&g->poweroff_lock);
	l->regs_saved = l->regs;
	l->bar1_saved = l->bar1;

	nvgpu_init_list_node(&g->pending_sema_waits);
	nvgpu_raw_spinlock_init(&g->pending_sema_waits_lock);

	g->aggressive_sync_destroy = platform->aggressive_sync_destroy;
	g->aggressive_sync_destroy_thresh = platform->aggressive_sync_destroy_thresh;
	g->has_syncpoints = platform->has_syncpoints;
	g->ptimer_src_freq = platform->ptimer_src_freq;
	g->can_railgate = platform->can_railgate_init;
	g->railgate_delay = platform->railgate_delay_init;

	__nvgpu_set_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES,
			platform->unify_address_spaces);
}

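/*
 * Map the "/vgpu" BAR1 aperture, if the platform provides one, and set
 * up the debug/profiler state shared with the native driver.
 */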
static int vgpu_init_support(struct platform_device *pdev)
{
	struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct gk20a *g = get_gk20a(&pdev->dev);
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	void __iomem *regs;
	int err = 0;

	if (!r) {
		nvgpu_err(g, "failed to get gk20a bar1");
		err = -ENXIO;
		goto fail;
	}

	if (r->name && !strcmp(r->name, "/vgpu")) {
		regs = devm_ioremap_resource(&pdev->dev, r);
		if (IS_ERR(regs)) {
			nvgpu_err(g, "failed to remap gk20a bar1");
			err = PTR_ERR(regs);
			goto fail;
		}
		l->bar1 = regs;
		l->bar1_mem = r;
	}

	nvgpu_mutex_init(&g->dbg_sessions_lock);
	nvgpu_mutex_init(&g->client_lock);

	nvgpu_init_list_node(&g->profiler_objects);

	g->dbg_regops_tmp_buf = nvgpu_kzalloc(g, SZ_4K);
	if (!g->dbg_regops_tmp_buf) {
		nvgpu_err(g, "couldn't allocate regops tmp buf");
		return -ENOMEM;
	}
	g->dbg_regops_tmp_buf_ops =
		SZ_4K / sizeof(g->dbg_regops_tmp_buf[0]);

	g->remove_support = vgpu_remove_support;
	return 0;

fail:
	vgpu_remove_support(g);
	return err;
}

int vgpu_pm_prepare_poweroff(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	int ret = 0;

	gk20a_dbg_fn("");

	if (!g->power_on)
		return 0;

	ret = gk20a_channel_suspend(g);
	if (ret)
		return ret;

	g->power_on = false;

	return ret;
}

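/*
 * Chip identity is not read from hardware; it comes from the constants
 * the server handed back in vgpu_get_constants().
 */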
static void vgpu_detect_chip(struct gk20a *g)
{
	struct nvgpu_gpu_params *p = &g->params;
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

	p->gpu_arch = priv->constants.arch;
	p->gpu_impl = priv->constants.impl;
	p->gpu_rev = priv->constants.rev;

	gk20a_dbg_info("arch: %x, impl: %x, rev: %x\n",
			p->gpu_arch,
			p->gpu_impl,
			p->gpu_rev);
}

int vgpu_init_gpu_characteristics(struct gk20a *g)
{
	int err;

	gk20a_dbg_fn("");

	err = gk20a_init_gpu_characteristics(g);
	if (err)
		return err;

	__nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, false);

	/* features vgpu does not support */
	__nvgpu_set_enabled(g, NVGPU_SUPPORT_RESCHEDULE_RUNLIST, false);

	return 0;
}

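/* Read the GPU ptimer via the server rather than from registers. */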
int vgpu_read_ptimer(struct gk20a *g, u64 *value)
{
	struct tegra_vgpu_cmd_msg msg = {0};
	struct tegra_vgpu_read_ptimer_params *p = &msg.params.read_ptimer;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_READ_PTIMER;
	msg.handle = vgpu_get_handle(g);

	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	err = err ? err : msg.ret;
	if (!err)
		*value = p->time;
	else
		nvgpu_err(g, "vgpu read ptimer failed, err=%d", err);

	return err;
}

int vgpu_get_timestamps_zipper(struct gk20a *g,
		u32 source_id, u32 count,
		struct nvgpu_cpu_time_correlation_sample *samples)
{
	struct tegra_vgpu_cmd_msg msg = {0};
	struct tegra_vgpu_get_timestamps_zipper_params *p =
			&msg.params.get_timestamps_zipper;
	int err;
	u32 i;

	gk20a_dbg_fn("");

	if (count > TEGRA_VGPU_GET_TIMESTAMPS_ZIPPER_MAX_COUNT) {
		nvgpu_err(g, "count %u overflow", count);
		return -EINVAL;
	}

	msg.cmd = TEGRA_VGPU_CMD_GET_TIMESTAMPS_ZIPPER;
	msg.handle = vgpu_get_handle(g);
	p->source_id = TEGRA_VGPU_GET_TIMESTAMPS_ZIPPER_SRC_ID_TSC;
	p->count = count;

	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	err = err ? err : msg.ret;
	if (err) {
		nvgpu_err(g, "vgpu get timestamps zipper failed, err=%d", err);
		return err;
	}

	for (i = 0; i < count; i++) {
		samples[i].cpu_timestamp = p->samples[i].cpu_timestamp;
		samples[i].gpu_timestamp = p->samples[i].gpu_timestamp;
	}

	return err;
}

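/*
 * Pick the vgpu HAL variant matching the arch/impl reported by the
 * server; this mirrors the HAL selection done by the native driver.
 */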
static int vgpu_init_hal(struct gk20a *g)
{
	u32 ver = g->params.gpu_arch + g->params.gpu_impl;
	int err;

	switch (ver) {
	case GK20A_GPUID_GM20B:
	case GK20A_GPUID_GM20B_B:
		gk20a_dbg_info("gm20b detected");
		err = vgpu_gm20b_init_hal(g);
		break;
	case NVGPU_GPUID_GP10B:
		gk20a_dbg_info("gp10b detected");
		err = vgpu_gp10b_init_hal(g);
		break;
#ifdef CONFIG_TEGRA_19x_GPU
	case TEGRA_19x_GPUID:
		err = vgpu_t19x_init_hal(g);
		break;
#endif
	default:
		nvgpu_err(g, "no support for %x", ver);
		err = -ENODEV;
		break;
	}

	return err;
}

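/*
 * Power-on path: detect the chip, install the HAL, then bring up MM,
 * FIFO and GR support in that order before resuming channels.
 */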
int vgpu_pm_finalize_poweron(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	int err;

	gk20a_dbg_fn("");

	if (g->power_on)
		return 0;

	g->power_on = true;

	vgpu_detect_chip(g);
	err = vgpu_init_hal(g);
	if (err)
		goto done;

	if (g->ops.ltc.init_fs_state)
		g->ops.ltc.init_fs_state(g);

	err = vgpu_init_mm_support(g);
	if (err) {
		nvgpu_err(g, "failed to init gk20a mm");
		goto done;
	}

	err = vgpu_init_fifo_support(g);
	if (err) {
		nvgpu_err(g, "failed to init gk20a fifo");
		goto done;
	}

	err = vgpu_init_gr_support(g);
	if (err) {
		nvgpu_err(g, "failed to init gk20a gr");
		goto done;
	}

	err = g->ops.chip_init_gpu_characteristics(g);
	if (err) {
		nvgpu_err(g, "failed to init gk20a gpu characteristics");
		goto done;
	}

	gk20a_ctxsw_trace_init(g);
	gk20a_sched_ctrl_init(g);
	gk20a_channel_resume(g);

done:
	return err;
}

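/*
 * PM QoS notifier: when the GPU frequency bound changes, ask the
 * server to cap the clock at the new maximum.
 */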
static int vgpu_qos_notify(struct notifier_block *nb,
			unsigned long n, void *data)
{
	struct gk20a_scale_profile *profile =
			container_of(nb, struct gk20a_scale_profile,
					qos_notify_block);
	struct gk20a *g = get_gk20a(profile->dev);
	u32 max_freq;
	int err;

	gk20a_dbg_fn("");

	max_freq = (u32)pm_qos_read_max_bound(PM_QOS_GPU_FREQ_BOUNDS);
	err = vgpu_clk_cap_rate(profile->dev, max_freq);
	if (err)
		nvgpu_err(g, "%s failed, err=%d", __func__, err);

	return NOTIFY_OK; /* let the notifier chain continue */
}

static int vgpu_pm_qos_init(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	struct gk20a_scale_profile *profile = g->scale_profile;

	if (IS_ENABLED(CONFIG_GK20A_DEVFREQ)) {
		if (!profile)
			return -EINVAL;
	} else {
		profile = nvgpu_kzalloc(g, sizeof(*profile));
		if (!profile)
			return -ENOMEM;
		g->scale_profile = profile;
	}

	profile->dev = dev;
	profile->qos_notify_block.notifier_call = vgpu_qos_notify;
	pm_qos_add_max_notifier(PM_QOS_GPU_FREQ_BOUNDS,
				&profile->qos_notify_block);
	return 0;
}

static void vgpu_pm_qos_remove(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);

	pm_qos_remove_max_notifier(PM_QOS_GPU_FREQ_BOUNDS,
				&g->scale_profile->qos_notify_block);
	nvgpu_kfree(g, g->scale_profile);
	g->scale_profile = NULL;
}

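/*
 * Runtime PM is not used for the virtual GPU, so disable it; when
 * devfreq scaling is enabled, seed the min/max frequencies from the
 * table reported by vgpu_clk_get_freqs().
 */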
static int vgpu_pm_init(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	unsigned long *freqs;
	int num_freqs;
	int err = 0;

	gk20a_dbg_fn("");

	if (nvgpu_platform_is_simulation(g))
		return 0;

	__pm_runtime_disable(dev, false);

	if (IS_ENABLED(CONFIG_GK20A_DEVFREQ))
		gk20a_scale_init(dev);

	if (l->devfreq) {
		/* set min/max frequency based on frequency table */
		err = vgpu_clk_get_freqs(dev, &freqs, &num_freqs);
		if (err)
			return err;

		if (num_freqs < 1)
			return -EINVAL;

		l->devfreq->min_freq = freqs[0];
		l->devfreq->max_freq = freqs[num_freqs - 1];
	}

	err = vgpu_pm_qos_init(dev);
	if (err)
		return err;

	return err;
}

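/*
 * Fetch the static GPU description (arch/impl/rev, unit counts, ...)
 * from the server and cache it in the per-device private data.
 */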
static int vgpu_get_constants(struct gk20a *g)
{
	struct tegra_vgpu_cmd_msg msg = {};
	struct tegra_vgpu_constants_params *p = &msg.params.constants;
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_GET_CONSTANTS;
	msg.handle = vgpu_get_handle(g);
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	err = err ? err : msg.ret;

	if (unlikely(err)) {
		nvgpu_err(g, "%s failed, err=%d", __func__, err);
		return err;
	}

	if (unlikely(p->gpc_count > TEGRA_VGPU_MAX_GPC_COUNT ||
		p->max_tpc_per_gpc_count > TEGRA_VGPU_MAX_TPC_COUNT_PER_GPC)) {
		nvgpu_err(g, "gpc_count %d max_tpc_per_gpc %d overflow",
			(int)p->gpc_count, (int)p->max_tpc_per_gpc_count);
		return -EINVAL;
	}

	priv->constants = *p;
	return 0;
}

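/*
 * Driver probe: allocate and initialize the gk20a state, bring up the
 * server transport, connect, fetch constants and start the interrupt
 * handler thread. The real HAL setup is deferred until first power-on
 * (vgpu_pm_finalize_poweron()).
 */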
int vgpu_probe(struct platform_device *pdev)
{
	struct nvgpu_os_linux *l;
	struct gk20a *gk20a;
	int err;
	struct device *dev = &pdev->dev;
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	struct vgpu_priv_data *priv;

	if (!platform) {
		dev_err(dev, "no platform data\n");
		return -ENODATA;
	}

	gk20a_dbg_fn("");

	l = kzalloc(sizeof(*l), GFP_KERNEL);
	if (!l) {
		dev_err(dev, "couldn't allocate gk20a support");
		return -ENOMEM;
	}
	gk20a = &l->g;
	nvgpu_init_gk20a(gk20a);

	nvgpu_kmem_init(gk20a);

	err = nvgpu_init_enabled_flags(gk20a);
	if (err) {
		kfree(gk20a);
		return err;
	}

	l->dev = dev;
	if (tegra_platform_is_vdk())
		__nvgpu_set_enabled(gk20a, NVGPU_IS_FMODEL, true);

	gk20a->is_virtual = true;

	priv = nvgpu_kzalloc(gk20a, sizeof(*priv));
	if (!priv) {
		kfree(gk20a);
		return -ENOMEM;
	}

	platform->g = gk20a;
	platform->vgpu_priv = priv;

	err = gk20a_user_init(dev, INTERFACE_NAME, &nvgpu_class);
	if (err)
		return err;

	vgpu_init_support(pdev);

	vgpu_init_vars(gk20a, platform);

	init_rwsem(&l->busy_lock);

	nvgpu_spinlock_init(&gk20a->mc_enable_lock);

	gk20a->ch_wdt_timeout_ms = platform->ch_wdt_timeout_ms;

	/* Initialize the platform interface. */
	err = platform->probe(dev);
	if (err) {
		if (err == -EPROBE_DEFER)
			dev_info(dev, "platform probe failed");
		else
			dev_err(dev, "platform probe failed");
		return err;
	}

	if (platform->late_probe) {
		err = platform->late_probe(dev);
		if (err) {
			dev_err(dev, "late probe failed");
			return err;
		}
	}

	err = vgpu_comm_init(pdev);
	if (err) {
		dev_err(dev, "failed to init comm interface\n");
		return -ENOSYS;
	}

	priv->virt_handle = vgpu_connect();
	if (!priv->virt_handle) {
		dev_err(dev, "failed to connect to server node\n");
		vgpu_comm_deinit();
		return -ENOSYS;
	}

	err = vgpu_get_constants(gk20a);
	if (err) {
		vgpu_comm_deinit();
		return err;
	}

	err = vgpu_pm_init(dev);
	if (err) {
		dev_err(dev, "pm init failed");
		return err;
	}

	err = nvgpu_thread_create(&priv->intr_handler, gk20a,
			vgpu_intr_thread, "gk20a");
	if (err)
		return err;

	gk20a_debug_init(gk20a, "gpu.0");

	/* Set DMA parameters to allow larger sgt lists */
	dev->dma_parms = &l->dma_parms;
	dma_set_max_seg_size(dev, UINT_MAX);

	gk20a->gr_idle_timeout_default =
			CONFIG_GK20A_DEFAULT_TIMEOUT;
	gk20a->timeouts_enabled = true;

	vgpu_create_sysfs(dev);
	gk20a_init_gr(gk20a);

	nvgpu_ref_init(&gk20a->refcount);

	return 0;
}

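/* Undo vgpu_probe(): tear down support, transport and sysfs nodes. */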
int vgpu_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct gk20a *g = get_gk20a(dev);

	gk20a_dbg_fn("");

	vgpu_pm_qos_remove(dev);
	if (g->remove_support)
		g->remove_support(g);

	vgpu_comm_deinit();
	gk20a_sched_ctrl_cleanup(g);
	gk20a_user_deinit(dev, &nvgpu_class);
	vgpu_remove_sysfs(dev);
	gk20a_get_platform(dev)->g = NULL;
	gk20a_put(g);

	return 0;
}