author	Deepak Nibade <dnibade@nvidia.com>	2017-11-14 09:43:28 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-11-17 11:27:19 -0500
commit	b42fb7ba26b565f93118fbdd9e17b42ee6144c5e (patch)
tree	26e2d919f019d15b51bba4d7b5c938f77ad5cff5 /drivers/gpu/nvgpu/vgpu/vgpu.c
parent	b7cc3a2aa6c92a09eed43513287c9062f22ad127 (diff)
gpu: nvgpu: move vgpu code to linux
Most of the VGPU code is Linux-specific but lies in common code. So, until the
VGPU code is properly abstracted and made OS-independent, move all of it to the
Linux-specific directory:

- Handle the corresponding Makefile changes
- Update all #includes to reflect the new paths
- Add a GPL license to the newly added Linux files

Jira NVGPU-387

Change-Id: Ic133e4c80e570bcc273f0dacf45283fefd678923
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1599472
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
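For illustration, the #include updates described above take roughly the shape
below. This is a sketch only: the common/linux/vgpu/ destination prefix is an
assumption inferred from the commit message, since this diff shows only the old
copy of vgpu.c being removed.

/*
 * Illustrative sketch of the include-path update described in the commit
 * message. The new common/linux/vgpu/ prefix is an assumption, not
 * something shown in this diff.
 */

/* Before the move: VGPU headers resolved from the common code tree. */
#include "vgpu/vgpu.h"
#include "vgpu/clk_vgpu.h"

/* After the move: the same headers resolved from the Linux-specific tree. */
#include "common/linux/vgpu/vgpu.h"
#include "common/linux/vgpu/clk_vgpu.h"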
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/vgpu.c')
-rw-r--r--	drivers/gpu/nvgpu/vgpu/vgpu.c	782
1 file changed, 0 insertions(+), 782 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.c b/drivers/gpu/nvgpu/vgpu/vgpu.c
deleted file mode 100644
index 1153b540..00000000
--- a/drivers/gpu/nvgpu/vgpu/vgpu.c
+++ /dev/null
@@ -1,782 +0,0 @@
/*
 * Virtualized GPU
 *
 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <soc/tegra/chip-id.h>
#include <uapi/linux/nvgpu.h>

#include <nvgpu/kmem.h>
#include <nvgpu/bug.h>
#include <nvgpu/enabled.h>
#include <nvgpu/debug.h>
#include <nvgpu/bus.h>
#include <nvgpu/soc.h>
#include <nvgpu/ctxsw_trace.h>

#include "vgpu/vgpu.h"
#include "vgpu/fecs_trace_vgpu.h"
#include "vgpu/clk_vgpu.h"
#include "gk20a/tsg_gk20a.h"
#include "gk20a/channel_gk20a.h"
#include "gm20b/hal_gm20b.h"

#include "common/linux/module.h"
#include "common/linux/os_linux.h"
#include "common/linux/ioctl.h"
#include "common/linux/scale.h"
#include "common/linux/driver_common.h"

#ifdef CONFIG_TEGRA_19x_GPU
#include <vgpu/vgpu_t19x.h>
#include <nvgpu_gpuid_t19x.h>
#endif

#include <nvgpu/hw/gk20a/hw_mc_gk20a.h>

static inline int vgpu_comm_init(struct platform_device *pdev)
{
	size_t queue_sizes[] = { TEGRA_VGPU_QUEUE_SIZES };

	return tegra_gr_comm_init(pdev, TEGRA_GR_COMM_CTX_CLIENT, 3,
				queue_sizes, TEGRA_VGPU_QUEUE_CMD,
				ARRAY_SIZE(queue_sizes));
}

static inline void vgpu_comm_deinit(void)
{
	size_t queue_sizes[] = { TEGRA_VGPU_QUEUE_SIZES };

	tegra_gr_comm_deinit(TEGRA_GR_COMM_CTX_CLIENT, TEGRA_VGPU_QUEUE_CMD,
			ARRAY_SIZE(queue_sizes));
}

int vgpu_comm_sendrecv(struct tegra_vgpu_cmd_msg *msg, size_t size_in,
		size_t size_out)
{
	void *handle;
	size_t size = size_in;
	void *data = msg;
	int err;

	err = tegra_gr_comm_sendrecv(TEGRA_GR_COMM_CTX_CLIENT,
				tegra_gr_comm_get_server_vmid(),
				TEGRA_VGPU_QUEUE_CMD, &handle, &data, &size);
	if (!err) {
		WARN_ON(size < size_out);
		memcpy(msg, data, size_out);
		tegra_gr_comm_release(handle);
	}

	return err;
}

static u64 vgpu_connect(void)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_connect_params *p = &msg.params.connect;
	int err;

	msg.cmd = TEGRA_VGPU_CMD_CONNECT;
	p->module = TEGRA_VGPU_MODULE_GPU;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

	return (err || msg.ret) ? 0 : p->handle;
}

int vgpu_get_attribute(u64 handle, u32 attrib, u32 *value)
{
	struct tegra_vgpu_cmd_msg msg;
	struct tegra_vgpu_attrib_params *p = &msg.params.attrib;
	int err;

	msg.cmd = TEGRA_VGPU_CMD_GET_ATTRIBUTE;
	msg.handle = handle;
	p->attrib = attrib;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));

	if (err || msg.ret)
		return -1;

	*value = p->value;
	return 0;
}

static void vgpu_handle_channel_event(struct gk20a *g,
			struct tegra_vgpu_channel_event_info *info)
{
	if (info->id >= g->fifo.num_channels ||
		info->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX) {
		nvgpu_err(g, "invalid channel event");
		return;
	}

	if (info->is_tsg) {
		struct tsg_gk20a *tsg = &g->fifo.tsg[info->id];

		gk20a_tsg_event_id_post_event(tsg, info->event_id);
	} else {
		struct channel_gk20a *ch = &g->fifo.channel[info->id];

		if (!gk20a_channel_get(ch)) {
			nvgpu_err(g, "invalid channel %d for event %d",
				(int)info->id, (int)info->event_id);
			return;
		}
		gk20a_channel_event_id_post_event(ch, info->event_id);
		gk20a_channel_put(ch);
	}
}

static int vgpu_intr_thread(void *dev_id)
{
	struct gk20a *g = dev_id;
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

	while (true) {
		struct tegra_vgpu_intr_msg *msg;
		u32 sender;
		void *handle;
		size_t size;
		int err;

		err = tegra_gr_comm_recv(TEGRA_GR_COMM_CTX_CLIENT,
					TEGRA_VGPU_QUEUE_INTR, &handle,
					(void **)&msg, &size, &sender);
		if (err == -ETIME)
			continue;
		if (WARN_ON(err))
			continue;

		if (msg->event == TEGRA_VGPU_EVENT_ABORT) {
			tegra_gr_comm_release(handle);
			break;
		}

		switch (msg->event) {
		case TEGRA_VGPU_EVENT_INTR:
			if (msg->unit == TEGRA_VGPU_INTR_GR)
				vgpu_gr_isr(g, &msg->info.gr_intr);
			else if (msg->unit == TEGRA_VGPU_NONSTALL_INTR_GR)
				vgpu_gr_nonstall_isr(g,
						&msg->info.gr_nonstall_intr);
			else if (msg->unit == TEGRA_VGPU_INTR_FIFO)
				vgpu_fifo_isr(g, &msg->info.fifo_intr);
			else if (msg->unit == TEGRA_VGPU_NONSTALL_INTR_FIFO)
				vgpu_fifo_nonstall_isr(g,
						&msg->info.fifo_nonstall_intr);
			else if (msg->unit == TEGRA_VGPU_NONSTALL_INTR_CE2)
				vgpu_ce2_nonstall_isr(g,
						&msg->info.ce2_nonstall_intr);
			break;
		case TEGRA_VGPU_EVENT_FECS_TRACE:
			vgpu_fecs_trace_data_update(g);
			break;
		case TEGRA_VGPU_EVENT_CHANNEL:
			vgpu_handle_channel_event(g, &msg->info.channel_event);
			break;
		case TEGRA_VGPU_EVENT_SM_ESR:
			vgpu_gr_handle_sm_esr_event(g, &msg->info.sm_esr);
			break;
		default:
			nvgpu_err(g, "unknown event %u", msg->event);
			break;
		}

		tegra_gr_comm_release(handle);
	}

	while (!nvgpu_thread_should_stop(&priv->intr_handler))
		msleep(10);
	return 0;
}

static void vgpu_remove_support(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct vgpu_priv_data *priv =
		vgpu_get_priv_data_from_dev(dev_from_gk20a(g));
	struct tegra_vgpu_intr_msg msg;
	int err;

	if (g->dbg_regops_tmp_buf)
		nvgpu_kfree(g, g->dbg_regops_tmp_buf);

	if (g->pmu.remove_support)
		g->pmu.remove_support(&g->pmu);

	if (g->gr.remove_support)
		g->gr.remove_support(&g->gr);

	if (g->fifo.remove_support)
		g->fifo.remove_support(&g->fifo);

	if (g->mm.remove_support)
		g->mm.remove_support(&g->mm);

	msg.event = TEGRA_VGPU_EVENT_ABORT;
	err = tegra_gr_comm_send(TEGRA_GR_COMM_CTX_CLIENT,
				TEGRA_GR_COMM_ID_SELF, TEGRA_VGPU_QUEUE_INTR,
				&msg, sizeof(msg));
	WARN_ON(err);
	nvgpu_thread_stop(&priv->intr_handler);

	/* free mappings to registers, etc. */

	if (l->bar1) {
		iounmap(l->bar1);
		l->bar1 = NULL;
	}
}

static void vgpu_init_vars(struct gk20a *g, struct gk20a_platform *platform)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

	nvgpu_mutex_init(&g->poweron_lock);
	nvgpu_mutex_init(&g->poweroff_lock);
	l->regs_saved = l->regs;
	l->bar1_saved = l->bar1;

	nvgpu_init_list_node(&g->pending_sema_waits);
	nvgpu_raw_spinlock_init(&g->pending_sema_waits_lock);

	g->aggressive_sync_destroy = platform->aggressive_sync_destroy;
	g->aggressive_sync_destroy_thresh = platform->aggressive_sync_destroy_thresh;
	g->has_syncpoints = platform->has_syncpoints;
	g->ptimer_src_freq = platform->ptimer_src_freq;
	g->can_railgate = platform->can_railgate_init;
	g->railgate_delay = platform->railgate_delay_init;

	__nvgpu_set_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES,
			platform->unify_address_spaces);
}

static int vgpu_init_support(struct platform_device *pdev)
{
	struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct gk20a *g = get_gk20a(&pdev->dev);
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	void __iomem *regs;
	int err = 0;

	if (!r) {
		nvgpu_err(g, "failed to get gk20a bar1");
		err = -ENXIO;
		goto fail;
	}

	if (r->name && !strcmp(r->name, "/vgpu")) {
		regs = devm_ioremap_resource(&pdev->dev, r);
		if (IS_ERR(regs)) {
			nvgpu_err(g, "failed to remap gk20a bar1");
			err = PTR_ERR(regs);
			goto fail;
		}
		l->bar1 = regs;
		l->bar1_mem = r;
	}

	nvgpu_mutex_init(&g->dbg_sessions_lock);
	nvgpu_mutex_init(&g->client_lock);

	nvgpu_init_list_node(&g->profiler_objects);

	g->dbg_regops_tmp_buf = nvgpu_kzalloc(g, SZ_4K);
	if (!g->dbg_regops_tmp_buf) {
		nvgpu_err(g, "couldn't allocate regops tmp buf");
		return -ENOMEM;
	}
	g->dbg_regops_tmp_buf_ops =
		SZ_4K / sizeof(g->dbg_regops_tmp_buf[0]);

	g->remove_support = vgpu_remove_support;
	return 0;

 fail:
	vgpu_remove_support(g);
	return err;
}

int vgpu_pm_prepare_poweroff(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	int ret = 0;

	gk20a_dbg_fn("");

	if (!g->power_on)
		return 0;

	ret = gk20a_channel_suspend(g);
	if (ret)
		return ret;

	g->power_on = false;

	return ret;
}

static void vgpu_detect_chip(struct gk20a *g)
{
	struct nvgpu_gpu_params *p = &g->params;
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

	p->gpu_arch = priv->constants.arch;
	p->gpu_impl = priv->constants.impl;
	p->gpu_rev = priv->constants.rev;

	gk20a_dbg_info("arch: %x, impl: %x, rev: %x\n",
			p->gpu_arch,
			p->gpu_impl,
			p->gpu_rev);
}

int vgpu_init_gpu_characteristics(struct gk20a *g)
{
	int err;

	gk20a_dbg_fn("");

	err = gk20a_init_gpu_characteristics(g);
	if (err)
		return err;

	__nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, false);

	/* features vgpu does not support */
	__nvgpu_set_enabled(g, NVGPU_SUPPORT_RESCHEDULE_RUNLIST, false);

	return 0;
}

int vgpu_read_ptimer(struct gk20a *g, u64 *value)
{
	struct tegra_vgpu_cmd_msg msg = {0};
	struct tegra_vgpu_read_ptimer_params *p = &msg.params.read_ptimer;
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_READ_PTIMER;
	msg.handle = vgpu_get_handle(g);

	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	err = err ? err : msg.ret;
	if (!err)
		*value = p->time;
	else
		nvgpu_err(g, "vgpu read ptimer failed, err=%d", err);

	return err;
}

int vgpu_get_timestamps_zipper(struct gk20a *g,
		u32 source_id, u32 count,
		struct nvgpu_cpu_time_correlation_sample *samples)
{
	struct tegra_vgpu_cmd_msg msg = {0};
	struct tegra_vgpu_get_timestamps_zipper_params *p =
			&msg.params.get_timestamps_zipper;
	int err;
	u32 i;

	gk20a_dbg_fn("");

	if (count > TEGRA_VGPU_GET_TIMESTAMPS_ZIPPER_MAX_COUNT) {
		nvgpu_err(g, "count %u overflow", count);
		return -EINVAL;
	}

	msg.cmd = TEGRA_VGPU_CMD_GET_TIMESTAMPS_ZIPPER;
	msg.handle = vgpu_get_handle(g);
	p->source_id = TEGRA_VGPU_GET_TIMESTAMPS_ZIPPER_SRC_ID_TSC;
	p->count = count;

	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	err = err ? err : msg.ret;
	if (err) {
		nvgpu_err(g, "vgpu get timestamps zipper failed, err=%d", err);
		return err;
	}

	for (i = 0; i < count; i++) {
		samples[i].cpu_timestamp = p->samples[i].cpu_timestamp;
		samples[i].gpu_timestamp = p->samples[i].gpu_timestamp;
	}

	return err;
}

static int vgpu_init_hal(struct gk20a *g)
{
	u32 ver = g->params.gpu_arch + g->params.gpu_impl;
	int err;

	switch (ver) {
	case GK20A_GPUID_GM20B:
	case GK20A_GPUID_GM20B_B:
		gk20a_dbg_info("gm20b detected");
		err = vgpu_gm20b_init_hal(g);
		break;
	case NVGPU_GPUID_GP10B:
		gk20a_dbg_info("gp10b detected");
		err = vgpu_gp10b_init_hal(g);
		break;
#ifdef CONFIG_TEGRA_19x_GPU
	case TEGRA_19x_GPUID:
		err = vgpu_t19x_init_hal(g);
		break;
#endif
	default:
		nvgpu_err(g, "no support for %x", ver);
		err = -ENODEV;
		break;
	}

	return err;
}

int vgpu_pm_finalize_poweron(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	int err;

	gk20a_dbg_fn("");

	if (g->power_on)
		return 0;

	g->power_on = true;

	vgpu_detect_chip(g);
	err = vgpu_init_hal(g);
	if (err)
		goto done;

	if (g->ops.ltc.init_fs_state)
		g->ops.ltc.init_fs_state(g);

	err = vgpu_init_mm_support(g);
	if (err) {
		nvgpu_err(g, "failed to init gk20a mm");
		goto done;
	}

	err = vgpu_init_fifo_support(g);
	if (err) {
		nvgpu_err(g, "failed to init gk20a fifo");
		goto done;
	}

	err = vgpu_init_gr_support(g);
	if (err) {
		nvgpu_err(g, "failed to init gk20a gr");
		goto done;
	}

	err = g->ops.chip_init_gpu_characteristics(g);
	if (err) {
		nvgpu_err(g, "failed to init gk20a gpu characteristics");
		goto done;
	}

	gk20a_ctxsw_trace_init(g);
	gk20a_sched_ctrl_init(g);
	gk20a_channel_resume(g);

done:
	return err;
}

static int vgpu_qos_notify(struct notifier_block *nb,
			unsigned long n, void *data)
{
	struct gk20a_scale_profile *profile =
			container_of(nb, struct gk20a_scale_profile,
					qos_notify_block);
	struct gk20a *g = get_gk20a(profile->dev);
	u32 max_freq;
	int err;

	gk20a_dbg_fn("");

	max_freq = (u32)pm_qos_read_max_bound(PM_QOS_GPU_FREQ_BOUNDS);
	err = vgpu_clk_cap_rate(profile->dev, max_freq);
	if (err)
		nvgpu_err(g, "%s failed, err=%d", __func__, err);

	return NOTIFY_OK; /* need notify call further */
}

static int vgpu_pm_qos_init(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	struct gk20a_scale_profile *profile = g->scale_profile;

	if (IS_ENABLED(CONFIG_GK20A_DEVFREQ)) {
		if (!profile)
			return -EINVAL;
	} else {
		profile = nvgpu_kzalloc(g, sizeof(*profile));
		if (!profile)
			return -ENOMEM;
		g->scale_profile = profile;
	}

	profile->dev = dev;
	profile->qos_notify_block.notifier_call = vgpu_qos_notify;
	pm_qos_add_max_notifier(PM_QOS_GPU_FREQ_BOUNDS,
				&profile->qos_notify_block);
	return 0;
}

static void vgpu_pm_qos_remove(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);

	pm_qos_remove_max_notifier(PM_QOS_GPU_FREQ_BOUNDS,
				&g->scale_profile->qos_notify_block);
	nvgpu_kfree(g, g->scale_profile);
	g->scale_profile = NULL;
}

static int vgpu_pm_init(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	unsigned long *freqs;
	int num_freqs;
	int err = 0;

	gk20a_dbg_fn("");

	if (nvgpu_platform_is_simulation(g))
		return 0;

	__pm_runtime_disable(dev, false);

	if (IS_ENABLED(CONFIG_GK20A_DEVFREQ))
		gk20a_scale_init(dev);

	if (l->devfreq) {
		/* set min/max frequency based on frequency table */
		err = vgpu_clk_get_freqs(dev, &freqs, &num_freqs);
		if (err)
			return err;

		if (num_freqs < 1)
			return -EINVAL;

		l->devfreq->min_freq = freqs[0];
		l->devfreq->max_freq = freqs[num_freqs - 1];
	}

	err = vgpu_pm_qos_init(dev);
	if (err)
		return err;

	return err;
}

static int vgpu_get_constants(struct gk20a *g)
{
	struct tegra_vgpu_cmd_msg msg = {};
	struct tegra_vgpu_constants_params *p = &msg.params.constants;
	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
	int err;

	gk20a_dbg_fn("");

	msg.cmd = TEGRA_VGPU_CMD_GET_CONSTANTS;
	msg.handle = vgpu_get_handle(g);
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	err = err ? err : msg.ret;

	if (unlikely(err)) {
		nvgpu_err(g, "%s failed, err=%d", __func__, err);
		return err;
	}

	if (unlikely(p->gpc_count > TEGRA_VGPU_MAX_GPC_COUNT ||
		p->max_tpc_per_gpc_count > TEGRA_VGPU_MAX_TPC_COUNT_PER_GPC)) {
		nvgpu_err(g, "gpc_count %d max_tpc_per_gpc %d overflow",
			(int)p->gpc_count, (int)p->max_tpc_per_gpc_count);
		return -EINVAL;
	}

	priv->constants = *p;
	return 0;
}

int vgpu_probe(struct platform_device *pdev)
{
	struct nvgpu_os_linux *l;
	struct gk20a *gk20a;
	int err;
	struct device *dev = &pdev->dev;
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	struct vgpu_priv_data *priv;

	if (!platform) {
		dev_err(dev, "no platform data\n");
		return -ENODATA;
	}

	gk20a_dbg_fn("");

	l = kzalloc(sizeof(*l), GFP_KERNEL);
	if (!l) {
		dev_err(dev, "couldn't allocate gk20a support");
		return -ENOMEM;
	}
	gk20a = &l->g;
	nvgpu_init_gk20a(gk20a);

	nvgpu_kmem_init(gk20a);

	err = nvgpu_init_enabled_flags(gk20a);
	if (err) {
		kfree(gk20a);
		return err;
	}

	l->dev = dev;
	if (tegra_platform_is_vdk())
		__nvgpu_set_enabled(gk20a, NVGPU_IS_FMODEL, true);

	gk20a->is_virtual = true;

	priv = nvgpu_kzalloc(gk20a, sizeof(*priv));
	if (!priv) {
		kfree(gk20a);
		return -ENOMEM;
	}

	platform->g = gk20a;
	platform->vgpu_priv = priv;

	err = gk20a_user_init(dev, INTERFACE_NAME, &nvgpu_class);
	if (err)
		return err;

	vgpu_init_support(pdev);

	vgpu_init_vars(gk20a, platform);

	init_rwsem(&l->busy_lock);

	nvgpu_spinlock_init(&gk20a->mc_enable_lock);

	gk20a->ch_wdt_timeout_ms = platform->ch_wdt_timeout_ms;

	/* Initialize the platform interface. */
	err = platform->probe(dev);
	if (err) {
		if (err == -EPROBE_DEFER)
			dev_info(dev, "platform probe failed");
		else
			dev_err(dev, "platform probe failed");
		return err;
	}

	if (platform->late_probe) {
		err = platform->late_probe(dev);
		if (err) {
			dev_err(dev, "late probe failed");
			return err;
		}
	}

	err = vgpu_comm_init(pdev);
	if (err) {
		dev_err(dev, "failed to init comm interface\n");
		return -ENOSYS;
	}

	priv->virt_handle = vgpu_connect();
	if (!priv->virt_handle) {
		dev_err(dev, "failed to connect to server node\n");
		vgpu_comm_deinit();
		return -ENOSYS;
	}

	err = vgpu_get_constants(gk20a);
	if (err) {
		vgpu_comm_deinit();
		return err;
	}

	err = vgpu_pm_init(dev);
	if (err) {
		dev_err(dev, "pm init failed");
		return err;
	}

	err = nvgpu_thread_create(&priv->intr_handler, gk20a,
			vgpu_intr_thread, "gk20a");
	if (err)
		return err;

	gk20a_debug_init(gk20a, "gpu.0");

	/* Set DMA parameters to allow larger sgt lists */
	dev->dma_parms = &l->dma_parms;
	dma_set_max_seg_size(dev, UINT_MAX);

	gk20a->gr_idle_timeout_default =
			CONFIG_GK20A_DEFAULT_TIMEOUT;
	gk20a->timeouts_enabled = true;

	vgpu_create_sysfs(dev);
	gk20a_init_gr(gk20a);

	nvgpu_ref_init(&gk20a->refcount);

	return 0;
}

int vgpu_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct gk20a *g = get_gk20a(dev);

	gk20a_dbg_fn("");

	vgpu_pm_qos_remove(dev);
	if (g->remove_support)
		g->remove_support(g);

	vgpu_comm_deinit();
	gk20a_sched_ctrl_cleanup(g);
	gk20a_user_deinit(dev, &nvgpu_class);
	vgpu_remove_sysfs(dev);
	gk20a_get_platform(dev)->g = NULL;
	gk20a_put(g);

	return 0;
}