aboutsummaryrefslogtreecommitdiffstats
path: root/include/os/linux/module.c
diff options
context:
space:
mode:
Diffstat (limited to 'include/os/linux/module.c')
-rw-r--r--include/os/linux/module.c1547
1 files changed, 0 insertions, 1547 deletions
diff --git a/include/os/linux/module.c b/include/os/linux/module.c
deleted file mode 100644
index fdbab46..0000000
--- a/include/os/linux/module.c
+++ /dev/null
@@ -1,1547 +0,0 @@
1/*
2 * GK20A Graphics
3 *
4 * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/module.h>
20#include <linux/of.h>
21#include <linux/of_device.h>
22#include <linux/of_platform.h>
23#include <linux/of_address.h>
24#include <linux/interrupt.h>
25#include <linux/pm_runtime.h>
26#include <linux/reset.h>
27#include <linux/reboot.h>
28#include <linux/notifier.h>
29#include <linux/platform/tegra/common.h>
30#include <linux/pci.h>
31
32#include <uapi/linux/nvgpu.h>
33#include <dt-bindings/soc/gm20b-fuse.h>
34#include <dt-bindings/soc/gp10b-fuse.h>
35#include <dt-bindings/soc/gv11b-fuse.h>
36
37#include <soc/tegra/fuse.h>
38
39#include <nvgpu/hal_init.h>
40#include <nvgpu/dma.h>
41#include <nvgpu/kmem.h>
42#include <nvgpu/nvgpu_common.h>
43#include <nvgpu/soc.h>
44#include <nvgpu/enabled.h>
45#include <nvgpu/debug.h>
46#include <nvgpu/ctxsw_trace.h>
47#include <nvgpu/vidmem.h>
48#include <nvgpu/sim.h>
49#include <nvgpu/clk_arb.h>
50#include <nvgpu/timers.h>
51#include <nvgpu/channel.h>
52#include <nvgpu/nvgpu_err.h>
53
54#include "platform_gk20a.h"
55#include "sysfs.h"
56#include "vgpu/vgpu_linux.h"
57#include "scale.h"
58#include "pci.h"
59#include "module.h"
60#include "module_usermode.h"
61#include "intr.h"
62#include "ioctl.h"
63#include "ioctl_ctrl.h"
64
65#include "os_linux.h"
66#include "os_ops.h"
67#include "ctxsw_trace.h"
68#include "driver_common.h"
69#include "channel.h"
70#include "debug_pmgr.h"
71
72#ifdef CONFIG_NVGPU_SUPPORT_CDE
73#include "cde.h"
74#endif
75
76#define CLASS_NAME "nvidia-gpu"
77/* TODO: Change to e.g. "nvidia-gpu%s" once we have symlinks in place. */
78
79#define GK20A_WAIT_FOR_IDLE_MS 2000
80
81#define CREATE_TRACE_POINTS
82#include <trace/events/gk20a.h>
83
/*
 * Reboot/shutdown notifier callback: mark the kernel as dying so that
 * nvgpu_can_busy() refuses any further power-up attempts.
 */
static int nvgpu_kernel_shutdown_notification(struct notifier_block *nb,
					unsigned long event, void *unused)
{
	struct gk20a *g = container_of(nb, struct gk20a, nvgpu_reboot_nb);

	__nvgpu_set_enabled(g, NVGPU_KERNEL_IS_DYING, true);
	return NOTIFY_DONE;
}
92
/*
 * Return the device-tree node for the GPU. For a PCI GPU, walk up to the
 * root bus and use the of_node of the host bridge's parent; for a
 * platform device, use its own of_node.
 */
struct device_node *nvgpu_get_node(struct gk20a *g)
{
	struct device *dev = dev_from_gk20a(g);

	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		/* Climb to the root bus; its bridge hangs off the DT parent. */
		while (!pci_is_root_bus(bus))
			bus = bus->parent;

		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}
108
/*
 * Take a runtime-PM usage reference without resuming the device.
 * Balanced by gk20a_idle_nosuspend().
 */
void gk20a_busy_noresume(struct gk20a *g)
{
	pm_runtime_get_noresume(dev_from_gk20a(g));
}
113
114/*
115 * Check if the device can go busy.
116 */
117static int nvgpu_can_busy(struct gk20a *g)
118{
119 /* Can't do anything if the system is rebooting/shutting down. */
120 if (nvgpu_is_enabled(g, NVGPU_KERNEL_IS_DYING))
121 return 0;
122
123 /* Can't do anything if the driver is restarting. */
124 if (nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING))
125 return 0;
126
127 return 1;
128}
129
130int gk20a_busy(struct gk20a *g)
131{
132 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
133 int ret = 0;
134 struct device *dev;
135
136 if (!g)
137 return -ENODEV;
138
139 atomic_inc(&g->usage_count.atomic_var);
140
141 down_read(&l->busy_lock);
142
143 if (!nvgpu_can_busy(g)) {
144 ret = -ENODEV;
145 atomic_dec(&g->usage_count.atomic_var);
146 goto fail;
147 }
148
149 dev = dev_from_gk20a(g);
150
151 if (pm_runtime_enabled(dev)) {
152 /* Increment usage count and attempt to resume device */
153 ret = pm_runtime_get_sync(dev);
154 if (ret < 0) {
155 /* Mark suspended so runtime pm will retry later */
156 pm_runtime_set_suspended(dev);
157 pm_runtime_put_noidle(dev);
158 atomic_dec(&g->usage_count.atomic_var);
159 goto fail;
160 }
161 } else {
162 ret = gk20a_gpu_is_virtual(dev) ?
163 vgpu_pm_finalize_poweron(dev) :
164 gk20a_pm_finalize_poweron(dev);
165 if (ret) {
166 atomic_dec(&g->usage_count.atomic_var);
167 goto fail;
168 }
169 }
170
171fail:
172 up_read(&l->busy_lock);
173
174 return ret < 0 ? ret : 0;
175}
176
/* Drop a runtime-PM reference taken with gk20a_busy_noresume(). */
void gk20a_idle_nosuspend(struct gk20a *g)
{
	pm_runtime_put_noidle(dev_from_gk20a(g));
}
181
/*
 * Drop the usage reference taken by gk20a_busy() and allow runtime PM to
 * autosuspend the device once it goes idle.
 */
void gk20a_idle(struct gk20a *g)
{
	struct device *dev;

	atomic_dec(&g->usage_count.atomic_var);

	dev = dev_from_gk20a(g);

	/* Nothing more to do without a device or while shutting down. */
	if (!(dev && nvgpu_can_busy(g)))
		return;

	if (pm_runtime_enabled(dev)) {
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_sync_autosuspend(dev);
	}
}
198
/*
 * Undoes gk20a_lockout_registers(): restore the saved BAR0/BAR1 mappings
 * so the CPU may access GPU registers again.
 */
static int gk20a_restore_registers(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

	l->regs = l->regs_saved;
	l->bar1 = l->bar1_saved;

	nvgpu_restore_usermode_registers(g);

	return 0;
}
213
/*
 * Linux-side one-time power-on initialization: channel support plus the
 * optional clk/therm/fecs-trace/pmgr debugfs hooks. Becomes a no-op once
 * l->init_done is set.
 *
 * Returns 0 on success or the first failing sub-init's error code.
 */
int nvgpu_finalize_poweron_linux(struct nvgpu_os_linux *l)
{
	struct gk20a *g = &l->g;
	int err;

	if (l->init_done)
		return 0;

	err = nvgpu_init_channel_support_linux(l);
	if (err) {
		nvgpu_err(g, "failed to init linux channel support");
		return err;
	}

	/* The debugfs hooks below are optional per-chip callbacks. */
	if (l->ops.clk.init_debugfs) {
		err = l->ops.clk.init_debugfs(g);
		if (err) {
			nvgpu_err(g, "failed to init linux clk debugfs");
			return err;
		}
	}

	if (l->ops.therm.init_debugfs) {
		err = l->ops.therm.init_debugfs(g);
		if (err) {
			nvgpu_err(g, "failed to init linux therm debugfs");
			return err;
		}
	}

	if (l->ops.fecs_trace.init_debugfs) {
		err = l->ops.fecs_trace.init_debugfs(g);
		if (err) {
			nvgpu_err(g, "failed to init linux fecs trace debugfs");
			return err;
		}
	}

	err = nvgpu_pmgr_init_debugfs_linux(l);
	if (err) {
		nvgpu_err(g, "failed to init linux pmgr debugfs");
		return err;
	}

	l->init_done = true;

	return 0;
}
262
263bool gk20a_check_poweron(struct gk20a *g)
264{
265 bool ret;
266
267 nvgpu_mutex_acquire(&g->power_lock);
268 ret = g->power_on;
269 nvgpu_mutex_release(&g->power_lock);
270
271 return ret;
272}
273
/*
 * Full GPU power-on sequence: restore register access, detect the chip,
 * run common and Linux-specific power-on, then enable interrupts,
 * scaling and optional CDE/ECC support. Serialized by g->power_lock and
 * idempotent while g->power_on is already set.
 *
 * Returns 0 on success; on error the device is left marked powered off.
 */
int gk20a_pm_finalize_poweron(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	int err = 0;

	nvgpu_log_fn(g, " ");

	nvgpu_mutex_acquire(&g->power_lock);

	/* Already powered on: nothing to do. */
	if (g->power_on)
		goto done;

	trace_gk20a_finalize_poweron(dev_name(dev));

	/* Increment platform power refcount */
	if (platform->busy) {
		err = platform->busy(dev);
		if (err < 0) {
			nvgpu_err(g, "failed to poweron platform dependency");
			goto done;
		}
	}

	err = gk20a_restore_registers(g);
	if (err)
		goto done;

	nvgpu_restore_usermode_for_poweron(g);

	/* Enable interrupt workqueue */
	if (!l->nonstall_work_queue) {
		l->nonstall_work_queue = alloc_workqueue("%s",
						WQ_HIGHPRI, 1, "mc_nonstall");
		INIT_WORK(&l->nonstall_fn_work, nvgpu_intr_nonstall_cb);
	}

	err = nvgpu_detect_chip(g);
	if (err)
		goto done;

	/* Simulator-only late-init hook. */
	if (g->sim) {
		if (g->sim->sim_init_late)
			g->sim->sim_init_late(g);
	}

	err = gk20a_finalize_poweron(g);
	if (err)
		goto done;

	err = nvgpu_init_os_linux_ops(l);
	if (err)
		goto done;

	err = nvgpu_finalize_poweron_linux(l);
	if (err)
		goto done;

	nvgpu_init_mm_ce_context(g);

	nvgpu_vidmem_thread_unpause(&g->mm);

	/* Initialise scaling: it will initialize scaling drive only once */
	if (IS_ENABLED(CONFIG_GK20A_DEVFREQ) &&
			nvgpu_platform_is_silicon(g)) {
		gk20a_scale_init(dev);
		if (platform->initscale)
			platform->initscale(dev);
	}

	trace_gk20a_finalize_poweron_done(dev_name(dev));

	/* Stall/nonstall interrupts may share one line; enable each once. */
	enable_irq(g->irq_stall);
	if (g->irq_stall != g->irq_nonstall)
		enable_irq(g->irq_nonstall);
	g->irqs_enabled = 1;

	gk20a_scale_resume(dev_from_gk20a(g));

#ifdef CONFIG_NVGPU_SUPPORT_CDE
	if (platform->has_cde)
		gk20a_init_cde_support(l);
#endif

#ifdef CONFIG_NVGPU_SUPPORT_LINUX_ECC_ERROR_REPORTING
	nvgpu_enable_ecc_reporting(g);
#endif

	err = gk20a_sched_ctrl_init(g);
	if (err) {
		nvgpu_err(g, "failed to init sched control");
		goto done;
	}

	g->sw_ready = true;

done:
	if (err) {
		/* Roll back: leave the device marked powered off. */
		g->power_on = false;

#ifdef CONFIG_NVGPU_SUPPORT_LINUX_ECC_ERROR_REPORTING
		nvgpu_disable_ecc_reporting(g);
#endif
	}

	nvgpu_mutex_release(&g->power_lock);
	return err;
}
383
/*
 * Locks out the driver from accessing GPU registers. This prevents access to
 * these registers after the GPU has been clock or power gated. This should
 * help find annoying bugs where register reads and writes are silently
 * dropped after the GPU has been turned off. On older chips these reads and
 * writes can also lock the entire CPU up.
 */
static int gk20a_lockout_registers(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

	/* gk20a_restore_registers() restores from regs_saved/bar1_saved. */
	l->regs = NULL;
	l->bar1 = NULL;

	nvgpu_lockout_usermode_registers(g);

	return 0;
}
402
/*
 * GPU power-off sequence: disable IRQs, suspend scaling (and CDE if
 * configured), save hardware state via gk20a_prepare_poweroff(), then
 * lock out register access. On failure, IRQ and scaling state are
 * restored. Serialized by g->power_lock; no-op if already powered off.
 */
static int gk20a_pm_prepare_poweroff(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
#ifdef CONFIG_NVGPU_SUPPORT_CDE
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
#endif
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	bool irqs_enabled;
	int ret = 0;

	nvgpu_log_fn(g, " ");

	nvgpu_mutex_acquire(&g->power_lock);

	if (!g->power_on)
		goto done;

	/* disable IRQs and wait for completion */
	irqs_enabled = g->irqs_enabled;
	if (irqs_enabled) {
		disable_irq(g->irq_stall);
		if (g->irq_stall != g->irq_nonstall)
			disable_irq(g->irq_nonstall);
		g->irqs_enabled = 0;
	}

	gk20a_scale_suspend(dev);

#ifdef CONFIG_NVGPU_SUPPORT_CDE
	gk20a_cde_suspend(l);
#endif

	ret = gk20a_prepare_poweroff(g);
	if (ret)
		goto error;

	/* Decrement platform power refcount */
	if (platform->idle)
		platform->idle(dev);

	/* Stop CPU from accessing the GPU registers. */
	gk20a_lockout_registers(g);

#ifdef CONFIG_NVGPU_SUPPORT_LINUX_ECC_ERROR_REPORTING
	nvgpu_disable_ecc_reporting(g);
#endif

	nvgpu_hide_usermode_for_poweroff(g);
	nvgpu_mutex_release(&g->power_lock);
	return 0;

error:
	/* re-enable IRQs if previously enabled */
	if (irqs_enabled) {
		enable_irq(g->irq_stall);
		if (g->irq_stall != g->irq_nonstall)
			enable_irq(g->irq_nonstall);
		g->irqs_enabled = 1;
	}

	gk20a_scale_resume(dev);
done:
	nvgpu_mutex_release(&g->power_lock);

	return ret;
}
469
470static struct of_device_id tegra_gk20a_of_match[] = {
471#ifdef CONFIG_TEGRA_GK20A
472 { .compatible = "nvidia,tegra210-gm20b",
473 .data = &gm20b_tegra_platform },
474 { .compatible = "nvidia,tegra186-gp10b",
475 .data = &gp10b_tegra_platform },
476 { .compatible = "nvidia,gv11b",
477 .data = &gv11b_tegra_platform },
478#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
479 { .compatible = "nvidia,gv11b-vgpu",
480 .data = &gv11b_vgpu_tegra_platform},
481#endif
482#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
483 { .compatible = "nvidia,tegra124-gk20a-vgpu",
484 .data = &vgpu_tegra_platform },
485#endif
486#endif
487
488 { },
489};
490MODULE_DEVICE_TABLE(of, tegra_gk20a_of_match);
491
492#ifdef CONFIG_PM
/**
 * __gk20a_do_idle() - force the GPU to idle and railgate
 *
 * In success, this call MUST be balanced by caller with __gk20a_do_unidle()
 *
 * Acquires two locks : &l->busy_lock and &platform->railgate_lock
 * In success, we hold these locks and return
 * In failure, we release these locks and return
 */
int __gk20a_do_idle(struct gk20a *g, bool force_reset)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct device *dev = dev_from_gk20a(g);
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	struct nvgpu_timeout timeout;
	int ref_cnt;
	int target_ref_cnt = 0;
	bool is_railgated;
	int err = 0;

	/*
	 * Hold back deterministic submits and changes to deterministic
	 * channels - this must be outside the power busy locks.
	 */
	gk20a_channel_deterministic_idle(g);

	/* acquire busy lock to block other busy() calls */
	down_write(&l->busy_lock);

	/* acquire railgate lock to prevent unrailgate in midst of do_idle() */
	nvgpu_mutex_acquire(&platform->railgate_lock);

	/*
	 * check if it is already railgated ?
	 * (success: return holding both locks, per the contract above)
	 */
	if (platform->is_railgated(dev))
		return 0;

	/*
	 * release railgate_lock, prevent suspend by incrementing usage counter,
	 * re-acquire railgate_lock
	 */
	nvgpu_mutex_release(&platform->railgate_lock);
	pm_runtime_get_sync(dev);

	/*
	 * One refcount taken in this API
	 * If User disables rail gating, we take one more
	 * extra refcount
	 */
	if (nvgpu_is_enabled(g, NVGPU_CAN_RAILGATE))
		target_ref_cnt = 1;
	else
		target_ref_cnt = 2;
	nvgpu_mutex_acquire(&platform->railgate_lock);

	nvgpu_timeout_init(g, &timeout, GK20A_WAIT_FOR_IDLE_MS,
			   NVGPU_TIMER_CPU_TIMER);

	/* check and wait until GPU is idle (with a timeout) */
	do {
		nvgpu_usleep_range(1000, 1100);
		ref_cnt = atomic_read(&dev->power.usage_count);
	} while (ref_cnt != target_ref_cnt && !nvgpu_timeout_expired(&timeout));

	if (ref_cnt != target_ref_cnt) {
		nvgpu_err(g, "failed to idle - refcount %d != target_ref_cnt",
			  ref_cnt);
		goto fail_drop_usage_count;
	}

	/* check if global force_reset flag is set */
	force_reset |= platform->force_reset_in_do_idle;

	nvgpu_timeout_init(g, &timeout, GK20A_WAIT_FOR_IDLE_MS,
			   NVGPU_TIMER_CPU_TIMER);

	if (nvgpu_is_enabled(g, NVGPU_CAN_RAILGATE) && !force_reset) {
		/*
		 * Case 1 : GPU railgate is supported
		 *
		 * if GPU is now idle, we will have only one ref count,
		 * drop this ref which will rail gate the GPU
		 */
		pm_runtime_put_sync(dev);

		/* add sufficient delay to allow GPU to rail gate */
		nvgpu_msleep(g->railgate_delay);

		/* check in loop if GPU is railgated or not */
		do {
			nvgpu_usleep_range(1000, 1100);
			is_railgated = platform->is_railgated(dev);
		} while (!is_railgated && !nvgpu_timeout_expired(&timeout));

		if (is_railgated) {
			/* success: return holding both locks */
			return 0;
		} else {
			nvgpu_err(g, "failed to idle in timeout");
			goto fail_timeout;
		}
	} else {
		/*
		 * Case 2 : GPU railgate is not supported or we explicitly
		 * do not want to depend on runtime PM
		 *
		 * if GPU is now idle, call prepare_poweroff() to save the
		 * state and then do explicit railgate
		 *
		 * __gk20a_do_unidle() needs to unrailgate, call
		 * finalize_poweron(), and then call pm_runtime_put_sync()
		 * to balance the GPU usage counter
		 */

		/* Save the GPU state */
		err = gk20a_pm_prepare_poweroff(dev);
		if (err)
			goto fail_drop_usage_count;

		/* railgate GPU */
		platform->railgate(dev);

		nvgpu_udelay(10);

		/* tells __gk20a_do_unidle() to unrailgate + power on */
		g->forced_reset = true;
		return 0;
	}

fail_drop_usage_count:
	pm_runtime_put_noidle(dev);
fail_timeout:
	/* failure: drop both locks before returning */
	nvgpu_mutex_release(&platform->railgate_lock);
	up_write(&l->busy_lock);
	gk20a_channel_deterministic_unidle(g);
	return -EBUSY;
}
627
628/**
629 * gk20a_do_idle() - wrap up for __gk20a_do_idle() to be called
630 * from outside of GPU driver
631 *
632 * In success, this call MUST be balanced by caller with gk20a_do_unidle()
633 */
634static int gk20a_do_idle(void *_g)
635{
636 struct gk20a *g = (struct gk20a *)_g;
637
638 return __gk20a_do_idle(g, true);
639}
640
/**
 * __gk20a_do_unidle() - unblock all the tasks blocked by __gk20a_do_idle()
 *
 * Releases the locks that a successful __gk20a_do_idle() left held, and
 * if the idle path did a forced reset, unrailgates and powers the GPU
 * back on first.
 */
int __gk20a_do_unidle(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct device *dev = dev_from_gk20a(g);
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	int err;

	if (g->forced_reset) {
		/*
		 * If we did a forced-reset/railgate
		 * then unrailgate the GPU here first
		 */
		platform->unrailgate(dev);

		/* restore the GPU state */
		err = gk20a_pm_finalize_poweron(dev);
		if (err)
			return err;

		/* balance GPU usage counter */
		pm_runtime_put_sync(dev);

		g->forced_reset = false;
	}

	/* release the lock and open up all other busy() calls */
	nvgpu_mutex_release(&platform->railgate_lock);
	up_write(&l->busy_lock);

	gk20a_channel_deterministic_unidle(g);

	return 0;
}
677
/**
 * gk20a_do_unidle() - wrap up for __gk20a_do_unidle()
 */
static int gk20a_do_unidle(void *_g)
{
	return __gk20a_do_unidle((struct gk20a *)_g);
}
687#endif
688
689void __iomem *nvgpu_devm_ioremap_resource(struct platform_device *dev, int i,
690 struct resource **out)
691{
692 struct resource *r = platform_get_resource(dev, IORESOURCE_MEM, i);
693
694 if (!r)
695 return NULL;
696 if (out)
697 *out = r;
698 return devm_ioremap_resource(&dev->dev, r);
699}
700
/* Thin OS-abstraction wrapper around devres-managed ioremap. */
void __iomem *nvgpu_devm_ioremap(struct device *dev, resource_size_t offset,
				 resource_size_t size)
{
	return devm_ioremap(dev, offset, size);
}
706
707u64 nvgpu_resource_addr(struct platform_device *dev, int i)
708{
709 struct resource *r = platform_get_resource(dev, IORESOURCE_MEM, i);
710
711 if (!r)
712 return 0;
713
714 return r->start;
715}
716
/* Top-half handler for the stalling interrupt line. */
static irqreturn_t gk20a_intr_isr_stall(int irq, void *dev_id)
{
	struct gk20a *g = dev_id;

	return nvgpu_intr_stall(g);
}
723
/* Top-half handler for the non-stalling interrupt line. */
static irqreturn_t gk20a_intr_isr_nonstall(int irq, void *dev_id)
{
	struct gk20a *g = dev_id;

	return nvgpu_intr_nonstall(g);
}
730
/* Threaded (bottom-half) handler for the stalling interrupt line. */
static irqreturn_t gk20a_intr_thread_stall(int irq, void *dev_id)
{
	struct gk20a *g = dev_id;

	return nvgpu_intr_thread_stall(g);
}
737
/*
 * Tear down all per-unit support structures (pmu, acr, gr, ce, fifo, mm,
 * sim, usermode) and finally lock out register access. The order of the
 * remove calls below is significant; do not reorder.
 */
void gk20a_remove_support(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct sim_nvgpu_linux *sim_linux;

	tegra_unregister_idle_unidle(gk20a_do_idle);

	nvgpu_kfree(g, g->dbg_regops_tmp_buf);

	nvgpu_remove_channel_support_linux(l);

	if (g->pmu.remove_support)
		g->pmu.remove_support(&g->pmu);

	if (g->acr.remove_support != NULL) {
		g->acr.remove_support(&g->acr);
	}

	if (g->gr.remove_support)
		g->gr.remove_support(&g->gr);

	if (g->mm.remove_ce_support)
		g->mm.remove_ce_support(&g->mm);

	if (g->fifo.remove_support)
		g->fifo.remove_support(&g->fifo);

	if (g->mm.remove_support)
		g->mm.remove_support(&g->mm);

	if (g->sim) {
		sim_linux = container_of(g->sim, struct sim_nvgpu_linux, sim);
		if (g->sim->remove_support)
			g->sim->remove_support(g);
		if (sim_linux->remove_support_linux)
			sim_linux->remove_support_linux(g);
	}

	nvgpu_remove_usermode_support(g);

	nvgpu_free_enabled_flags(g);

	gk20a_lockout_registers(g);
}
782
783static int gk20a_init_support(struct platform_device *pdev)
784{
785 struct device *dev = &pdev->dev;
786 struct gk20a *g = get_gk20a(dev);
787 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
788 int err = -ENOMEM;
789
790 tegra_register_idle_unidle(gk20a_do_idle, gk20a_do_unidle, g);
791
792 l->regs = nvgpu_devm_ioremap_resource(pdev,
793 GK20A_BAR0_IORESOURCE_MEM,
794 &l->reg_mem);
795 if (IS_ERR(l->regs)) {
796 nvgpu_err(g, "failed to remap gk20a registers");
797 err = PTR_ERR(l->regs);
798 goto fail;
799 }
800
801 l->regs_bus_addr = nvgpu_resource_addr(pdev,
802 GK20A_BAR0_IORESOURCE_MEM);
803 if (!l->regs_bus_addr) {
804 nvgpu_err(g, "failed to read register bus offset");
805 err = -ENODEV;
806 goto fail;
807 }
808
809 l->bar1 = nvgpu_devm_ioremap_resource(pdev,
810 GK20A_BAR1_IORESOURCE_MEM,
811 &l->bar1_mem);
812 if (IS_ERR(l->bar1)) {
813 nvgpu_err(g, "failed to remap gk20a bar1");
814 err = PTR_ERR(l->bar1);
815 goto fail;
816 }
817
818 err = nvgpu_init_sim_support_linux(g, pdev);
819 if (err)
820 goto fail;
821 err = nvgpu_init_sim_support(g);
822 if (err)
823 goto fail_sim;
824
825 nvgpu_init_usermode_support(g);
826 return 0;
827
828fail_sim:
829 nvgpu_remove_sim_support_linux(g);
830fail:
831 if (l->regs)
832 l->regs = NULL;
833
834 if (l->bar1)
835 l->bar1 = NULL;
836
837 return err;
838}
839
/*
 * Railgate (power-gate) the GPU via the platform hook and disable the
 * fuse clock afterwards. Updates railgating statistics when debugfs is
 * enabled. No-op if the platform has no railgate hook or is already
 * railgated.
 */
static int gk20a_pm_railgate(struct device *dev)
{
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	int ret = 0;
	struct gk20a *g = get_gk20a(dev);

	/* return early if platform didn't implement railgate */
	if (!platform->railgate)
		return 0;

	/* if platform is already railgated, then just return */
	if (platform->is_railgated && platform->is_railgated(dev))
		return ret;

#ifdef CONFIG_DEBUG_FS
	g->pstats.last_rail_gate_start = jiffies;

	/* accumulate time spent ungated since the last gate completed */
	if (g->pstats.railgating_cycle_count >= 1)
		g->pstats.total_rail_ungate_time_ms =
			g->pstats.total_rail_ungate_time_ms +
			jiffies_to_msecs(g->pstats.last_rail_gate_start -
					g->pstats.last_rail_ungate_complete);
#endif

	ret = platform->railgate(dev);
	if (ret) {
		nvgpu_err(g, "failed to railgate platform, err=%d", ret);
		return ret;
	}

#ifdef CONFIG_DEBUG_FS
	g->pstats.last_rail_gate_complete = jiffies;
#endif
	ret = tegra_fuse_clock_disable();
	if (ret)
		nvgpu_err(g, "failed to disable tegra fuse clock, err=%d", ret);

	return ret;
}
879
/*
 * Unrailgate (power-up) the GPU: enable the fuse clock first, then call
 * the platform unrailgate hook under railgate_lock. Updates railgating
 * statistics when debugfs is enabled. No-op if the platform has no
 * unrailgate hook.
 */
static int gk20a_pm_unrailgate(struct device *dev)
{
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	int ret = 0;
	struct gk20a *g = get_gk20a(dev);

	/* return early if platform didn't implement unrailgate */
	if (!platform->unrailgate)
		return 0;

	ret = tegra_fuse_clock_enable();
	if (ret) {
		nvgpu_err(g, "failed to enable tegra fuse clock, err=%d", ret);
		return ret;
	}
#ifdef CONFIG_DEBUG_FS
	g->pstats.last_rail_ungate_start = jiffies;
	/* accumulate time spent gated since the last gate completed */
	if (g->pstats.railgating_cycle_count >= 1)
		g->pstats.total_rail_gate_time_ms =
			g->pstats.total_rail_gate_time_ms +
			jiffies_to_msecs(g->pstats.last_rail_ungate_start -
				g->pstats.last_rail_gate_complete);

	g->pstats.railgating_cycle_count++;
#endif

	trace_gk20a_pm_unrailgate(dev_name(dev));

	nvgpu_mutex_acquire(&platform->railgate_lock);
	ret = platform->unrailgate(dev);
	nvgpu_mutex_release(&platform->railgate_lock);

#ifdef CONFIG_DEBUG_FS
	g->pstats.last_rail_ungate_complete = jiffies;
#endif

	return ret;
}
918
/*
 * Remove association of the driver with OS interrupt handler.
 * Frees the stall IRQ, and the nonstall IRQ when it is a separate line.
 */
void nvgpu_free_irq(struct gk20a *g)
{
	struct device *dev = dev_from_gk20a(g);

	devm_free_irq(dev, g->irq_stall, g);
	if (g->irq_stall != g->irq_nonstall)
		devm_free_irq(dev, g->irq_nonstall, g);
}
930
/*
 * Idle the GPU in preparation of shutdown/remove.
 * gk20a_driver_start_unload() does not idle the GPU, but instead changes the SW
 * state to prevent further activity on the driver SW side.
 * On driver removal quiesce() should be called after start_unload().
 *
 * Returns 0 on success; the first failing step's error code otherwise.
 */
int nvgpu_quiesce(struct gk20a *g)
{
	int err;
	struct device *dev = dev_from_gk20a(g);

	if (g->power_on) {
		err = gk20a_wait_for_idle(g);
		if (err) {
			nvgpu_err(g, "failed to idle GPU, err=%d", err);
			return err;
		}

		err = gk20a_fifo_disable_all_engine_activity(g, true);
		if (err) {
			nvgpu_err(g,
				"failed to disable engine activity, err=%d",
				err);
			return err;
		}

		err = gk20a_fifo_wait_engine_idle(g);
		if (err) {
			nvgpu_err(g, "failed to idle engines, err=%d",
				err);
			return err;
		}
	}

	/* Save state / power off via the appropriate (virtual?) backend. */
	if (gk20a_gpu_is_virtual(dev))
		err = vgpu_pm_prepare_poweroff(dev);
	else
		err = gk20a_pm_prepare_poweroff(dev);

	if (err)
		nvgpu_err(g, "failed to prepare for poweroff, err=%d",
			err);

	return err;
}
976
/*
 * Platform shutdown hook: stop driver activity, disable runtime PM,
 * quiesce the GPU and railgate it. Virtual GPUs and already-railgated
 * devices take early exits.
 */
static void gk20a_pm_shutdown(struct platform_device *pdev)
{
	struct gk20a_platform *platform = platform_get_drvdata(pdev);
	struct gk20a *g = platform->g;
	int err;

	nvgpu_info(g, "shutting down");

	/* vgpu has nothing to clean up currently */
	if (gk20a_gpu_is_virtual(&pdev->dev))
		return;

	if (!g->power_on)
		goto finish;

	gk20a_driver_start_unload(g);

	/* If GPU is already railgated,
	 * just prevent more requests, and return */
	if (platform->is_railgated && platform->is_railgated(&pdev->dev)) {
		__pm_runtime_disable(&pdev->dev, false);
		nvgpu_info(g, "already railgated, shut down complete");
		return;
	}

	/* Prevent more requests by disabling Runtime PM */
	__pm_runtime_disable(&pdev->dev, false);

	err = nvgpu_quiesce(g);
	if (err)
		goto finish;

	err = gk20a_pm_railgate(&pdev->dev);
	if (err)
		nvgpu_err(g, "failed to railgate, err=%d", err);

finish:
	nvgpu_info(g, "shut down complete");
}
1016
1017#ifdef CONFIG_PM
/*
 * Runtime-PM resume callback: unrailgate, then power on through the
 * (virtual or native) backend. Railgates again if power-on fails.
 */
static int gk20a_pm_runtime_resume(struct device *dev)
{
	int err = 0;

	err = gk20a_pm_unrailgate(dev);
	if (err)
		goto fail;

	if (gk20a_gpu_is_virtual(dev))
		err = vgpu_pm_finalize_poweron(dev);
	else
		err = gk20a_pm_finalize_poweron(dev);
	if (err)
		goto fail_poweron;

	return 0;

fail_poweron:
	gk20a_pm_railgate(dev);
fail:
	return err;
}
1040
/*
 * Runtime-PM suspend callback: save state / power off through the
 * (virtual or native) backend, then railgate. On failure the device is
 * powered back on so it stays usable.
 */
static int gk20a_pm_runtime_suspend(struct device *dev)
{
	int err = 0;
	struct gk20a *g = get_gk20a(dev);

	if (!g)
		return 0;

	if (gk20a_gpu_is_virtual(dev))
		err = vgpu_pm_prepare_poweroff(dev);
	else
		err = gk20a_pm_prepare_poweroff(dev);
	if (err) {
		nvgpu_err(g, "failed to power off, err=%d", err);
		goto fail;
	}

	err = gk20a_pm_railgate(dev);
	if (err)
		goto fail;

	return 0;

fail:
	/* Recover: power back on and refresh the autosuspend timer. */
	gk20a_pm_finalize_poweron(dev);
	pm_runtime_mark_last_busy(dev);
	return err;
}
1069
/*
 * System-suspend callback: wait for all users to go idle (bounded by
 * GK20A_WAIT_FOR_IDLE_MS), then runtime-suspend the device and call the
 * platform suspend hook. Sets g->suspended on success so resume knows
 * to restore power.
 */
static int gk20a_pm_suspend(struct device *dev)
{
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	struct gk20a *g = get_gk20a(dev);
	int ret = 0;
	int usage_count;
	struct nvgpu_timeout timeout;

	/* Already powered off: only platform suspend (+railgate) needed. */
	if (!g->power_on) {
		if (platform->suspend)
			ret = platform->suspend(dev);

		if (ret)
			return ret;

		if (!pm_runtime_enabled(dev))
			ret = gk20a_pm_railgate(dev);

		return ret;
	}

	nvgpu_timeout_init(g, &timeout, GK20A_WAIT_FOR_IDLE_MS,
			   NVGPU_TIMER_CPU_TIMER);
	/*
	 * Hold back deterministic submits and changes to deterministic
	 * channels - this must be outside the power busy locks.
	 */
	gk20a_channel_deterministic_idle(g);

	/* check and wait until GPU is idle (with a timeout) */
	do {
		nvgpu_usleep_range(1000, 1100);
		usage_count = nvgpu_atomic_read(&g->usage_count);
	} while (usage_count != 0 && !nvgpu_timeout_expired(&timeout));

	if (usage_count != 0) {
		nvgpu_err(g, "failed to idle - usage_count %d", usage_count);
		ret = -EINVAL;
		goto fail_idle;
	}

	ret = gk20a_pm_runtime_suspend(dev);
	if (ret)
		goto fail_idle;

	if (platform->suspend)
		ret = platform->suspend(dev);
	if (ret)
		goto fail_suspend;

	g->suspended = true;

	return 0;

fail_suspend:
	gk20a_pm_runtime_resume(dev);
fail_idle:
	gk20a_channel_deterministic_unidle(g);
	return ret;
}
1130
/*
 * System-resume callback: run the platform resume hook, runtime-resume
 * the device if it was suspended by gk20a_pm_suspend(), and release
 * deterministic channels.
 */
static int gk20a_pm_resume(struct device *dev)
{
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	struct gk20a *g = get_gk20a(dev);
	int ret = 0;

	/* Was not powered at suspend time: only platform resume needed. */
	if (!g->suspended) {
		if (platform->resume)
			ret = platform->resume(dev);
		if (ret)
			return ret;

		if (!pm_runtime_enabled(dev))
			ret = gk20a_pm_unrailgate(dev);

		return ret;
	}

	if (platform->resume)
		ret = platform->resume(dev);
	if (ret)
		return ret;

	ret = gk20a_pm_runtime_resume(dev);
	if (ret)
		return ret;

	g->suspended = false;

	gk20a_channel_deterministic_unidle(g);

	return ret;
}
1164
/* Runtime-PM and system sleep callbacks for the GPU device. */
static const struct dev_pm_ops gk20a_pm_ops = {
	.runtime_resume = gk20a_pm_runtime_resume,
	.runtime_suspend = gk20a_pm_runtime_suspend,
	.resume = gk20a_pm_resume,
	.suspend = gk20a_pm_suspend,
};
1171#endif
1172
/*
 * Enable runtime PM with autosuspend. A negative autosuspend delay
 * effectively disables runtime suspend when railgating is off.
 */
static int gk20a_pm_init(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	int err = 0;

	nvgpu_log_fn(g, " ");

	/*
	 * Initialise pm runtime. For railgate disable
	 * case, set autosuspend delay to negative which
	 * will suspend runtime pm
	 */
	if (g->railgate_delay && nvgpu_is_enabled(g, NVGPU_CAN_RAILGATE))
		pm_runtime_set_autosuspend_delay(dev,
				 g->railgate_delay);
	else
		pm_runtime_set_autosuspend_delay(dev, -1);

	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);

	return err;
}
1196
/* Undo gk20a_pm_init(): disable autosuspend and runtime PM. */
static int gk20a_pm_deinit(struct device *dev)
{
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);
	return 0;
}
1203
/*
 * Start the process for unloading the driver. Set NVGPU_DRIVER_IS_DYING.
 * Then wait for outstanding work (idle, deferred interrupts, nonstall
 * workqueue) to drain. Virtual GPUs stop after flipping the SW state.
 */
void gk20a_driver_start_unload(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

	nvgpu_log(g, gpu_dbg_shutdown, "Driver is now going down!\n");

	down_write(&l->busy_lock);
	__nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true);
	/* GR SW ready needs to be invalidated at this time with the busy lock
	 * held to prevent a racing condition on the gr/mm code */
	g->gr.sw_ready = false;
	g->sw_ready = false;
	up_write(&l->busy_lock);

	if (g->is_virtual)
		return;

	gk20a_wait_for_idle(g);

	nvgpu_wait_for_deferred_interrupts(g);

	if (l->nonstall_work_queue) {
		cancel_work_sync(&l->nonstall_fn_work);
		destroy_workqueue(l->nonstall_work_queue);
		l->nonstall_work_queue = NULL;
	}
}
1234
/* Attach the gk20a instance to the platform data of @pdev. */
static inline void set_gk20a(struct platform_device *pdev, struct gk20a *gk20a)
{
	gk20a_get_platform(&pdev->dev)->g = gk20a;
}
1239
1240static int nvgpu_read_fuse_overrides(struct gk20a *g)
1241{
1242 struct device_node *np = nvgpu_get_node(g);
1243 struct gk20a_platform *platform = dev_get_drvdata(dev_from_gk20a(g));
1244 u32 *fuses;
1245 int count, i;
1246
1247 if (!np) /* may be pcie device */
1248 return 0;
1249
1250 count = of_property_count_elems_of_size(np, "fuse-overrides", 8);
1251 if (count <= 0)
1252 return count;
1253
1254 fuses = nvgpu_kmalloc(g, sizeof(u32) * count * 2);
1255 if (!fuses)
1256 return -ENOMEM;
1257 of_property_read_u32_array(np, "fuse-overrides", fuses, count * 2);
1258 for (i = 0; i < count; i++) {
1259 u32 fuse, value;
1260
1261 fuse = fuses[2 * i];
1262 value = fuses[2 * i + 1];
1263 switch (fuse) {
1264 case GM20B_FUSE_OPT_TPC_DISABLE:
1265 g->tpc_fs_mask_user = ~value;
1266 break;
1267 case GP10B_FUSE_OPT_ECC_EN:
1268 g->gr.fecs_feature_override_ecc_val = value;
1269 break;
1270 case GV11B_FUSE_OPT_TPC_DISABLE:
1271 if (platform->set_tpc_pg_mask != NULL)
1272 platform->set_tpc_pg_mask(dev_from_gk20a(g),
1273 value);
1274 break;
1275 default:
1276 nvgpu_err(g, "ignore unknown fuse override %08x", fuse);
1277 break;
1278 }
1279 }
1280
1281 nvgpu_kfree(g, fuses);
1282
1283 return 0;
1284}
1285
1286static int gk20a_probe(struct platform_device *dev)
1287{
1288 struct nvgpu_os_linux *l = NULL;
1289 struct gk20a *gk20a;
1290 int err;
1291 struct gk20a_platform *platform = NULL;
1292 struct device_node *np;
1293
1294 if (dev->dev.of_node) {
1295 const struct of_device_id *match;
1296
1297 match = of_match_device(tegra_gk20a_of_match, &dev->dev);
1298 if (match)
1299 platform = (struct gk20a_platform *)match->data;
1300 } else
1301 platform = (struct gk20a_platform *)dev->dev.platform_data;
1302
1303 if (!platform) {
1304 dev_err(&dev->dev, "no platform data\n");
1305 return -ENODATA;
1306 }
1307
1308 platform_set_drvdata(dev, platform);
1309
1310 if (gk20a_gpu_is_virtual(&dev->dev))
1311 return vgpu_probe(dev);
1312
1313 l = kzalloc(sizeof(*l), GFP_KERNEL);
1314 if (!l) {
1315 dev_err(&dev->dev, "couldn't allocate gk20a support");
1316 return -ENOMEM;
1317 }
1318
1319 hash_init(l->ecc_sysfs_stats_htable);
1320
1321 gk20a = &l->g;
1322
1323 nvgpu_log_fn(gk20a, " ");
1324
1325 nvgpu_init_gk20a(gk20a);
1326 set_gk20a(dev, gk20a);
1327 l->dev = &dev->dev;
1328 gk20a->log_mask = NVGPU_DEFAULT_DBG_MASK;
1329
1330 nvgpu_kmem_init(gk20a);
1331
1332 err = nvgpu_init_enabled_flags(gk20a);
1333 if (err)
1334 goto return_err;
1335
1336 np = nvgpu_get_node(gk20a);
1337 if (of_dma_is_coherent(np)) {
1338 __nvgpu_set_enabled(gk20a, NVGPU_USE_COHERENT_SYSMEM, true);
1339 __nvgpu_set_enabled(gk20a, NVGPU_SUPPORT_IO_COHERENCE, true);
1340 }
1341
1342 if (nvgpu_platform_is_simulation(gk20a))
1343 __nvgpu_set_enabled(gk20a, NVGPU_IS_FMODEL, true);
1344
1345 gk20a->irq_stall = platform_get_irq(dev, 0);
1346 gk20a->irq_nonstall = platform_get_irq(dev, 1);
1347 if (gk20a->irq_stall < 0 || gk20a->irq_nonstall < 0) {
1348 err = -ENXIO;
1349 goto return_err;
1350 }
1351
1352 err = devm_request_threaded_irq(&dev->dev,
1353 gk20a->irq_stall,
1354 gk20a_intr_isr_stall,
1355 gk20a_intr_thread_stall,
1356 0, "gk20a_stall", gk20a);
1357 if (err) {
1358 dev_err(&dev->dev,
1359 "failed to request stall intr irq @ %d\n",
1360 gk20a->irq_stall);
1361 goto return_err;
1362 }
1363 err = devm_request_irq(&dev->dev,
1364 gk20a->irq_nonstall,
1365 gk20a_intr_isr_nonstall,
1366 0, "gk20a_nonstall", gk20a);
1367 if (err) {
1368 dev_err(&dev->dev,
1369 "failed to request non-stall intr irq @ %d\n",
1370 gk20a->irq_nonstall);
1371 goto return_err;
1372 }
1373 disable_irq(gk20a->irq_stall);
1374 if (gk20a->irq_stall != gk20a->irq_nonstall)
1375 disable_irq(gk20a->irq_nonstall);
1376
1377 err = gk20a_init_support(dev);
1378 if (err)
1379 goto return_err;
1380
1381 err = nvgpu_read_fuse_overrides(gk20a);
1382
1383#ifdef CONFIG_RESET_CONTROLLER
1384 platform->reset_control = devm_reset_control_get(&dev->dev, NULL);
1385 if (IS_ERR(platform->reset_control))
1386 platform->reset_control = NULL;
1387#endif
1388
1389 err = nvgpu_probe(gk20a, "gpu.0", INTERFACE_NAME, &nvgpu_class);
1390 if (err)
1391 goto return_err;
1392
1393 err = gk20a_pm_init(&dev->dev);
1394 if (err) {
1395 dev_err(&dev->dev, "pm init failed");
1396 goto return_err;
1397 }
1398
1399#ifdef CONFIG_NVGPU_SUPPORT_LINUX_ECC_ERROR_REPORTING
1400 nvgpu_init_ecc_reporting(gk20a);
1401#endif
1402
1403 gk20a->nvgpu_reboot_nb.notifier_call =
1404 nvgpu_kernel_shutdown_notification;
1405 err = register_reboot_notifier(&gk20a->nvgpu_reboot_nb);
1406 if (err)
1407 goto return_err;
1408
1409 return 0;
1410
1411return_err:
1412 nvgpu_free_enabled_flags(gk20a);
1413
1414 /*
1415 * Last since the above allocs may use data structures in here.
1416 */
1417 nvgpu_kmem_fini(gk20a, NVGPU_KMEM_FINI_FORCE_CLEANUP);
1418
1419 kfree(l);
1420
1421 return err;
1422}
1423
/*
 * Common (platform + PCI) driver-removal teardown.
 *
 * Quiesces the GPU, then unwinds the subsystems brought up at probe time
 * (CDE, ctxsw trace, scheduler, devfreq scaling, clock arbiter, user
 * nodes, debugfs, sysfs, secure buffer, platform hook). Teardown order
 * matters: user-facing interfaces go down before platform resources.
 *
 * Returns the nvgpu_quiesce() result (0, or the idle failure already
 * reported via WARN).
 */
int nvgpu_remove(struct device *dev, struct class *class)
{
	struct gk20a *g = get_gk20a(dev);
#ifdef CONFIG_NVGPU_SUPPORT_CDE
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
#endif
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	int err;

	nvgpu_log_fn(g, " ");

	/* Best-effort idle; removal continues even if it fails. */
	err = nvgpu_quiesce(g);
	WARN(err, "gpu failed to idle during driver removal");

	if (nvgpu_mem_is_valid(&g->syncpt_mem))
		nvgpu_dma_free(g, &g->syncpt_mem);

#ifdef CONFIG_NVGPU_SUPPORT_CDE
	if (platform->has_cde)
		gk20a_cde_destroy(l);
#endif

#ifdef CONFIG_GK20A_CTXSW_TRACE
	gk20a_ctxsw_trace_cleanup(g);
#endif

	gk20a_sched_ctrl_cleanup(g);

	if (IS_ENABLED(CONFIG_GK20A_DEVFREQ))
		gk20a_scale_exit(dev);

	nvgpu_clk_arb_cleanup_arbiter(g);

	gk20a_user_deinit(dev, class);

	gk20a_debug_deinit(g);

	nvgpu_remove_sysfs(dev);

	if (platform->secure_buffer.destroy)
		platform->secure_buffer.destroy(g,
				&platform->secure_buffer);

	/* Platform-specific removal hook, if any. */
	if (platform->remove)
		platform->remove(dev);

	nvgpu_mutex_destroy(&g->clk_arb_enable_lock);

	nvgpu_log_fn(g, "removed");

	return err;
}
1476
/*
 * Platform-device remove callback. Virtual GPUs are handed off to
 * vgpu_remove(); for physical GPUs the common nvgpu_remove() teardown
 * runs, then the reboot notifier is unregistered, the platform-data
 * back-pointer cleared, the gk20a refcount dropped (which may free g),
 * and runtime PM disabled.
 */
static int __exit gk20a_remove(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;
	struct gk20a *g = get_gk20a(dev);

	if (gk20a_gpu_is_virtual(dev))
		return vgpu_remove(pdev);

	err = nvgpu_remove(dev, &nvgpu_class);

	unregister_reboot_notifier(&g->nvgpu_reboot_nb);

	set_gk20a(pdev, NULL);

	/* May release the last reference and free g; g must not be
	 * touched after this call. */
	gk20a_put(g);

	gk20a_pm_deinit(dev);

	return err;
}
1498
/* Platform-bus driver binding for integrated (Tegra) GPUs. */
static struct platform_driver gk20a_driver = {
	.probe = gk20a_probe,
	.remove = __exit_p(gk20a_remove),
	.shutdown = gk20a_pm_shutdown,
	.driver = {
		.owner = THIS_MODULE,
		.name = "gk20a",
		/* Probe may be slow (firmware load); don't block boot. */
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
#ifdef CONFIG_OF
		.of_match_table = tegra_gk20a_of_match,
#endif
#ifdef CONFIG_PM
		.pm = &gk20a_pm_ops,
#endif
		/* Unbinding via sysfs would bypass proper teardown. */
		.suppress_bind_attrs = true,
	}
};
1516
/* Device class shared by the platform and PCI nvgpu device nodes. */
struct class nvgpu_class = {
	.owner = THIS_MODULE,
	.name = CLASS_NAME,
};
1521
1522static int __init gk20a_init(void)
1523{
1524
1525 int ret;
1526
1527 ret = class_register(&nvgpu_class);
1528 if (ret)
1529 return ret;
1530
1531 ret = nvgpu_pci_init();
1532 if (ret)
1533 return ret;
1534
1535 return platform_driver_register(&gk20a_driver);
1536}
1537
/*
 * Module exit: unregister the PCI driver, the platform driver, then the
 * shared device class (the class must outlive both drivers' devices).
 */
static void __exit gk20a_exit(void)
{
	nvgpu_pci_exit();
	platform_driver_unregister(&gk20a_driver);
	class_unregister(&nvgpu_class);
}
1544
/* Module metadata and entry/exit hookup. */
MODULE_LICENSE("GPL v2");
module_init(gk20a_init);
module_exit(gk20a_exit);