path: root/include/os/linux/ioctl_ctrl.c
author     Joshua Bakita <bakitajoshua@gmail.com>  2023-10-29 13:07:40 -0400
committer  Joshua Bakita <bakitajoshua@gmail.com>  2023-10-29 13:10:52 -0400
commit     2c5337a24f7f2d02989dfb733c55d6d8c7e90493 (patch)
tree       b9f1028cb443b03190b710c0d7ee640bf5958631 /include/os/linux/ioctl_ctrl.c
parent     aa06f84f03cba7ad1aae5cd527355bb3d8c152a6 (diff)
Update includes to L4T r32.7.4 and drop nvgpu/gk20a.h dependency
Also add instructions for updating `include/`. These files are now only needed to build on Linux 4.9-based Tegra platforms.
Diffstat (limited to 'include/os/linux/ioctl_ctrl.c')
-rw-r--r--  include/os/linux/ioctl_ctrl.c | 122
1 file changed, 80 insertions(+), 42 deletions(-)
diff --git a/include/os/linux/ioctl_ctrl.c b/include/os/linux/ioctl_ctrl.c
index ee141ff..841d345 100644
--- a/include/os/linux/ioctl_ctrl.c
+++ b/include/os/linux/ioctl_ctrl.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2020, NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2011-2021, NVIDIA Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -60,7 +60,6 @@ struct gk20a_ctrl_priv {
         struct nvgpu_list_node list;
         struct {
                 struct vm_area_struct *vma;
-                unsigned long flags;
                 bool vma_mapped;
         } usermode_vma;
 };
@@ -488,27 +487,26 @@ static int gk20a_ctrl_alloc_as(
 
         snprintf(name, sizeof(name), "nvhost-%s-fd%d", g->name, fd);
 
-        file = anon_inode_getfile(name, l->as_dev.cdev.ops, NULL, O_RDWR);
-        if (IS_ERR(file)) {
-                err = PTR_ERR(file);
-                goto clean_up;
-        }
-
         err = gk20a_as_alloc_share(g, args->big_page_size,
                         gk20a_as_translate_as_alloc_flags(g,
                                 args->flags),
                         &as_share);
         if (err)
-                goto clean_up_file;
+                goto clean_up;
+
+        file = anon_inode_getfile(name, l->as_dev.cdev.ops, as_share, O_RDWR);
+        if (IS_ERR(file)) {
+                err = PTR_ERR(file);
+                goto clean_up_as;
+        }
 
         fd_install(fd, file);
-        file->private_data = as_share;
 
         args->as_fd = fd;
         return 0;
 
-clean_up_file:
-        fput(file);
+clean_up_as:
+        gk20a_as_release_share(as_share);
 clean_up:
         put_unused_fd(fd);
         return err;
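
The hunk above reorders gk20a_ctrl_alloc_as() so the address-space share is created before the anon-inode file that refers to it, and the file descriptor is only installed once both exist; each error label then unwinds exactly what has been set up so far. Below is a standalone userspace sketch of that construct-then-publish ordering. It is not nvgpu code: struct share, share_alloc(), handle_create() and alloc_share_handle() are invented stand-ins for the example.

/* Sketch only: invented names; handle_create() stands in for anon_inode_getfile(). */
#include <stdio.h>
#include <stdlib.h>

struct share {
        int id;
};

static struct share *share_alloc(void)
{
        return calloc(1, sizeof(struct share));
}

static void share_release(struct share *s)
{
        free(s);
}

/* Creating the handle can still fail after the backing object exists. */
static FILE *handle_create(const struct share *s)
{
        (void)s;        /* the real code stores the share as the file's private data */
        return tmpfile();
}

static int alloc_share_handle(FILE **out_file, struct share **out_share)
{
        struct share *s;
        FILE *f;

        s = share_alloc();              /* 1. backing object */
        if (s == NULL)
                return -1;

        f = handle_create(s);           /* 2. handle referencing it */
        if (f == NULL)
                goto clean_up_share;    /* only the share exists yet */

        *out_file = f;                  /* 3. publish (the fd_install() analogue) */
        *out_share = s;
        return 0;

clean_up_share:
        share_release(s);
        return -1;
}

int main(void)
{
        FILE *f;
        struct share *s;

        if (alloc_share_handle(&f, &s) == 0) {
                puts("handle created after its backing object");
                fclose(f);
                share_release(s);
        }
        return 0;
}

With this ordering a failure in handle creation never has to undo a published descriptor, mirroring why the patch moves anon_inode_getfile() after gk20a_as_alloc_share().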
@@ -692,12 +690,15 @@ static int nvgpu_gpu_ioctl_trigger_suspend(struct gk20a *g)
 
         err = gk20a_busy(g);
         if (err)
                 return err;
 
-        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-        err = gr_gk20a_elpg_protected_call(g,
+        if (g->ops.gr.trigger_suspend) {
+                nvgpu_mutex_acquire(&g->dbg_sessions_lock);
+                err = gr_gk20a_elpg_protected_call(g,
                         g->ops.gr.trigger_suspend(g));
                 nvgpu_mutex_release(&g->dbg_sessions_lock);
+        } else
+                err = -EINVAL;
 
         gk20a_idle(g);
 
@@ -731,8 +732,13 @@ static int nvgpu_gpu_ioctl_wait_for_pause(struct gk20a *g,
                 goto out_free;
 
         nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-        (void)gr_gk20a_elpg_protected_call(g,
+        if (g->ops.gr.wait_for_pause) {
+                (void)gr_gk20a_elpg_protected_call(g,
                         g->ops.gr.wait_for_pause(g, w_state));
+        } else {
+                err = -EINVAL;
+                goto out_idle;
+        }
 
         for (sm_id = 0; sm_id < g->gr.no_of_sm; sm_id++) {
                 ioctl_w_state[sm_id].valid_warps[0] =
@@ -755,6 +761,7 @@ static int nvgpu_gpu_ioctl_wait_for_pause(struct gk20a *g,
                 err = -EFAULT;
         }
 
+out_idle:
         nvgpu_mutex_release(&g->dbg_sessions_lock);
 
         gk20a_idle(g);
@@ -772,12 +779,15 @@ static int nvgpu_gpu_ioctl_resume_from_pause(struct gk20a *g)
 
         err = gk20a_busy(g);
         if (err)
                 return err;
 
-        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-        err = gr_gk20a_elpg_protected_call(g,
+        if (g->ops.gr.resume_from_pause) {
+                nvgpu_mutex_acquire(&g->dbg_sessions_lock);
+                err = gr_gk20a_elpg_protected_call(g,
                         g->ops.gr.resume_from_pause(g));
                 nvgpu_mutex_release(&g->dbg_sessions_lock);
+        } else
+                err = -EINVAL;
 
         gk20a_idle(g);
 
@@ -792,8 +802,11 @@ static int nvgpu_gpu_ioctl_clear_sm_errors(struct gk20a *g)
         if (err)
                 return err;
 
-        err = gr_gk20a_elpg_protected_call(g,
+        if (g->ops.gr.clear_sm_errors) {
+                err = gr_gk20a_elpg_protected_call(g,
                         g->ops.gr.clear_sm_errors(g));
+        } else
+                err = -EINVAL;
 
         gk20a_idle(g);
 
@@ -806,9 +819,12 @@ static int nvgpu_gpu_ioctl_has_any_exception(
 {
         u32 tpc_exception_en;
 
-        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-        tpc_exception_en = g->ops.gr.tpc_enabled_exceptions(g);
-        nvgpu_mutex_release(&g->dbg_sessions_lock);
+        if (g->ops.gr.tpc_enabled_exceptions) {
+                nvgpu_mutex_acquire(&g->dbg_sessions_lock);
+                tpc_exception_en = g->ops.gr.tpc_enabled_exceptions(g);
+                nvgpu_mutex_release(&g->dbg_sessions_lock);
+        } else
+                return -EINVAL;
 
         args->tpc_exception_en_sm_mask = tpc_exception_en;
 
@@ -2023,7 +2039,6 @@ int gk20a_ctrl_dev_mmap(struct file *filp, struct vm_area_struct *vma)
                         vma->vm_end - vma->vm_start, vma->vm_page_prot);
         if (!err) {
                 priv->usermode_vma.vma = vma;
-                priv->usermode_vma.flags = vma->vm_flags;
                 vma->vm_private_data = priv;
                 priv->usermode_vma.vma_mapped = true;
         }
@@ -2034,7 +2049,7 @@ int gk20a_ctrl_dev_mmap(struct file *filp, struct vm_area_struct *vma)
         return err;
 }
 
-static void alter_usermode_mapping(struct gk20a *g,
+static int alter_usermode_mapping(struct gk20a *g,
                 struct gk20a_ctrl_priv *priv,
                 bool poweroff)
 {
@@ -2042,57 +2057,80 @@ static void alter_usermode_mapping(struct gk20a *g,
         struct vm_area_struct *vma = priv->usermode_vma.vma;
         bool vma_mapped = priv->usermode_vma.vma_mapped;
         u64 addr;
-        int err;
+        int err = 0;
 
         if (!vma) {
                 /* Nothing to do - no mmap called */
-                return;
+                return 0;
         }
 
         addr = l->regs_bus_addr + g->ops.fifo.usermode_base(g);
 
-        down_write(&vma->vm_mm->mmap_sem);
-
         /*
          * This is a no-op for the below cases
          * a) poweroff and !vma_mapped -> do nothing as no map exists
          * b) !poweroff and vmap_mapped -> do nothing as already mapped
          */
-        if (poweroff && vma_mapped) {
+        if (poweroff != vma_mapped) {
+                return 0;
+        }
+
+        /*
+         * We use trylock due to lock inversion: we need to acquire
+         * mmap_lock while holding ctrl_privs_lock. usermode_vma_close
+         * does it in reverse order. Trylock is a way to avoid deadlock.
+         */
+        if (!down_write_trylock(&vma->vm_mm->mmap_sem)) {
+                return -EBUSY;
+        }
+
+        if (poweroff) {
                 err = zap_vma_ptes(vma, vma->vm_start, SZ_4K);
                 if (err == 0) {
-                        vma->vm_flags = VM_NONE;
                         priv->usermode_vma.vma_mapped = false;
                 } else {
                         nvgpu_err(g, "can't remove usermode mapping");
                 }
-        } else if (!poweroff && !vma_mapped) {
-                vma->vm_flags = priv->usermode_vma.flags;
+        } else {
                 err = io_remap_pfn_range(vma, vma->vm_start,
                                 addr >> PAGE_SHIFT,
                                 SZ_4K, vma->vm_page_prot);
                 if (err != 0) {
                         nvgpu_err(g, "can't restore usermode mapping");
-                        vma->vm_flags = VM_NONE;
                 } else {
                         priv->usermode_vma.vma_mapped = true;
                 }
         }
 
         up_write(&vma->vm_mm->mmap_sem);
+
+        return err;
 }
 
 static void alter_usermode_mappings(struct gk20a *g, bool poweroff)
 {
         struct gk20a_ctrl_priv *priv;
         struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
+        int err = 0;
 
-        nvgpu_mutex_acquire(&l->ctrl.privs_lock);
-        nvgpu_list_for_each_entry(priv, &l->ctrl.privs,
-                        gk20a_ctrl_priv, list) {
-                alter_usermode_mapping(g, priv, poweroff);
-        }
-        nvgpu_mutex_release(&l->ctrl.privs_lock);
+        do {
+                nvgpu_mutex_acquire(&l->ctrl.privs_lock);
+                nvgpu_list_for_each_entry(priv, &l->ctrl.privs,
+                                gk20a_ctrl_priv, list) {
+                        err = alter_usermode_mapping(g, priv, poweroff);
+                        if (err != 0) {
+                                break;
+                        }
+                }
+                nvgpu_mutex_release(&l->ctrl.privs_lock);
+
+                if (err == -EBUSY) {
+                        nvgpu_log_info(g, "ctrl_privs_lock lock contended. retry altering usermode mappings");
+                        nvgpu_udelay(10);
+                } else if (err != 0) {
+                        nvgpu_err(g, "can't alter usermode mapping. err = %d", err);
+                }
+        } while (err == -EBUSY);
 }
 
 void nvgpu_hide_usermode_for_poweroff(struct gk20a *g)
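
The last hunk replaces the unconditional down_write() with down_write_trylock() plus a retry loop in alter_usermode_mappings(): that path takes ctrl_privs_lock and then needs mmap_sem, while the vma-close path takes the two in the opposite order, so backing off on contention avoids an ABBA deadlock. Below is a minimal standalone sketch of that back-off-and-retry pattern using POSIX mutexes; lock_a, lock_b and the helper names are invented for the example and are not nvgpu APIs.

/*
 * Sketch only: lock_a plays the role of ctrl_privs_lock, lock_b the role
 * of mmap_sem. The a -> b path never blocks on lock_b while holding
 * lock_a; it trylocks, drops everything on contention, and retries.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/* Needs lock_b while the caller already holds lock_a (a -> b order). */
static int work_needing_both(void)
{
        if (pthread_mutex_trylock(&lock_b) != 0)
                return -EBUSY;  /* contended: tell the caller to back off */

        /* ... touch state protected by both locks ... */

        pthread_mutex_unlock(&lock_b);
        return 0;
}

static void work_with_retry(void)
{
        int err;

        do {
                pthread_mutex_lock(&lock_a);
                err = work_needing_both();
                pthread_mutex_unlock(&lock_a);

                if (err == -EBUSY) {
                        /* lock_a is fully dropped here, so the b -> a path
                         * can finish; then we try again. */
                        usleep(10);
                }
        } while (err == -EBUSY);
}

/* The opposite-order path: takes lock_b first, then lock_a. */
static void *reverse_order_path(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock_b);
        usleep(1000);                   /* widen the race window a little */
        pthread_mutex_lock(&lock_a);
        pthread_mutex_unlock(&lock_a);
        pthread_mutex_unlock(&lock_b);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, reverse_order_path, NULL);
        work_with_retry();
        pthread_join(t, NULL);
        printf("both paths completed without deadlock\n");
        return 0;
}

The brief delay before retrying mirrors the nvgpu_udelay(10) in the patch: it keeps the retry loop from spinning on the lock while the opposite-order holder finishes its critical section.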