Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_channel.c   260
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_channel.h     2
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_tsg.c        77
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c        23
4 files changed, 85 insertions, 277 deletions
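This change removes the per-channel event-id interface (the NVGPU_IOCTL_CHANNEL_EVENT_ID_CTRL ioctl and the gk20a_event_id_* file operations in ioctl_channel.c) and keeps event-id file descriptors as a TSG-only facility in ioctl_tsg.c; the vgpu event handler is trimmed to match. As a minimal userspace sketch of the surviving TSG path, assuming the TSG-side ioctl name NVGPU_IOCTL_TSG_EVENT_ID_CTRL and the <linux/nvgpu.h> UAPI header, neither of which appears in this diff:

/*
 * Hedged sketch, not part of the patch: request an event-id fd from an
 * already-open TSG fd, then block until the event is posted. The args
 * fields (cmd, event_id, event_fd) match the usage visible in the hunks
 * below; the ioctl name and header path are assumptions.
 */
#include <poll.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int wait_for_bpt_int(int tsg_fd)
{
	struct nvgpu_event_id_ctrl_args args = {
		.cmd = NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE,
		.event_id = NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_INT,
	};
	struct pollfd pfd;

	/* On success the driver installs an anon-inode fd in args.event_fd. */
	if (ioctl(tsg_fd, NVGPU_IOCTL_TSG_EVENT_ID_CTRL, &args) < 0)
		return -1;

	pfd.fd = args.event_fd;
	pfd.events = POLLIN | POLLPRI;

	/* gk20a_event_id_poll() reports POLLPRI | POLLIN once per posted event. */
	return (poll(&pfd, 1, -1) < 0) ? -1 : 0;
}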
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
index 5319b829..65d560c7 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
@@ -709,262 +709,6 @@ notif_clean_up:
 	return ret;
 }
 
-static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
-{
-	unsigned int mask = 0;
-	struct gk20a_event_id_data *event_id_data = filep->private_data;
-	struct gk20a *g = event_id_data->g;
-	u32 event_id = event_id_data->event_id;
-
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, "");
-
-	poll_wait(filep, &event_id_data->event_id_wq.wq, wait);
-
-	nvgpu_mutex_acquire(&event_id_data->lock);
-
-	if (event_id_data->is_tsg) {
-		struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
-
-		if (event_id_data->event_posted) {
-			gk20a_dbg_info(
-				"found pending event_id=%d on TSG=%d\n",
-				event_id, tsg->tsgid);
-			mask = (POLLPRI | POLLIN);
-			event_id_data->event_posted = false;
-		}
-	} else {
-		struct channel_gk20a *ch = g->fifo.channel
-			+ event_id_data->id;
-
-		if (event_id_data->event_posted) {
-			gk20a_dbg_info(
-				"found pending event_id=%d on chid=%d\n",
-				event_id, ch->chid);
-			mask = (POLLPRI | POLLIN);
-			event_id_data->event_posted = false;
-		}
-	}
-
-	nvgpu_mutex_release(&event_id_data->lock);
-
-	return mask;
-}
-
-static int gk20a_event_id_release(struct inode *inode, struct file *filp)
-{
-	struct gk20a_event_id_data *event_id_data = filp->private_data;
-	struct gk20a *g = event_id_data->g;
-
-	if (event_id_data->is_tsg) {
-		struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
-
-		nvgpu_mutex_acquire(&tsg->event_id_list_lock);
-		nvgpu_list_del(&event_id_data->event_id_node);
-		nvgpu_mutex_release(&tsg->event_id_list_lock);
-	} else {
-		struct channel_gk20a *ch = g->fifo.channel + event_id_data->id;
-
-		nvgpu_mutex_acquire(&ch->event_id_list_lock);
-		nvgpu_list_del(&event_id_data->event_id_node);
-		nvgpu_mutex_release(&ch->event_id_list_lock);
-	}
-
-	nvgpu_mutex_destroy(&event_id_data->lock);
-	gk20a_put(g);
-	nvgpu_kfree(g, event_id_data);
-	filp->private_data = NULL;
-
-	return 0;
-}
-
-const struct file_operations gk20a_event_id_ops = {
-	.owner = THIS_MODULE,
-	.poll = gk20a_event_id_poll,
-	.release = gk20a_event_id_release,
-};
-
-static int gk20a_channel_get_event_data_from_id(struct channel_gk20a *ch,
-				u32 event_id,
-				struct gk20a_event_id_data **event_id_data)
-{
-	struct gk20a_event_id_data *local_event_id_data;
-	bool event_found = false;
-
-	nvgpu_mutex_acquire(&ch->event_id_list_lock);
-	nvgpu_list_for_each_entry(local_event_id_data, &ch->event_id_list,
-			gk20a_event_id_data, event_id_node) {
-		if (local_event_id_data->event_id == event_id) {
-			event_found = true;
-			break;
-		}
-	}
-	nvgpu_mutex_release(&ch->event_id_list_lock);
-
-	if (event_found) {
-		*event_id_data = local_event_id_data;
-		return 0;
-	} else {
-		return -1;
-	}
-}
-
-/*
- * Convert common event_id of the form NVGPU_EVENT_ID_* to Linux specific
- * event_id of the form NVGPU_IOCTL_CHANNEL_EVENT_ID_* which is used in IOCTLs
- */
-u32 nvgpu_event_id_to_ioctl_channel_event_id(u32 event_id)
-{
-	switch (event_id) {
-	case NVGPU_EVENT_ID_BPT_INT:
-		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_INT;
-	case NVGPU_EVENT_ID_BPT_PAUSE:
-		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_PAUSE;
-	case NVGPU_EVENT_ID_BLOCKING_SYNC:
-		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BLOCKING_SYNC;
-	case NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED:
-		return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_STARTED;
-	case NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE:
-		return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_COMPLETE;
-	case NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN:
-		return NVGPU_IOCTL_CHANNEL_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN;
-	}
-
-	return NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX;
-}
-
-void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
-				       u32 __event_id)
-{
-	struct gk20a_event_id_data *event_id_data;
-	u32 event_id;
-	int err = 0;
-
-	event_id = nvgpu_event_id_to_ioctl_channel_event_id(__event_id);
-	if (event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
-		return;
-
-	err = gk20a_channel_get_event_data_from_id(ch, event_id,
-				&event_id_data);
-	if (err)
-		return;
-
-	nvgpu_mutex_acquire(&event_id_data->lock);
-
-	gk20a_dbg_info(
-		"posting event for event_id=%d on ch=%d\n",
-		event_id, ch->chid);
-	event_id_data->event_posted = true;
-
-	nvgpu_cond_broadcast_interruptible(&event_id_data->event_id_wq);
-
-	nvgpu_mutex_release(&event_id_data->lock);
-}
-
-static int gk20a_channel_event_id_enable(struct channel_gk20a *ch,
-					 int event_id,
-					 int *fd)
-{
-	struct gk20a *g;
-	int err = 0;
-	int local_fd;
-	struct file *file;
-	char name[64];
-	struct gk20a_event_id_data *event_id_data;
-
-	g = gk20a_get(ch->g);
-	if (!g)
-		return -ENODEV;
-
-	err = gk20a_channel_get_event_data_from_id(ch,
-				event_id, &event_id_data);
-	if (err == 0) {
-		/* We already have event enabled */
-		err = -EINVAL;
-		goto free_ref;
-	}
-
-	err = get_unused_fd_flags(O_RDWR);
-	if (err < 0)
-		goto free_ref;
-	local_fd = err;
-
-	snprintf(name, sizeof(name), "nvgpu-event%d-fd%d",
-		event_id, local_fd);
-	file = anon_inode_getfile(name, &gk20a_event_id_ops,
-			NULL, O_RDWR);
-	if (IS_ERR(file)) {
-		err = PTR_ERR(file);
-		goto clean_up;
-	}
-
-	event_id_data = nvgpu_kzalloc(ch->g, sizeof(*event_id_data));
-	if (!event_id_data) {
-		err = -ENOMEM;
-		goto clean_up_file;
-	}
-	event_id_data->g = g;
-	event_id_data->id = ch->chid;
-	event_id_data->is_tsg = false;
-	event_id_data->event_id = event_id;
-
-	nvgpu_cond_init(&event_id_data->event_id_wq);
-	err = nvgpu_mutex_init(&event_id_data->lock);
-	if (err)
-		goto clean_up_free;
-	nvgpu_init_list_node(&event_id_data->event_id_node);
-
-	nvgpu_mutex_acquire(&ch->event_id_list_lock);
-	nvgpu_list_add_tail(&event_id_data->event_id_node, &ch->event_id_list);
-	nvgpu_mutex_release(&ch->event_id_list_lock);
-
-	fd_install(local_fd, file);
-	file->private_data = event_id_data;
-
-	*fd = local_fd;
-
-	return 0;
-
-clean_up_free:
-	nvgpu_kfree(g, event_id_data);
-clean_up_file:
-	fput(file);
-clean_up:
-	put_unused_fd(local_fd);
-free_ref:
-	gk20a_put(g);
-	return err;
-}
-
-static int gk20a_channel_event_id_ctrl(struct channel_gk20a *ch,
-		struct nvgpu_event_id_ctrl_args *args)
-{
-	int err = 0;
-	int fd = -1;
-
-	if (args->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
-		return -EINVAL;
-
-	if (gk20a_is_channel_marked_as_tsg(ch))
-		return -EINVAL;
-
-	switch (args->cmd) {
-	case NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE:
-		err = gk20a_channel_event_id_enable(ch, args->event_id, &fd);
-		if (!err)
-			args->event_fd = fd;
-		break;
-
-	default:
-		nvgpu_err(ch->g,
-			"unrecognized channel event id cmd: 0x%x",
-			args->cmd);
-		err = -EINVAL;
-		break;
-	}
-
-	return err;
-}
-
 static int gk20a_channel_zcull_bind(struct channel_gk20a *ch,
 			struct nvgpu_zcull_bind_args *args)
 {
@@ -1467,10 +1211,6 @@ long gk20a_channel_ioctl(struct file *filp,
 			NVGPU_ERR_NOTIFIER_RESETCHANNEL_VERIF_ERROR, true);
 		gk20a_idle(ch->g);
 		break;
-	case NVGPU_IOCTL_CHANNEL_EVENT_ID_CTRL:
-		err = gk20a_channel_event_id_ctrl(ch,
-			(struct nvgpu_event_id_ctrl_args *)buf);
-		break;
 #ifdef CONFIG_GK20A_CYCLE_STATS
 	case NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT:
 		err = gk20a_busy(ch->g);
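The poll/post pair deleted above (and re-added TSG-only in ioctl_tsg.c below) implements a one-shot latch: the posting side sets event_posted and broadcasts on the wait queue under the mutex, and the first poller to observe the flag clears it, so each post yields exactly one POLLPRI | POLLIN wakeup. A minimal userspace model of that contract, with hypothetical names and pthreads standing in for nvgpu_mutex/nvgpu_cond:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static bool event_posted;

/* Mirrors gk20a_channel_event_id_post_event(): latch the flag, wake waiters. */
static void post_event(void)
{
	pthread_mutex_lock(&lock);
	event_posted = true;
	pthread_cond_broadcast(&wq);
	pthread_mutex_unlock(&lock);
}

/* Mirrors gk20a_event_id_poll(): the reader that sees the flag consumes it,
 * so a single post satisfies exactly one poll. */
static bool poll_event(void)
{
	bool pending;

	pthread_mutex_lock(&lock);
	pending = event_posted;
	event_posted = false;
	pthread_mutex_unlock(&lock);
	return pending;
}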
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_channel.h b/drivers/gpu/nvgpu/common/linux/ioctl_channel.h
index c37108c4..48cff1ea 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_channel.h
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_channel.h
@@ -39,10 +39,8 @@ int gk20a_channel_open_ioctl(struct gk20a *g,
 int gk20a_channel_free_cycle_stats_snapshot(struct channel_gk20a *ch);
 void gk20a_channel_free_cycle_stats_buffer(struct channel_gk20a *ch);
 
-extern const struct file_operations gk20a_event_id_ops;
 extern const struct file_operations gk20a_channel_ops;
 
-u32 nvgpu_event_id_to_ioctl_channel_event_id(u32 event_id);
 u32 nvgpu_get_common_runlist_level(u32 level);
 
 u32 nvgpu_get_ioctl_graphics_preempt_mode_flags(u32 graphics_preempt_mode_flags);
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c b/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
index b17d7e74..445199c2 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
@@ -18,6 +18,7 @@
 #include <linux/file.h>
 #include <linux/cdev.h>
 #include <linux/uaccess.h>
+#include <linux/poll.h>
 #include <uapi/linux/nvgpu.h>
 #include <linux/anon_inodes.h>
 
@@ -79,6 +80,30 @@ static int gk20a_tsg_get_event_data_from_id(struct tsg_gk20a *tsg,
 	}
 }
 
+/*
+ * Convert common event_id of the form NVGPU_EVENT_ID_* to Linux specific
+ * event_id of the form NVGPU_IOCTL_CHANNEL_EVENT_ID_* which is used in IOCTLs
+ */
+static u32 nvgpu_event_id_to_ioctl_channel_event_id(u32 event_id)
+{
+	switch (event_id) {
+	case NVGPU_EVENT_ID_BPT_INT:
+		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_INT;
+	case NVGPU_EVENT_ID_BPT_PAUSE:
+		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_PAUSE;
+	case NVGPU_EVENT_ID_BLOCKING_SYNC:
+		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BLOCKING_SYNC;
+	case NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED:
+		return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_STARTED;
+	case NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE:
+		return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_COMPLETE;
+	case NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN:
+		return NVGPU_IOCTL_CHANNEL_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN;
+	}
+
+	return NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX;
+}
+
 void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
 		       int __event_id)
 {
@@ -107,6 +132,57 @@ void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
 	nvgpu_mutex_release(&event_id_data->lock);
 }
 
+static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
+{
+	unsigned int mask = 0;
+	struct gk20a_event_id_data *event_id_data = filep->private_data;
+	struct gk20a *g = event_id_data->g;
+	u32 event_id = event_id_data->event_id;
+	struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
+
+	gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, "");
+
+	poll_wait(filep, &event_id_data->event_id_wq.wq, wait);
+
+	nvgpu_mutex_acquire(&event_id_data->lock);
+
+	if (event_id_data->event_posted) {
+		gk20a_dbg_info(
+			"found pending event_id=%d on TSG=%d\n",
+			event_id, tsg->tsgid);
+		mask = (POLLPRI | POLLIN);
+		event_id_data->event_posted = false;
+	}
+
+	nvgpu_mutex_release(&event_id_data->lock);
+
+	return mask;
+}
+
+static int gk20a_event_id_release(struct inode *inode, struct file *filp)
+{
+	struct gk20a_event_id_data *event_id_data = filp->private_data;
+	struct gk20a *g = event_id_data->g;
+	struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
+
+	nvgpu_mutex_acquire(&tsg->event_id_list_lock);
+	nvgpu_list_del(&event_id_data->event_id_node);
+	nvgpu_mutex_release(&tsg->event_id_list_lock);
+
+	nvgpu_mutex_destroy(&event_id_data->lock);
+	gk20a_put(g);
+	nvgpu_kfree(g, event_id_data);
+	filp->private_data = NULL;
+
+	return 0;
+}
+
+const struct file_operations gk20a_event_id_ops = {
+	.owner = THIS_MODULE,
+	.poll = gk20a_event_id_poll,
+	.release = gk20a_event_id_release,
+};
+
 static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg,
 		int event_id,
 		int *fd)
@@ -152,7 +228,6 @@ static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg,
 	}
 	event_id_data->g = g;
 	event_id_data->id = tsg->tsgid;
-	event_id_data->is_tsg = true;
 	event_id_data->event_id = event_id;
 
 	nvgpu_cond_init(&event_id_data->event_id_wq);
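nvgpu_event_id_to_ioctl_channel_event_id(), now private to ioctl_tsg.c, signals "no mapping" by returning the out-of-range sentinel NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX; callers are expected to range-check the result and silently drop unmapped events, exactly as the deleted gk20a_channel_event_id_post_event() did. A hypothetical helper spelling out that contract:

/* Hypothetical wrapper, not in the patch: a common NVGPU_EVENT_ID_* value
 * has a Linux IOCTL counterpart only if the conversion stays below the
 * NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX sentinel. */
static bool nvgpu_ioctl_channel_event_id_is_mapped(u32 common_event_id)
{
	return nvgpu_event_id_to_ioctl_channel_event_id(common_event_id) <
		NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX;
}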
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c
index 3ff68ec2..7900f53f 100644
--- a/drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c
+++ b/drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c
@@ -125,27 +125,22 @@ int vgpu_get_attribute(u64 handle, u32 attrib, u32 *value)
 static void vgpu_handle_channel_event(struct gk20a *g,
 			struct tegra_vgpu_channel_event_info *info)
 {
+	struct tsg_gk20a *tsg;
+
+	if (!info->is_tsg) {
+		nvgpu_err(g, "channel event posted");
+		return;
+	}
+
 	if (info->id >= g->fifo.num_channels ||
 		info->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX) {
 		nvgpu_err(g, "invalid channel event");
 		return;
 	}
 
-	if (info->is_tsg) {
-		struct tsg_gk20a *tsg = &g->fifo.tsg[info->id];
+	tsg = &g->fifo.tsg[info->id];
 
-		gk20a_tsg_event_id_post_event(tsg, info->event_id);
-	} else {
-		struct channel_gk20a *ch = &g->fifo.channel[info->id];
-
-		if (!gk20a_channel_get(ch)) {
-			nvgpu_err(g, "invalid channel %d for event %d",
-				(int)info->id, (int)info->event_id);
-			return;
-		}
-		gk20a_channel_event_id_post_event(ch, info->event_id);
-		gk20a_channel_put(ch);
-	}
+	gk20a_tsg_event_id_post_event(tsg, info->event_id);
 }
 
 
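For readability, here is the vgpu handler as it reads once the hunk above is applied (assembled from the context and added lines, not new code): non-TSG events are now rejected up front by a guard clause instead of being dispatched through the old if/else.

static void vgpu_handle_channel_event(struct gk20a *g,
			struct tegra_vgpu_channel_event_info *info)
{
	struct tsg_gk20a *tsg;

	/* Per-channel events are gone; only TSG events are serviced. */
	if (!info->is_tsg) {
		nvgpu_err(g, "channel event posted");
		return;
	}

	if (info->id >= g->fifo.num_channels ||
		info->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX) {
		nvgpu_err(g, "invalid channel event");
		return;
	}

	tsg = &g->fifo.tsg[info->id];

	gk20a_tsg_event_id_post_event(tsg, info->event_id);
}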