author	Terje Bergstrom <tbergstrom@nvidia.com>	2017-12-15 13:25:22 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-12-28 13:01:32 -0500
commit	f19f22fcc8ef21b363b873c499cbd2e690af29f8 (patch)
tree	02d6b8280af305d0339ed860e331ab091d4b49d2 /drivers/gpu/nvgpu
parent	aa52601f620423fdd98b79e2c2c5e1d767a5f685 (diff)
gpu: nvgpu: Remove support for channel events
Remove support for events for bare channels. All users have already
moved to TSGs and TSG events.

Bug 1842197

Change-Id: Ib3ff68134ad9515ee761d0f0e19a3150a0b744ab
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1618906
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r--	drivers/gpu/nvgpu/common/linux/ioctl_channel.c	260
-rw-r--r--	drivers/gpu/nvgpu/common/linux/ioctl_channel.h	2
-rw-r--r--	drivers/gpu/nvgpu/common/linux/ioctl_tsg.c	77
-rw-r--r--	drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c	23
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_gk20a.c	22
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_gk20a.h	26
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	1
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	34
-rw-r--r--	drivers/gpu/nvgpu/gk20a/tsg_gk20a.h	19
-rw-r--r--	drivers/gpu/nvgpu/gp10b/gr_gp10b.c	24
10 files changed, 122 insertions, 366 deletions
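
For reference, the userspace flow that replaces bare-channel events: instead of
issuing NVGPU_IOCTL_CHANNEL_EVENT_ID_CTRL on a channel fd, a client enables the
event on its TSG fd and polls the event fd the driver returns. A minimal
sketch, assuming the TSG-side ioctl is named NVGPU_IOCTL_TSG_EVENT_ID_CTRL and
reuses struct nvgpu_event_id_ctrl_args (inferred from the TSG event code this
change keeps, not verified against the uapi header):

	#include <poll.h>
	#include <sys/ioctl.h>
	#include <linux/nvgpu.h>	/* uapi header; TSG ioctl name below is assumed */

	static int wait_for_tsg_event(int tsg_fd, unsigned int event_id)
	{
		struct nvgpu_event_id_ctrl_args args = {
			.cmd = NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE,
			.event_id = event_id,
		};
		struct pollfd pfd;

		/* Hypothetical TSG ioctl; the channel variant is removed below. */
		if (ioctl(tsg_fd, NVGPU_IOCTL_TSG_EVENT_ID_CTRL, &args) < 0)
			return -1;

		pfd.fd = args.event_fd;		/* fd installed by gk20a_tsg_event_id_enable() */
		pfd.events = POLLIN | POLLPRI;	/* mask posted by gk20a_event_id_poll() */

		return poll(&pfd, 1, -1);	/* blocks until the event is posted */
	}
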
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
index 5319b829..65d560c7 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
@@ -709,262 +709,6 @@ notif_clean_up:
 	return ret;
 }
 
-static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
-{
-	unsigned int mask = 0;
-	struct gk20a_event_id_data *event_id_data = filep->private_data;
-	struct gk20a *g = event_id_data->g;
-	u32 event_id = event_id_data->event_id;
-
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, "");
-
-	poll_wait(filep, &event_id_data->event_id_wq.wq, wait);
-
-	nvgpu_mutex_acquire(&event_id_data->lock);
-
-	if (event_id_data->is_tsg) {
-		struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
-
-		if (event_id_data->event_posted) {
-			gk20a_dbg_info(
-				"found pending event_id=%d on TSG=%d\n",
-				event_id, tsg->tsgid);
-			mask = (POLLPRI | POLLIN);
-			event_id_data->event_posted = false;
-		}
-	} else {
-		struct channel_gk20a *ch = g->fifo.channel
-			+ event_id_data->id;
-
-		if (event_id_data->event_posted) {
-			gk20a_dbg_info(
-				"found pending event_id=%d on chid=%d\n",
-				event_id, ch->chid);
-			mask = (POLLPRI | POLLIN);
-			event_id_data->event_posted = false;
-		}
-	}
-
-	nvgpu_mutex_release(&event_id_data->lock);
-
-	return mask;
-}
-
-static int gk20a_event_id_release(struct inode *inode, struct file *filp)
-{
-	struct gk20a_event_id_data *event_id_data = filp->private_data;
-	struct gk20a *g = event_id_data->g;
-
-	if (event_id_data->is_tsg) {
-		struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
-
-		nvgpu_mutex_acquire(&tsg->event_id_list_lock);
-		nvgpu_list_del(&event_id_data->event_id_node);
-		nvgpu_mutex_release(&tsg->event_id_list_lock);
-	} else {
-		struct channel_gk20a *ch = g->fifo.channel + event_id_data->id;
-
-		nvgpu_mutex_acquire(&ch->event_id_list_lock);
-		nvgpu_list_del(&event_id_data->event_id_node);
-		nvgpu_mutex_release(&ch->event_id_list_lock);
-	}
-
-	nvgpu_mutex_destroy(&event_id_data->lock);
-	gk20a_put(g);
-	nvgpu_kfree(g, event_id_data);
-	filp->private_data = NULL;
-
-	return 0;
-}
-
-const struct file_operations gk20a_event_id_ops = {
-	.owner = THIS_MODULE,
-	.poll = gk20a_event_id_poll,
-	.release = gk20a_event_id_release,
-};
-
-static int gk20a_channel_get_event_data_from_id(struct channel_gk20a *ch,
-				u32 event_id,
-				struct gk20a_event_id_data **event_id_data)
-{
-	struct gk20a_event_id_data *local_event_id_data;
-	bool event_found = false;
-
-	nvgpu_mutex_acquire(&ch->event_id_list_lock);
-	nvgpu_list_for_each_entry(local_event_id_data, &ch->event_id_list,
-				gk20a_event_id_data, event_id_node) {
-		if (local_event_id_data->event_id == event_id) {
-			event_found = true;
-			break;
-		}
-	}
-	nvgpu_mutex_release(&ch->event_id_list_lock);
-
-	if (event_found) {
-		*event_id_data = local_event_id_data;
-		return 0;
-	} else {
-		return -1;
-	}
-}
-
-/*
- * Convert common event_id of the form NVGPU_EVENT_ID_* to Linux specific
- * event_id of the form NVGPU_IOCTL_CHANNEL_EVENT_ID_* which is used in IOCTLs
- */
-u32 nvgpu_event_id_to_ioctl_channel_event_id(u32 event_id)
-{
-	switch (event_id) {
-	case NVGPU_EVENT_ID_BPT_INT:
-		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_INT;
-	case NVGPU_EVENT_ID_BPT_PAUSE:
-		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_PAUSE;
-	case NVGPU_EVENT_ID_BLOCKING_SYNC:
-		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BLOCKING_SYNC;
-	case NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED:
-		return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_STARTED;
-	case NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE:
-		return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_COMPLETE;
-	case NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN:
-		return NVGPU_IOCTL_CHANNEL_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN;
-	}
-
-	return NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX;
-}
-
-void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
-				       u32 __event_id)
-{
-	struct gk20a_event_id_data *event_id_data;
-	u32 event_id;
-	int err = 0;
-
-	event_id = nvgpu_event_id_to_ioctl_channel_event_id(__event_id);
-	if (event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
-		return;
-
-	err = gk20a_channel_get_event_data_from_id(ch, event_id,
-				&event_id_data);
-	if (err)
-		return;
-
-	nvgpu_mutex_acquire(&event_id_data->lock);
-
-	gk20a_dbg_info(
-		"posting event for event_id=%d on ch=%d\n",
-		event_id, ch->chid);
-	event_id_data->event_posted = true;
-
-	nvgpu_cond_broadcast_interruptible(&event_id_data->event_id_wq);
-
-	nvgpu_mutex_release(&event_id_data->lock);
-}
-
-static int gk20a_channel_event_id_enable(struct channel_gk20a *ch,
-					 int event_id,
-					 int *fd)
-{
-	struct gk20a *g;
-	int err = 0;
-	int local_fd;
-	struct file *file;
-	char name[64];
-	struct gk20a_event_id_data *event_id_data;
-
-	g = gk20a_get(ch->g);
-	if (!g)
-		return -ENODEV;
-
-	err = gk20a_channel_get_event_data_from_id(ch,
-				event_id, &event_id_data);
-	if (err == 0) {
-		/* We already have event enabled */
-		err = -EINVAL;
-		goto free_ref;
-	}
-
-	err = get_unused_fd_flags(O_RDWR);
-	if (err < 0)
-		goto free_ref;
-	local_fd = err;
-
-	snprintf(name, sizeof(name), "nvgpu-event%d-fd%d",
-		 event_id, local_fd);
-	file = anon_inode_getfile(name, &gk20a_event_id_ops,
-				  NULL, O_RDWR);
-	if (IS_ERR(file)) {
-		err = PTR_ERR(file);
-		goto clean_up;
-	}
-
-	event_id_data = nvgpu_kzalloc(ch->g, sizeof(*event_id_data));
-	if (!event_id_data) {
-		err = -ENOMEM;
-		goto clean_up_file;
-	}
-	event_id_data->g = g;
-	event_id_data->id = ch->chid;
-	event_id_data->is_tsg = false;
-	event_id_data->event_id = event_id;
-
-	nvgpu_cond_init(&event_id_data->event_id_wq);
-	err = nvgpu_mutex_init(&event_id_data->lock);
-	if (err)
-		goto clean_up_free;
-	nvgpu_init_list_node(&event_id_data->event_id_node);
-
-	nvgpu_mutex_acquire(&ch->event_id_list_lock);
-	nvgpu_list_add_tail(&event_id_data->event_id_node, &ch->event_id_list);
-	nvgpu_mutex_release(&ch->event_id_list_lock);
-
-	fd_install(local_fd, file);
-	file->private_data = event_id_data;
-
-	*fd = local_fd;
-
-	return 0;
-
-clean_up_free:
-	nvgpu_kfree(g, event_id_data);
-clean_up_file:
-	fput(file);
-clean_up:
-	put_unused_fd(local_fd);
-free_ref:
-	gk20a_put(g);
-	return err;
-}
-
-static int gk20a_channel_event_id_ctrl(struct channel_gk20a *ch,
-		struct nvgpu_event_id_ctrl_args *args)
-{
-	int err = 0;
-	int fd = -1;
-
-	if (args->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
-		return -EINVAL;
-
-	if (gk20a_is_channel_marked_as_tsg(ch))
-		return -EINVAL;
-
-	switch (args->cmd) {
-	case NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE:
-		err = gk20a_channel_event_id_enable(ch, args->event_id, &fd);
-		if (!err)
-			args->event_fd = fd;
-		break;
-
-	default:
-		nvgpu_err(ch->g,
-			   "unrecognized channel event id cmd: 0x%x",
-			   args->cmd);
-		err = -EINVAL;
-		break;
-	}
-
-	return err;
-}
-
 static int gk20a_channel_zcull_bind(struct channel_gk20a *ch,
 		struct nvgpu_zcull_bind_args *args)
 {
@@ -1467,10 +1211,6 @@ long gk20a_channel_ioctl(struct file *filp,
 			NVGPU_ERR_NOTIFIER_RESETCHANNEL_VERIF_ERROR, true);
 		gk20a_idle(ch->g);
 		break;
-	case NVGPU_IOCTL_CHANNEL_EVENT_ID_CTRL:
-		err = gk20a_channel_event_id_ctrl(ch,
-			(struct nvgpu_event_id_ctrl_args *)buf);
-		break;
 #ifdef CONFIG_GK20A_CYCLE_STATS
 	case NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT:
 		err = gk20a_busy(ch->g);
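
A side note on the removed code: gk20a_channel_event_id_enable() above is an
instance of the kernel's standard anonymous-inode fd pattern: reserve an fd
number, wrap a private object in an anon inode file, and publish the fd with
fd_install() only after every fallible step has succeeded, so userspace never
sees a half-constructed file. A condensed, generic sketch of that pattern
(struct my_obj and the "my-obj" name are placeholders, not nvgpu code):

	#include <linux/anon_inodes.h>
	#include <linux/err.h>
	#include <linux/file.h>
	#include <linux/fs.h>

	struct my_obj;	/* placeholder for the driver-private object */

	static int my_obj_create_fd(struct my_obj *obj,
				    const struct file_operations *fops)
	{
		struct file *file;
		int fd = get_unused_fd_flags(O_RDWR);	/* reserve the number only */

		if (fd < 0)
			return fd;

		file = anon_inode_getfile("my-obj", fops, obj, O_RDWR);
		if (IS_ERR(file)) {
			put_unused_fd(fd);	/* fd was never visible to userspace */
			return PTR_ERR(file);
		}

		fd_install(fd, file);		/* publish; no unwinding past this */
		return fd;
	}
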
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_channel.h b/drivers/gpu/nvgpu/common/linux/ioctl_channel.h
index c37108c4..48cff1ea 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_channel.h
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_channel.h
@@ -39,10 +39,8 @@ int gk20a_channel_open_ioctl(struct gk20a *g,
 int gk20a_channel_free_cycle_stats_snapshot(struct channel_gk20a *ch);
 void gk20a_channel_free_cycle_stats_buffer(struct channel_gk20a *ch);
 
-extern const struct file_operations gk20a_event_id_ops;
 extern const struct file_operations gk20a_channel_ops;
 
-u32 nvgpu_event_id_to_ioctl_channel_event_id(u32 event_id);
 u32 nvgpu_get_common_runlist_level(u32 level);
 
 u32 nvgpu_get_ioctl_graphics_preempt_mode_flags(u32 graphics_preempt_mode_flags);
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c b/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
index b17d7e74..445199c2 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
@@ -18,6 +18,7 @@
 #include <linux/file.h>
 #include <linux/cdev.h>
 #include <linux/uaccess.h>
+#include <linux/poll.h>
 #include <uapi/linux/nvgpu.h>
 #include <linux/anon_inodes.h>
 
@@ -79,6 +80,30 @@ static int gk20a_tsg_get_event_data_from_id(struct tsg_gk20a *tsg,
 	}
 }
 
+/*
+ * Convert common event_id of the form NVGPU_EVENT_ID_* to Linux specific
+ * event_id of the form NVGPU_IOCTL_CHANNEL_EVENT_ID_* which is used in IOCTLs
+ */
+static u32 nvgpu_event_id_to_ioctl_channel_event_id(u32 event_id)
+{
+	switch (event_id) {
+	case NVGPU_EVENT_ID_BPT_INT:
+		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_INT;
+	case NVGPU_EVENT_ID_BPT_PAUSE:
+		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_PAUSE;
+	case NVGPU_EVENT_ID_BLOCKING_SYNC:
+		return NVGPU_IOCTL_CHANNEL_EVENT_ID_BLOCKING_SYNC;
+	case NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED:
+		return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_STARTED;
+	case NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE:
+		return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_COMPLETE;
+	case NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN:
+		return NVGPU_IOCTL_CHANNEL_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN;
+	}
+
+	return NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX;
+}
+
 void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
 		int __event_id)
 {
@@ -107,6 +132,57 @@ void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
 	nvgpu_mutex_release(&event_id_data->lock);
 }
 
+static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
+{
+	unsigned int mask = 0;
+	struct gk20a_event_id_data *event_id_data = filep->private_data;
+	struct gk20a *g = event_id_data->g;
+	u32 event_id = event_id_data->event_id;
+	struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
+
+	gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, "");
+
+	poll_wait(filep, &event_id_data->event_id_wq.wq, wait);
+
+	nvgpu_mutex_acquire(&event_id_data->lock);
+
+	if (event_id_data->event_posted) {
+		gk20a_dbg_info(
+			"found pending event_id=%d on TSG=%d\n",
+			event_id, tsg->tsgid);
+		mask = (POLLPRI | POLLIN);
+		event_id_data->event_posted = false;
+	}
+
+	nvgpu_mutex_release(&event_id_data->lock);
+
+	return mask;
+}
+
+static int gk20a_event_id_release(struct inode *inode, struct file *filp)
+{
+	struct gk20a_event_id_data *event_id_data = filp->private_data;
+	struct gk20a *g = event_id_data->g;
+	struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
+
+	nvgpu_mutex_acquire(&tsg->event_id_list_lock);
+	nvgpu_list_del(&event_id_data->event_id_node);
+	nvgpu_mutex_release(&tsg->event_id_list_lock);
+
+	nvgpu_mutex_destroy(&event_id_data->lock);
+	gk20a_put(g);
+	nvgpu_kfree(g, event_id_data);
+	filp->private_data = NULL;
+
+	return 0;
+}
+
+const struct file_operations gk20a_event_id_ops = {
+	.owner = THIS_MODULE,
+	.poll = gk20a_event_id_poll,
+	.release = gk20a_event_id_release,
+};
+
 static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg,
 		int event_id,
 		int *fd)
@@ -152,7 +228,6 @@ static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg,
 	}
 	event_id_data->g = g;
 	event_id_data->id = tsg->tsgid;
-	event_id_data->is_tsg = true;
 	event_id_data->event_id = event_id;
 
 	nvgpu_cond_init(&event_id_data->event_id_wq);
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c
index 3ff68ec2..7900f53f 100644
--- a/drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c
+++ b/drivers/gpu/nvgpu/common/linux/vgpu/vgpu.c
@@ -125,27 +125,22 @@ int vgpu_get_attribute(u64 handle, u32 attrib, u32 *value)
 static void vgpu_handle_channel_event(struct gk20a *g,
 		struct tegra_vgpu_channel_event_info *info)
 {
+	struct tsg_gk20a *tsg;
+
+	if (!info->is_tsg) {
+		nvgpu_err(g, "channel event posted");
+		return;
+	}
+
 	if (info->id >= g->fifo.num_channels ||
 			info->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX) {
 		nvgpu_err(g, "invalid channel event");
 		return;
 	}
 
-	if (info->is_tsg) {
-		struct tsg_gk20a *tsg = &g->fifo.tsg[info->id];
+	tsg = &g->fifo.tsg[info->id];
 
 	gk20a_tsg_event_id_post_event(tsg, info->event_id);
-	} else {
-		struct channel_gk20a *ch = &g->fifo.channel[info->id];
-
-		if (!gk20a_channel_get(ch)) {
-			nvgpu_err(g, "invalid channel %d for event %d",
-				(int)info->id, (int)info->event_id);
-			return;
-		}
-		gk20a_channel_event_id_post_event(ch, info->event_id);
-		gk20a_channel_put(ch);
-	}
 }
 
 
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index e10be3c9..07ae5a16 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -689,7 +689,6 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 {
 	struct fifo_gk20a *f = &g->fifo;
 	struct channel_gk20a *ch;
-	struct gk20a_event_id_data *event_id_data, *event_id_data_temp;
 
 	/* compatibility with existing code */
 	if (!gk20a_fifo_is_valid_runlist_id(g, runlist_id)) {
@@ -730,16 +729,6 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 	ch->pid = current->pid;
 	ch->tgid = current->tgid;	/* process granularity for FECS traces */
 
-	/* unhook all events created on this channel */
-	nvgpu_mutex_acquire(&ch->event_id_list_lock);
-	nvgpu_list_for_each_entry_safe(event_id_data, event_id_data_temp,
-			&ch->event_id_list,
-			gk20a_event_id_data,
-			event_id_node) {
-		nvgpu_list_del(&event_id_data->event_id_node);
-	}
-	nvgpu_mutex_release(&ch->event_id_list_lock);
-
 	/* By default, channel is regular (non-TSG) channel */
 	ch->tsgid = NVGPU_INVALID_TSG_ID;
 
@@ -2134,7 +2123,6 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 
 	nvgpu_init_list_node(&c->joblist.dynamic.jobs);
 	nvgpu_init_list_node(&c->dbg_s_list);
-	nvgpu_init_list_node(&c->event_id_list);
 	nvgpu_init_list_node(&c->worker_item);
 
 	err = nvgpu_mutex_init(&c->ioctl_lock);
@@ -2157,19 +2145,14 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 	if (err)
 		goto fail_5;
 #endif
-	err = nvgpu_mutex_init(&c->event_id_list_lock);
-	if (err)
-		goto fail_6;
 	err = nvgpu_mutex_init(&c->dbg_s_lock);
 	if (err)
-		goto fail_7;
+		goto fail_6;
 
 	nvgpu_list_add(&c->free_chs, &g->fifo.free_chs);
 
 	return 0;
 
-fail_7:
-	nvgpu_mutex_destroy(&c->event_id_list_lock);
 fail_6:
 #if defined(CONFIG_GK20A_CYCLE_STATS)
 	nvgpu_mutex_destroy(&c->cs_client_mutex);
@@ -2286,9 +2269,6 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
 
 			gk20a_tsg_event_id_post_event(tsg,
 				NVGPU_EVENT_ID_BLOCKING_SYNC);
-		} else {
-			gk20a_channel_event_id_post_event(c,
-				NVGPU_EVENT_ID_BLOCKING_SYNC);
 		}
 	}
 	/*
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index e6f73cf6..b43c5638 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -124,27 +124,6 @@ struct channel_gk20a_timeout {
 	u64 pb_get;
 };
 
-struct gk20a_event_id_data {
-	struct gk20a *g;
-
-	int id; /* ch or tsg */
-	bool is_tsg;
-	u32 event_id;
-
-	bool event_posted;
-
-	struct nvgpu_cond event_id_wq;
-	struct nvgpu_mutex lock;
-	struct nvgpu_list_node event_id_node;
-};
-
-static inline struct gk20a_event_id_data *
-gk20a_event_id_data_from_event_id_node(struct nvgpu_list_node *node)
-{
-	return (struct gk20a_event_id_data *)
-		((uintptr_t)node - offsetof(struct gk20a_event_id_data, event_id_node));
-};
-
 /*
  * Track refcount actions, saving their stack traces. This number specifies how
  * many most recent actions are stored in a buffer. Set to 0 to disable. 128
@@ -265,9 +244,6 @@ struct channel_gk20a {
 	struct nvgpu_mutex dbg_s_lock;
 	struct nvgpu_list_node dbg_s_list;
 
-	struct nvgpu_list_node event_id_list;
-	struct nvgpu_mutex event_id_list_lock;
-
 	bool has_timedout;
 	u32 timeout_ms_max;
 	bool timeout_debug_dump;
@@ -385,8 +361,6 @@ int gk20a_channel_get_timescale_from_timeslice(struct gk20a *g,
 		int *__timeslice_timeout, int *__timeslice_scale);
 int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch,
 		u32 level);
-void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
-		u32 event_id);
 
 int channel_gk20a_alloc_job(struct channel_gk20a *c,
 		struct channel_gk20a_job **job_out);
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index c5c06df9..fc71e907 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -564,7 +564,6 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
 		nvgpu_mutex_destroy(&c->cyclestate.cyclestate_buffer_mutex);
 		nvgpu_mutex_destroy(&c->cs_client_mutex);
 #endif
-		nvgpu_mutex_destroy(&c->event_id_list_lock);
 		nvgpu_mutex_destroy(&c->dbg_s_lock);
 
 	}
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index ea4d1d24..f07a54b1 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -5256,16 +5256,10 @@ static int gk20a_gr_handle_semaphore_pending(struct gk20a *g,
 {
 	struct fifo_gk20a *f = &g->fifo;
 	struct channel_gk20a *ch = &f->channel[isr_data->chid];
+	struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
 
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
-
-		gk20a_tsg_event_id_post_event(tsg,
-			NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN);
-	} else {
-		gk20a_channel_event_id_post_event(ch,
-			NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN);
-	}
+	gk20a_tsg_event_id_post_event(tsg,
+		NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN);
 
 	nvgpu_cond_broadcast(&ch->semaphore_wq);
 
@@ -5806,26 +5800,16 @@ static int gk20a_gr_post_bpt_events(struct gk20a *g, struct channel_gk20a *ch,
 		u32 global_esr)
 {
 	if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f()) {
-		if (gk20a_is_channel_marked_as_tsg(ch)) {
-			struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
+		struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
 
 		gk20a_tsg_event_id_post_event(tsg,
 			NVGPU_EVENT_ID_BPT_INT);
-		} else {
-			gk20a_channel_event_id_post_event(ch,
-				NVGPU_EVENT_ID_BPT_INT);
-		}
 	}
 	if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_bpt_pause_pending_f()) {
-		if (gk20a_is_channel_marked_as_tsg(ch)) {
-			struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
+		struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
 
 		gk20a_tsg_event_id_post_event(tsg,
 			NVGPU_EVENT_ID_BPT_PAUSE);
-		} else {
-			gk20a_channel_event_id_post_event(ch,
-				NVGPU_EVENT_ID_BPT_PAUSE);
-		}
 	}
 
 	return 0;
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
index 5e2b9b82..08fe0365 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
@@ -87,5 +87,24 @@ u32 gk20a_tsg_get_timeslice(struct tsg_gk20a *tsg);
 int gk20a_tsg_set_priority(struct gk20a *g, struct tsg_gk20a *tsg,
 		u32 priority);
 
+struct gk20a_event_id_data {
+	struct gk20a *g;
+
+	int id; /* ch or tsg */
+	u32 event_id;
+
+	bool event_posted;
+
+	struct nvgpu_cond event_id_wq;
+	struct nvgpu_mutex lock;
+	struct nvgpu_list_node event_id_node;
+};
+
+static inline struct gk20a_event_id_data *
+gk20a_event_id_data_from_event_id_node(struct nvgpu_list_node *node)
+{
+	return (struct gk20a_event_id_data *)
+		((uintptr_t)node - offsetof(struct gk20a_event_id_data, event_id_node));
+};
 
 #endif /* __TSG_GK20A_H_ */
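
The gk20a_event_id_data_from_event_id_node() helper moved into this header is
the usual container-of idiom: given a pointer to the embedded event_id_node
member, subtract the member's offset to recover the enclosing
gk20a_event_id_data. A freestanding sketch of the same computation, with
generic names:

	#include <stddef.h>
	#include <stdint.h>

	struct list_node { struct list_node *prev, *next; };

	struct event_data {
		int id;
		struct list_node node;	/* embedded in the containing struct */
	};

	/* Recover the containing struct from a pointer to its list node. */
	static inline struct event_data *event_data_from_node(struct list_node *n)
	{
		return (struct event_data *)
			((uintptr_t)n - offsetof(struct event_data, node));
	}
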
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index 3b63626c..b5194223 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -1709,6 +1709,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 {
 	int ret;
 	struct gr_ctx_desc *gr_ctx = fault_ch->ch_ctx.gr_ctx;
+	struct tsg_gk20a *tsg;
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "");
 
@@ -1773,15 +1774,10 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 	gr_ctx->cilp_preempt_pending = true;
 	g->gr.cilp_preempt_pending_chid = fault_ch->chid;
 
-	if (gk20a_is_channel_marked_as_tsg(fault_ch)) {
-		struct tsg_gk20a *tsg = &g->fifo.tsg[fault_ch->tsgid];
+	tsg = &g->fifo.tsg[fault_ch->tsgid];
 
 	gk20a_tsg_event_id_post_event(tsg,
 		NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED);
-	} else {
-		gk20a_channel_event_id_post_event(fault_ch,
-			NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED);
-	}
 
 	return 0;
 }
@@ -1948,6 +1944,7 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g,
 	struct channel_gk20a *ch;
 	int chid = -1;
 	int ret = 0;
+	struct tsg_gk20a *tsg;
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "");
 
@@ -1984,15 +1981,10 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g,
 		/* Post events to UMD */
 		gk20a_dbg_gpu_post_events(ch);
 
-		if (gk20a_is_channel_marked_as_tsg(ch)) {
-			struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
+		tsg = &g->fifo.tsg[ch->tsgid];
 
 		gk20a_tsg_event_id_post_event(tsg,
 			NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE);
-		} else {
-			gk20a_channel_event_id_post_event(ch,
-				NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE);
-		}
 
 		gk20a_channel_put(ch);
 	}