summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
diff options
context:
space:
mode:
author	Terje Bergstrom <tbergstrom@nvidia.com>	2017-12-15 13:25:22 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-12-28 13:01:32 -0500
commit	f19f22fcc8ef21b363b873c499cbd2e690af29f8 (patch)
tree	02d6b8280af305d0339ed860e331ab091d4b49d2	/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
parent	aa52601f620423fdd98b79e2c2c5e1d767a5f685 (diff)
gpu: nvgpu: Remove support for channel events
Remove support for events for bare channels. All users have already moved to TSGs and TSG events.

Bug 1842197

Change-Id: Ib3ff68134ad9515ee761d0f0e19a3150a0b744ab
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1618906
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/ioctl_channel.c')
-rw-r--r--	drivers/gpu/nvgpu/common/linux/ioctl_channel.c | 260
1 file changed, 0 insertions(+), 260 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
index 5319b829..65d560c7 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
@@ -709,262 +709,6 @@ notif_clean_up:
709	return ret;
710}
711
712static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
713{
714 unsigned int mask = 0;
715 struct gk20a_event_id_data *event_id_data = filep->private_data;
716 struct gk20a *g = event_id_data->g;
717 u32 event_id = event_id_data->event_id;
718
719 gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, "");
720
721 poll_wait(filep, &event_id_data->event_id_wq.wq, wait);
722
723 nvgpu_mutex_acquire(&event_id_data->lock);
724
725 if (event_id_data->is_tsg) {
726 struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
727
728 if (event_id_data->event_posted) {
729 gk20a_dbg_info(
730 "found pending event_id=%d on TSG=%d\n",
731 event_id, tsg->tsgid);
732 mask = (POLLPRI | POLLIN);
733 event_id_data->event_posted = false;
734 }
735 } else {
736 struct channel_gk20a *ch = g->fifo.channel
737 + event_id_data->id;
738
739 if (event_id_data->event_posted) {
740 gk20a_dbg_info(
741 "found pending event_id=%d on chid=%d\n",
742 event_id, ch->chid);
743 mask = (POLLPRI | POLLIN);
744 event_id_data->event_posted = false;
745 }
746 }
747
748 nvgpu_mutex_release(&event_id_data->lock);
749
750 return mask;
751}
752
753static int gk20a_event_id_release(struct inode *inode, struct file *filp)
754{
755 struct gk20a_event_id_data *event_id_data = filp->private_data;
756 struct gk20a *g = event_id_data->g;
757
758 if (event_id_data->is_tsg) {
759 struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
760
761 nvgpu_mutex_acquire(&tsg->event_id_list_lock);
762 nvgpu_list_del(&event_id_data->event_id_node);
763 nvgpu_mutex_release(&tsg->event_id_list_lock);
764 } else {
765 struct channel_gk20a *ch = g->fifo.channel + event_id_data->id;
766
767 nvgpu_mutex_acquire(&ch->event_id_list_lock);
768 nvgpu_list_del(&event_id_data->event_id_node);
769 nvgpu_mutex_release(&ch->event_id_list_lock);
770 }
771
772 nvgpu_mutex_destroy(&event_id_data->lock);
773 gk20a_put(g);
774 nvgpu_kfree(g, event_id_data);
775 filp->private_data = NULL;
776
777 return 0;
778}
779
780const struct file_operations gk20a_event_id_ops = {
781 .owner = THIS_MODULE,
782 .poll = gk20a_event_id_poll,
783 .release = gk20a_event_id_release,
784};
785
786static int gk20a_channel_get_event_data_from_id(struct channel_gk20a *ch,
787 u32 event_id,
788 struct gk20a_event_id_data **event_id_data)
789{
790 struct gk20a_event_id_data *local_event_id_data;
791 bool event_found = false;
792
793 nvgpu_mutex_acquire(&ch->event_id_list_lock);
794 nvgpu_list_for_each_entry(local_event_id_data, &ch->event_id_list,
795 gk20a_event_id_data, event_id_node) {
796 if (local_event_id_data->event_id == event_id) {
797 event_found = true;
798 break;
799 }
800 }
801 nvgpu_mutex_release(&ch->event_id_list_lock);
802
803 if (event_found) {
804 *event_id_data = local_event_id_data;
805 return 0;
806 } else {
807 return -1;
808 }
809}
810
811/*
812 * Convert common event_id of the form NVGPU_EVENT_ID_* to Linux specific
813 * event_id of the form NVGPU_IOCTL_CHANNEL_EVENT_ID_* which is used in IOCTLs
814 */
815u32 nvgpu_event_id_to_ioctl_channel_event_id(u32 event_id)
816{
817 switch (event_id) {
818 case NVGPU_EVENT_ID_BPT_INT:
819 return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_INT;
820 case NVGPU_EVENT_ID_BPT_PAUSE:
821 return NVGPU_IOCTL_CHANNEL_EVENT_ID_BPT_PAUSE;
822 case NVGPU_EVENT_ID_BLOCKING_SYNC:
823 return NVGPU_IOCTL_CHANNEL_EVENT_ID_BLOCKING_SYNC;
824 case NVGPU_EVENT_ID_CILP_PREEMPTION_STARTED:
825 return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_STARTED;
826 case NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE:
827 return NVGPU_IOCTL_CHANNEL_EVENT_ID_CILP_PREEMPTION_COMPLETE;
828 case NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN:
829 return NVGPU_IOCTL_CHANNEL_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN;
830 }
831
832 return NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX;
833}
834
835void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
836 u32 __event_id)
837{
838 struct gk20a_event_id_data *event_id_data;
839 u32 event_id;
840 int err = 0;
841
842 event_id = nvgpu_event_id_to_ioctl_channel_event_id(__event_id);
843 if (event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
844 return;
845
846 err = gk20a_channel_get_event_data_from_id(ch, event_id,
847 &event_id_data);
848 if (err)
849 return;
850
851 nvgpu_mutex_acquire(&event_id_data->lock);
852
853 gk20a_dbg_info(
854 "posting event for event_id=%d on ch=%d\n",
855 event_id, ch->chid);
856 event_id_data->event_posted = true;
857
858 nvgpu_cond_broadcast_interruptible(&event_id_data->event_id_wq);
859
860 nvgpu_mutex_release(&event_id_data->lock);
861}
862
863static int gk20a_channel_event_id_enable(struct channel_gk20a *ch,
864 int event_id,
865 int *fd)
866{
867 struct gk20a *g;
868 int err = 0;
869 int local_fd;
870 struct file *file;
871 char name[64];
872 struct gk20a_event_id_data *event_id_data;
873
874 g = gk20a_get(ch->g);
875 if (!g)
876 return -ENODEV;
877
878 err = gk20a_channel_get_event_data_from_id(ch,
879 event_id, &event_id_data);
880 if (err == 0) {
881 /* We already have event enabled */
882 err = -EINVAL;
883 goto free_ref;
884 }
885
886 err = get_unused_fd_flags(O_RDWR);
887 if (err < 0)
888 goto free_ref;
889 local_fd = err;
890
891 snprintf(name, sizeof(name), "nvgpu-event%d-fd%d",
892 event_id, local_fd);
893 file = anon_inode_getfile(name, &gk20a_event_id_ops,
894 NULL, O_RDWR);
895 if (IS_ERR(file)) {
896 err = PTR_ERR(file);
897 goto clean_up;
898 }
899
900 event_id_data = nvgpu_kzalloc(ch->g, sizeof(*event_id_data));
901 if (!event_id_data) {
902 err = -ENOMEM;
903 goto clean_up_file;
904 }
905 event_id_data->g = g;
906 event_id_data->id = ch->chid;
907 event_id_data->is_tsg = false;
908 event_id_data->event_id = event_id;
909
910 nvgpu_cond_init(&event_id_data->event_id_wq);
911 err = nvgpu_mutex_init(&event_id_data->lock);
912 if (err)
913 goto clean_up_free;
914 nvgpu_init_list_node(&event_id_data->event_id_node);
915
916 nvgpu_mutex_acquire(&ch->event_id_list_lock);
917 nvgpu_list_add_tail(&event_id_data->event_id_node, &ch->event_id_list);
918 nvgpu_mutex_release(&ch->event_id_list_lock);
919
920 fd_install(local_fd, file);
921 file->private_data = event_id_data;
922
923 *fd = local_fd;
924
925 return 0;
926
927clean_up_free:
928 nvgpu_kfree(g, event_id_data);
929clean_up_file:
930 fput(file);
931clean_up:
932 put_unused_fd(local_fd);
933free_ref:
934 gk20a_put(g);
935 return err;
936}
937
938static int gk20a_channel_event_id_ctrl(struct channel_gk20a *ch,
939 struct nvgpu_event_id_ctrl_args *args)
940{
941 int err = 0;
942 int fd = -1;
943
944 if (args->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
945 return -EINVAL;
946
947 if (gk20a_is_channel_marked_as_tsg(ch))
948 return -EINVAL;
949
950 switch (args->cmd) {
951 case NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE:
952 err = gk20a_channel_event_id_enable(ch, args->event_id, &fd);
953 if (!err)
954 args->event_fd = fd;
955 break;
956
957 default:
958 nvgpu_err(ch->g,
959 "unrecognized channel event id cmd: 0x%x",
960 args->cmd);
961 err = -EINVAL;
962 break;
963 }
964
965 return err;
966}
967
968static int gk20a_channel_zcull_bind(struct channel_gk20a *ch,
969			struct nvgpu_zcull_bind_args *args)
970{
@@ -1467,10 +1211,6 @@ long gk20a_channel_ioctl(struct file *filp,
1467		NVGPU_ERR_NOTIFIER_RESETCHANNEL_VERIF_ERROR, true);
1468	gk20a_idle(ch->g);
1469	break;
1470 case NVGPU_IOCTL_CHANNEL_EVENT_ID_CTRL:
1471 err = gk20a_channel_event_id_ctrl(ch,
1472 (struct nvgpu_event_id_ctrl_args *)buf);
1473 break;
1474#ifdef CONFIG_GK20A_CYCLE_STATS
1475	case NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT:
1476		err = gk20a_busy(ch->g);